webscout 8.3.1__py3-none-any.whl → 8.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as possibly problematic.
- webscout/AIutel.py +46 -53
- webscout/Bing_search.py +418 -0
- webscout/Extra/gguf.py +706 -177
- webscout/Provider/AISEARCH/genspark_search.py +7 -7
- webscout/Provider/GeminiProxy.py +140 -0
- webscout/Provider/MCPCore.py +78 -75
- webscout/Provider/OPENAI/BLACKBOXAI.py +1 -4
- webscout/Provider/OPENAI/GeminiProxy.py +328 -0
- webscout/Provider/OPENAI/README.md +2 -0
- webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
- webscout/Provider/OPENAI/__init__.py +15 -1
- webscout/Provider/OPENAI/autoproxy.py +332 -39
- webscout/Provider/OPENAI/base.py +15 -5
- webscout/Provider/OPENAI/e2b.py +0 -1
- webscout/Provider/OPENAI/mcpcore.py +109 -70
- webscout/Provider/OPENAI/scirachat.py +59 -51
- webscout/Provider/OPENAI/toolbaz.py +2 -9
- webscout/Provider/OPENAI/xenai.py +514 -0
- webscout/Provider/OPENAI/yep.py +8 -2
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/bing.py +231 -0
- webscout/Provider/TTS/speechma.py +45 -39
- webscout/Provider/TogetherAI.py +366 -0
- webscout/Provider/XenAI.py +324 -0
- webscout/Provider/__init__.py +8 -3
- webscout/Provider/deepseek_assistant.py +378 -0
- webscout/auth/__init__.py +44 -0
- webscout/auth/api_key_manager.py +189 -0
- webscout/auth/auth_system.py +100 -0
- webscout/auth/config.py +76 -0
- webscout/auth/database.py +400 -0
- webscout/auth/exceptions.py +67 -0
- webscout/auth/middleware.py +248 -0
- webscout/auth/models.py +130 -0
- webscout/auth/providers.py +257 -0
- webscout/auth/rate_limiter.py +254 -0
- webscout/auth/request_models.py +127 -0
- webscout/auth/request_processing.py +226 -0
- webscout/auth/routes.py +526 -0
- webscout/auth/schemas.py +103 -0
- webscout/auth/server.py +312 -0
- webscout/auth/static/favicon.svg +11 -0
- webscout/auth/swagger_ui.py +203 -0
- webscout/auth/templates/components/authentication.html +237 -0
- webscout/auth/templates/components/base.html +103 -0
- webscout/auth/templates/components/endpoints.html +750 -0
- webscout/auth/templates/components/examples.html +491 -0
- webscout/auth/templates/components/footer.html +75 -0
- webscout/auth/templates/components/header.html +27 -0
- webscout/auth/templates/components/models.html +286 -0
- webscout/auth/templates/components/navigation.html +70 -0
- webscout/auth/templates/static/api.js +455 -0
- webscout/auth/templates/static/icons.js +168 -0
- webscout/auth/templates/static/main.js +784 -0
- webscout/auth/templates/static/particles.js +201 -0
- webscout/auth/templates/static/styles.css +3353 -0
- webscout/auth/templates/static/ui.js +374 -0
- webscout/auth/templates/swagger_ui.html +170 -0
- webscout/client.py +49 -3
- webscout/scout/core/scout.py +104 -26
- webscout/scout/element.py +139 -18
- webscout/swiftcli/core/cli.py +14 -3
- webscout/swiftcli/decorators/output.py +59 -9
- webscout/update_checker.py +31 -49
- webscout/version.py +1 -1
- webscout/webscout_search.py +4 -12
- webscout/webscout_search_async.py +3 -10
- webscout/yep_search.py +2 -11
- {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
- {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/RECORD +74 -36
- {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/OPENAI/api.py +0 -1320
- {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
webscout/Provider/AISEARCH/genspark_search.py CHANGED

@@ -201,12 +201,12 @@ class Genspark(AISearch):
             json={},
             stream=True,
             timeout=self.timeout,
-        ) as …
-            if not …
+        ) as resp:
+            if not resp.ok:
                 raise exceptions.APIConnectionError(
-                    f"Failed to generate SearchResponse - ({…
+                    f"Failed to generate SearchResponse - ({resp.status_code}, {resp.reason}) - {resp.text}"
                 )
-            for line in …
+            for line in resp.iter_lines(decode_unicode=True):
                 if not line or not line.startswith("data: "):
                     continue
                 try:
@@ -287,7 +287,7 @@ class Genspark(AISearch):
                         yield processed_event_payload
                 except json.JSONDecodeError:
                     continue
-        except cloudscraper.exceptions as e:
+        except cloudscraper.exceptions.CloudflareException as e:
             raise exceptions.APIConnectionError(f"Request failed due to Cloudscraper issue: {e}")
         except requests.exceptions.RequestException as e:
             raise exceptions.APIConnectionError(f"Request failed: {e}")
@@ -315,8 +315,8 @@ if __name__ == "__main__":
     from rich import print
     ai = Genspark()
     try:
-        …
-        for chunk in …
+        search_result_stream = ai.search(input(">>> "), stream=True, raw=False)
+        for chunk in search_result_stream:
             print(chunk, end="", flush=True)
     except KeyboardInterrupt:
         print("\nSearch interrupted by user.")
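The real fix in the middle hunk above is the exception clause: `cloudscraper.exceptions` is a module, not an exception class, so the old `except cloudscraper.exceptions as e:` would itself fail with `TypeError: catching classes that do not inherit from BaseException is not allowed` the moment a request went wrong. Catching a concrete class such as `cloudscraper.exceptions.CloudflareException` restores the intended fallback. A minimal sketch of the corrected pattern (the URL is a placeholder, not Genspark's endpoint):

import cloudscraper
import requests

scraper = cloudscraper.create_scraper()
try:
    resp = scraper.get("https://example.com", timeout=30)  # placeholder URL
    resp.raise_for_status()
except cloudscraper.exceptions.CloudflareException as e:
    # Cloudscraper-specific failure, e.g. an unsolvable Cloudflare challenge
    print(f"Request failed due to Cloudscraper issue: {e}")
except requests.exceptions.RequestException as e:
    # Any other transport-level failure
    print(f"Request failed: {e}")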
webscout/Provider/GeminiProxy.py ADDED

@@ -0,0 +1,140 @@
+from typing import Any, Dict, Optional, Union, Generator
+import requests
+import base64
+from webscout.litagent import LitAgent
+from webscout.AIutel import Optimizers, AwesomePrompts
+from webscout.AIutel import Conversation
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class GeminiProxy(Provider):
+    """
+    GeminiProxy is a provider class for interacting with the Gemini API via a proxy endpoint.
+    """
+    AVAILABLE_MODELS = [
+        "gemini-2.0-flash-lite",
+        "gemini-2.0-flash",
+        "gemini-2.5-pro-preview-06-05",
+        "gemini-2.5-pro-preview-05-06",
+        "gemini-2.5-flash-preview-04-17",
+        "gemini-2.5-flash-preview-05-20",
+
+    ]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gemini-2.0-flash-lite",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome"
+    ):
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+        self.base_url = "https://us-central1-infinite-chain-295909.cloudfunctions.net/gemini-proxy-staging-v1"
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+        self.headers = self.fingerprint.copy()
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        self.session.proxies.update(proxies)
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def get_image(self, img_url):
+        try:
+            response = self.session.get(img_url, stream=True, timeout=self.timeout)
+            response.raise_for_status()
+            mime_type = response.headers.get("content-type", "application/octet-stream")
+            data = base64.b64encode(response.content).decode("utf-8")
+            return {"mime_type": mime_type, "data": data}
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Error fetching image: {e}")
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        img_url: Optional[str] = None,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+        parts = []
+        if img_url:
+            parts.append({"inline_data": self.get_image(img_url)})
+        parts.append({"text": conversation_prompt})
+        request_data = {
+            "model": self.model,
+            "contents": [{"parts": parts}]
+        }
+        def for_non_stream():
+            try:
+                response = self.session.post(self.base_url, json=request_data, headers=self.headers, timeout=self.timeout)
+                response.raise_for_status()
+                data = response.json()
+                self.last_response = data
+                self.conversation.update_chat_history(prompt, self.get_message(data))
+                return data
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Error during chat request: {e}")
+        # Gemini proxy does not support streaming, so only non-stream
+        return for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        img_url: Optional[str] = None,
+    ) -> str:
+        data = self.ask(prompt, stream=stream, optimizer=optimizer, conversationally=conversationally, img_url=img_url)
+        return self.get_message(data)
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        try:
+            return response['candidates'][0]['content']['parts'][0]['text']
+        except Exception:
+            return str(response)
+
+if __name__ == "__main__":
+    ai = GeminiProxy(timeout=30, model="gemini-2.5-flash-preview-05-20")
+    response = ai.chat("write a poem about AI")
+    print(response)
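The new provider follows webscout's standard Provider surface (`ask`, `chat`, `get_message`), with optional image input that is downloaded, base64-encoded, and sent as `inline_data`. A minimal usage sketch, assuming webscout 8.3.2 is installed (prompt, model choice, and image URL are illustrative; note the proxy endpoint does not stream, so responses always arrive whole even with stream=True):

from webscout.Provider.GeminiProxy import GeminiProxy

# Any entry from GeminiProxy.AVAILABLE_MODELS should work here.
ai = GeminiProxy(model="gemini-2.0-flash", timeout=30)

# Plain text request
print(ai.chat("Explain the difference between TCP and UDP in two sentences."))

# Multimodal request: the image at img_url is fetched and inlined as base64
print(ai.chat("Describe this image.", img_url="https://example.com/cat.png"))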
webscout/Provider/MCPCore.py CHANGED

@@ -1,5 +1,7 @@
 import json
 import uuid
+import random
+import string
 from typing import Any, Dict, Generator, Union
 
 # Use curl_cffi for requests
@@ -21,37 +23,37 @@ class MCPCore(Provider):
 
     # Add more models if known, starting with the one from the example
     AVAILABLE_MODELS = [
-        … (26 old model entries, truncated in the extracted diff)
+        "@cf/deepseek-ai/deepseek-math-7b-instruct",
+        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
+        "@cf/defog/sqlcoder-7b-2",
+        "@cf/fblgit/una-cybertron-7b-v2-bf16",
+        "@cf/google/gemma-3-12b-it",
+        "@cf/meta/llama-2-7b-chat-int8",
+        "@hf/thebloke/llama-2-13b-chat-awq",
+        "@hf/thebloke/llamaguard-7b-awq",
+        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+        "@hf/thebloke/neural-chat-7b-v3-1-awq",
+        "anthropic/claude-3.5-haiku",
+        "anthropic/claude-3.5-sonnet",
+        "anthropic/claude-3.7-sonnet",
+        "anthropic/claude-3.7-sonnet:thinking",
+        "anthropic/claude-opus-4",
+        "anthropic/claude-sonnet-4",
+        "openai/chatgpt-4o-latest",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4.1",
+        "openai/gpt-4.1-mini",
+        "openai/gpt-4.1-nano",
+        "openai/gpt-4o-mini-search-preview",
+        "openai/gpt-4o-search-preview",
+        "openai/o1-pro",
+        "openai/o3-mini",
+        "sarvam-m",
+        "x-ai/grok-3-beta",
     ]
 
     def __init__(
         self,
-        cookies_path: str,
         is_conversation: bool = True,
         max_tokens: int = 2048,
         timeout: int = 60,
@@ -70,46 +72,22 @@ class MCPCore(Provider):
 
         self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
 
-        # Cache the user-agent at the class level
-        if not hasattr(MCPCore, '_cached_user_agent'):
-            MCPCore._cached_user_agent = LitAgent().random()
         self.model = model
         self.system_prompt = system_prompt
-        self.cookies_path = cookies_path
-        self.cookie_string, self.token = self._load_cookies()
 
         # Initialize curl_cffi Session
         self.session = Session()
 
         # Set up headers based on the provided request
         self.headers = {
-
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9,en-IN;q=0.8',
-            **({'authorization': f'Bearer {self.token}'} if self.token else {}),
-            'content-type': 'application/json',
-            'dnt': '1',
+            **LitAgent().generate_fingerprint(),
             'origin': 'https://chat.mcpcore.xyz',
             'referer': 'https://chat.mcpcore.xyz/',
-            'priority': 'u=1, i',
-            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'sec-gpc': '1',
-            'user-agent': self._cached_user_agent,
         }
 
         # Apply headers, proxies, and cookies to the session
         self.session.headers.update(self.headers)
         self.session.proxies = proxies
-        self.cookies = {
-            'token': self.token,
-        }
-        for name, value in self.cookies.items():
-            self.session.cookies.set(name, value, domain="chat.mcpcore.xyz")
 
         # Provider settings
         self.is_conversation = is_conversation
@@ -136,27 +114,54 @@ class MCPCore(Provider):
         )
         self.conversation.history_offset = history_offset
 
-
-
+        # Token handling: always auto-fetch token, no cookies logic
+        self.token = self._auto_fetch_token()
+
+        # Set the Authorization header for the session
+        self.session.headers.update({
+            'authorization': f'Bearer {self.token}',
+        })
+
+    def _auto_fetch_token(self):
+        """Automatically fetch a token from the signup endpoint."""
+        session = Session()
+        def random_string(length=8):
+            return ''.join(random.choices(string.ascii_lowercase, k=length))
+        name = random_string(6)
+        email = f"{random_string(8)}@gmail.com"
+        password = email
+        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
+        payload = {
+            "name": name,
+            "email": email,
+            "password": password,
+            "profile_image_url": profile_image_url
+        }
+        headers = {
+            **LitAgent().generate_fingerprint(),
+            'origin': 'https://chat.mcpcore.xyz',
+            'referer': 'https://chat.mcpcore.xyz/auth',
+        }
         try:
-            … (cookie-file loading code, truncated in the extracted diff)
-                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
-                "",
-            )
-            return cookie_string, token
-        except FileNotFoundError:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Error: Cookies file not found at {self.cookies_path}!"
-            )
-        except json.JSONDecodeError:
-            raise exceptions.FailedToGenerateResponseError(
-                f"Error: Invalid JSON format in cookies file: {self.cookies_path}!"
+            resp = session.post(
+                "https://chat.mcpcore.xyz/api/v1/auths/signup",
+                headers=headers,
+                json=payload,
+                timeout=30,
+                impersonate="chrome110"
             )
+            if resp.ok:
+                data = resp.json()
+                token = data.get("token")
+                if token:
+                    return token
+                # fallback: try to get from set-cookie
+                set_cookie = resp.headers.get("set-cookie", "")
+                if "token=" in set_cookie:
+                    return set_cookie.split("token=")[1].split(";")[0]
+            raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")
 
     def ask(
         self,
@@ -286,19 +291,17 @@ class MCPCore(Provider):
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response.get("text", "")
 
-# Example usage (…
+# Example usage (no cookies file needed)
 if __name__ == "__main__":
     from rich import print
 
-    cookies_file_path = "cookies.json"
-
     print("-" * 80)
     print(f"{'Model':<50} {'Status':<10} {'Response'}")
     print("-" * 80)
 
     for model in MCPCore.AVAILABLE_MODELS:
         try:
-            test_ai = MCPCore(…
+            test_ai = MCPCore(model=model, timeout=60)
             response = test_ai.chat("Say 'Hello' in one word", stream=True)
             response_text = ""
             # Accumulate the response text without printing in the loop
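The practical upshot of these hunks: MCPCore no longer takes a `cookies_path` argument. Instead of reading a browser-exported cookies.json, it registers a throwaway account against the chat.mcpcore.xyz signup endpoint at construction time and uses the returned bearer token. Callers written against 8.3.1 need a one-line change; a before/after sketch (model name illustrative):

from webscout.Provider.MCPCore import MCPCore

# webscout 8.3.1 (old): required a cookies file exported from the browser
# ai = MCPCore(cookies_path="cookies.json", model="openai/gpt-4.1-mini")

# webscout 8.3.2 (new): a token is auto-fetched on construction
ai = MCPCore(model="openai/gpt-4.1-mini", timeout=60)
print(ai.chat("Say 'Hello' in one word"))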
(file header lost in extraction) CHANGED

@@ -1,6 +1,5 @@
 # from pickle import NONE
 import requests
-import requests
 import random
 import string
 import base64
@@ -1044,6 +1043,4 @@ if __name__ == "__main__":
         )
         for chunk in response:
             print(chunk.choices[0].delta.content, end='', flush=True)
-
-    print("Proxies on instance:", client.proxies)
-    print("Proxies on session:", client.session.proxies)
+