webscout-8.3.5-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/AIutel.py +2 -0
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +13 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +4 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +6 -8
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +52 -57
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -56
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +12 -6
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +9 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -14
- webscout/Provider/OPENAI/toolbaz.py +14 -10
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +18 -11
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -15
- webscout/Provider/TogetherAI.py +136 -142
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -174
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +194 -38
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -11
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/copilot.py +0 -305
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -422
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
webscout/Provider/UNFINISHED/VercelAIGateway.py
ADDED
@@ -0,0 +1,339 @@
+from curl_cffi.requests import Session
+from curl_cffi import CurlError
+import json
+import random
+import string
+from typing import Any, Dict, Optional, Generator, Union, List
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider, AsyncProvider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+# Using LitProxy for intelligent proxy management
+try:
+    from litproxy import (
+        get_auto_proxy, get_proxy_dict, test_proxy, get_working_proxy,
+        refresh_proxy_cache, get_proxy_stats, set_proxy_cache_duration,
+        patch, use_proxy, proxyify, list_proxies, test_all_proxies,
+        current_proxy, make_request_with_auto_retry, create_auto_retry_session
+    )
+    LITPROXY_AVAILABLE = True
+except ImportError:
+    LITPROXY_AVAILABLE = False
+
+import requests
+
+class VercelAIGateway(Provider):
+    """
+    A class to interact with the Vercel AI SDK Gateway Demo API with intelligent proxy management using LitProxy.
+
+    Install LitProxy for advanced proxy features:
+        pip install litproxy
+
+    Features:
+    - Intelligent proxy rotation and health monitoring
+    - Automatic retry with proxy fallback on failures
+    - Support for multiple proxy sources (Webshare, NordVPN, Remote lists)
+    - Seamless curl_cffi session integration
+    - Comprehensive proxy diagnostics and statistics
+    """
+
+    AVAILABLE_MODELS = [
+        "amazon/nova-lite",
+        "amazon/nova-micro",
+        "anthropic/claude-3.5-haiku",
+        "google/gemini-2.0-flash",
+        "meta/llama-3.1-8b",
+        "mistral/ministral-3b",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-4o-mini",
+        "xai/grok-3"
+    ]
+
+    @staticmethod
+    def _vercel_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extracts content from Vercel AI Gateway stream JSON objects."""
+        if isinstance(chunk, dict):
+            if chunk.get("type") == "text-delta":
+                return chunk.get("delta")
+        return None
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2049,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "openai/gpt-4o-mini",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome",
+        use_proxy: bool = True,
+        max_proxy_attempts: int = 3,
+        proxy_cache_duration: int = 300
+    ):
+        """
+        Initializes the Vercel AI Gateway API client with LitProxy integration.
+
+        Args:
+            use_proxy (bool): Enable proxy usage via LitProxy (default: True)
+            max_proxy_attempts (int): Maximum proxy retry attempts (default: 3)
+            proxy_cache_duration (int): Proxy cache duration in seconds (default: 300)
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
+        self.url = "https://ai-sdk-gateway-demo.labs.vercel.dev/api/chat"
+
+        # Initialize LitAgent
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        self.headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://ai-sdk-gateway-demo.labs.vercel.dev",
+            "Priority": "u=1, i",
+            "Referer": f"https://ai-sdk-gateway-demo.labs.vercel.dev/?modelId={model.replace('/', '%2F')}",
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Sec-GPC": "1",
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "X-Forwarded-For": self.fingerprint.get("x-forwarded-for", ""),
+            "X-Real-IP": self.fingerprint.get("x-real-ip", ""),
+            "X-Client-IP": self.fingerprint.get("x-client-ip", ""),
+        }
+
+        # Initialize curl_cffi Session
+        self.session = Session()
+        self.session.headers.update(self.headers)
+
+        # Configure proxy settings
+        self.use_proxy = use_proxy
+        self.max_proxy_attempts = max_proxy_attempts
+        self.proxy_cache_duration = proxy_cache_duration
+
+        # Integrate LitProxy for intelligent proxy management
+        if use_proxy and LITPROXY_AVAILABLE:
+            try:
+                # Configure proxy cache duration
+                set_proxy_cache_duration(proxy_cache_duration)
+                # Patch the session with proxy support
+                patch(self.session)
+                self.proxy_enabled = True
+            except Exception as e:
+                self.proxy_enabled = False
+        else:
+            self.proxy_enabled = False
+            if use_proxy and not LITPROXY_AVAILABLE:
+                # Silently disable proxy if LitProxy not available
+                pass
+
+        self.system_prompt = system_prompt
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refreshes the browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use for the new fingerprint
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Update headers with new fingerprint
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "User-Agent": self.fingerprint.get("user_agent", ""),
+            "Sec-CH-UA": self.fingerprint.get("sec_ch_ua", ""),
+            "Sec-CH-UA-Platform": f'"{self.fingerprint.get("platform", "")}"',
+        })
+
+        # Update session headers
+        self.session.headers.update(self.headers)
+        return self.fingerprint
+
+    def _make_request(self, payload: dict, stream: bool = False):
+        """
+        Make a request to the API. The session is already patched with LitProxy auto-retry if enabled.
+
+        Args:
+            payload: Request payload
+            stream: Whether to stream the response
+
+        Returns:
+            Response object
+        """
+        # Use the session directly - it's already patched with proxy auto-retry if enabled
+        response = self.session.post(
+            self.url,
+            data=json.dumps(payload),
+            stream=stream,
+            timeout=self.timeout,
+            impersonate="chrome110"
+        )
+        response.raise_for_status()
+        return response
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
+
+        # Generate random IDs
+        conversation_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+        message_id = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
+
+        # Payload construction
+        payload = {
+            "modelId": self.model,
+            "id": conversation_id,
+            "messages": [
+                {
+                    "parts": [{"type": "text", "text": conversation_prompt}],
+                    "id": message_id,
+                    "role": "user"
+                }
+            ],
+            "trigger": "submit-message"
+        }
+
+        def for_stream():
+            streaming_text = ""
+            try:
+                response = self._make_request(payload, stream=True)
+
+                # Use sanitize_stream for SSE format
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}") from e
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)}") from e
+            finally:
+                if streaming_text:
+                    self.last_response = {"text": streaming_text}
+                    self.conversation.update_chat_history(prompt, streaming_text)
+
+        def for_non_stream():
+            try:
+                response = self._make_request(payload, stream=False)
+
+                # Collect all streaming chunks for non-stream mode
+                full_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._vercel_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_text += content_chunk
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return self.last_response if not raw else full_text
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
+            except Exception as e:
+                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+                raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {e} - {err_text}") from e
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream_chat():
+            gen = self.ask(
+                prompt, stream=True, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            for response_dict in gen:
+                yield self.get_message(response_dict)
+
+        def for_non_stream_chat():
+            response_data = self.ask(
+                prompt, stream=False, raw=False,
+                optimizer=optimizer, conversationally=conversationally
+            )
+            return self.get_message(response_data)
+
+        return for_stream_chat() if stream else for_non_stream_chat()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+if __name__ == "__main__":
+    test_ai = VercelAIGateway(use_proxy=True, max_proxy_attempts=3, proxy_cache_duration=300)
+    print(test_ai.chat("Hello, how are you?"))
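
For orientation, here is a minimal usage sketch for the provider added above. The constructor arguments, model name, and chat() signature are taken from the diff itself; the import path is an assumption based on the file's location in this release (webscout/Provider/UNFINISHED/VercelAIGateway.py) and may change if the module graduates out of UNFINISHED.

# Usage sketch only; import path assumed from the file's 8.3.7 location.
from webscout.Provider.UNFINISHED.VercelAIGateway import VercelAIGateway

ai = VercelAIGateway(model="openai/gpt-4o-mini", use_proxy=False)

# Non-streaming: chat() returns the full reply as a string.
print(ai.chat("Hello, how are you?"))

# Streaming: chat(stream=True) yields text chunks as they arrive.
for chunk in ai.chat("Name three HTTP headers.", stream=True):
    print(chunk, end="", flush=True)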
webscout/Provider/Venice.py
CHANGED
@@ -17,6 +17,7 @@ class Venice(Provider):
     A class to interact with the Venice AI API.
     """
 
+    required_auth = False
     AVAILABLE_MODELS = [
         "mistral-31-24b",
         "dolphin-3.0-mistral-24b",
@@ -247,4 +248,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
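
This release starts tagging providers with a required_auth class attribute (False for Venice and WiseCat, True for WrDoChat and AkashGPT below). The diff shows no consumer for the flag, so the following is a purely hypothetical illustration of how callers could filter providers that work without credentials:

# Hypothetical sketch: required_auth is real in 8.3.7, the discovery loop is not.
import inspect
import webscout.Provider as providers

no_auth = [
    name
    for name, obj in inspect.getmembers(providers, inspect.isclass)
    if getattr(obj, "required_auth", True) is False
]
print(no_auth)  # expected to include "Venice" and "WiseCat" if they imported cleanly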
webscout/Provider/VercelAI.py
CHANGED
webscout/Provider/WiseCat.py
CHANGED
@@ -17,6 +17,7 @@ class WiseCat(Provider):
     A class to interact with the WiseCat API.
     """
 
+    required_auth = False
    AVAILABLE_MODELS = [
        "chat-model-small",
        # "chat-model-large", # >>> NOT WORKING <<<
@@ -228,4 +229,4 @@ if __name__ == "__main__":
                 display_text = "Empty or invalid response"
             print(f"\r{model:<50} {status:<10} {display_text}")
         except Exception as e:
-            print(f"\r{model:<50} {'✗':<10} {str(e)}")
+            print(f"\r{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/WrDoChat.py
CHANGED
@@ -28,6 +28,7 @@ class WrDoChat(Provider):
     >>> print(response)
     """
 
+    required_auth = True
     AVAILABLE_MODELS = [
         "deepseek-chat-v3-0324",
         "deepseek-r1",
@@ -363,4 +364,4 @@ if __name__ == "__main__":
     ai = WrDoChat(cookies_path="cookies.json")
     response = ai.chat("write me a poem about AI", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
webscout/Provider/__init__.py
CHANGED
@@ -1,174 +1,18 @@
-#
-
-
-
-from
-
-
-
-
-
-
-
-
-
-from .Llama3 import *
-from .koala import *
-from .meta import *
-from .julius import *
-from .yep import *
-from .Cloudflare import *
-from .turboseek import *
-from .TeachAnything import *
-from .x0gpt import *
-from .cerebras import *
-from .geminiapi import *
-from .elmo import *
-from .Netwrck import Netwrck
-from .llmchat import *
-from .llmchatco import LLMChatCo # Add new LLMChat.co provider
-from .talkai import *
-from .llama3mitril import *
-from .Marcus import *
-from .multichat import *
-from .Jadve import *
-from .chatglm import *
-from .hermes import *
-from .TextPollinationsAI import *
-from .Glider import *
-from .QwenLM import *
-from .granite import *
-from .WiseCat import *
-from .freeaichat import FreeAIChat
-from .akashgpt import *
-from .Perplexitylabs import *
-from .AllenAI import *
-from .HeckAI import *
-from .TwoAI import *
-from .Venice import *
-from .GithubChat import *
-from .copilot import *
-from .sonus import *
-from .LambdaChat import *
-from .ChatGPTClone import *
-from .VercelAI import *
-from .ExaChat import *
-from .asksteve import *
-from .Aitopia import *
-from .searchchat import *
-from .ExaAI import ExaAI
-from .OpenGPT import OpenGPT
-from .scira_chat import *
-from .StandardInput import *
-from .toolbaz import Toolbaz
-from .scnet import SCNet
-from .MCPCore import MCPCore
-from .TypliAI import TypliAI
-from .ChatSandbox import ChatSandbox
-from .GizAI import GizAI
-from .WrDoChat import WrDoChat
-from .Nemotron import NEMOTRON
-from .FreeGemini import FreeGemini
-from .Flowith import Flowith
-from .samurai import samurai
-from .lmarena import lmarena
-from .oivscode import oivscode
-from .XenAI import XenAI
-from .deepseek_assistant import DeepSeekAssistant
-from .GeminiProxy import GeminiProxy
-from .TogetherAI import TogetherAI
-from .MiniMax import MiniMax
-from .Qodo import *
-from .monochat import MonoChat
-__all__ = [
-    'SCNet',
-    'MonoChat',
-    'MiniMax',
-    'QodoAI',
-    'GeminiProxy',
-    'TogetherAI',
-    'oivscode',
-    'DeepSeekAssistant',
-    'lmarena',
-    'XenAI',
-    'NEMOTRON',
-    'Flowith',
-    'samurai',
-    'FreeGemini',
-    'WrDoChat',
-    'GizAI',
-    'ChatSandbox',
-    'SciraAI',
-    'StandardInputAI',
-    'OpenGPT',
-    'Venice',
-    'ExaAI',
-    'Copilot',
-    'TwoAI',
-    'HeckAI',
-    'AllenAI',
-    'PerplexityLabs',
-    'AkashGPT',
-    'WiseCat',
-    'IBMGranite',
-    'QwenLM',
-    'LambdaChat',
-    'TextPollinationsAI',
-    'GliderAI',
-    'Cohere',
-    'REKA',
-    'GROQ',
-    'AsyncGROQ',
-    'OPENAI',
-    'AsyncOPENAI',
-    'KOBOLDAI',
-    'AsyncKOBOLDAI',
-    'BLACKBOXAI',
-    'GEMINI',
-    'DeepInfra',
-    'AI4Chat',
-    'OLLAMA',
-    'AndiSearch',
-    'Sambanova',
-    'KOALA',
-    'Meta',
-    'PiAI',
-    'Julius',
-    'YEPCHAT',
-    'Cloudflare',
-    'TurboSeek',
-    'TeachAnything',
-    'X0GPT',
-    'Cerebras',
-    'GEMINIAPI',
-    'SonusAI',
-    'Cleeai',
-    'Elmo',
-    'ChatGPTClone',
-    'TypefullyAI',
-    'Netwrck',
-    'LLMChat',
-    'LLMChatCo',
-    'Talkai',
-    'Llama3Mitril',
-    'Marcus',
-    'Netwrck',
-    'MultiChatAI',
-    'JadveOpenAI',
-    'ChatGLM',
-    'NousHermes',
-    'FreeAIChat',
-    'GithubChat',
-    'VercelAI',
-    'ExaChat',
-    'AskSteve',
-    'Aitopia',
-    'SearchChatAI',
-    'Toolbaz',
-    'MCPCore',
-    'TypliAI',
-]
+# This file marks the directory as a Python package.
+
+import os
+import importlib
+from pathlib import Path
+
+# Get current directory
+current_dir = Path(__file__).parent
+
+# Auto-import all .py files (except __init__.py)
+for file_path in current_dir.glob("*.py"):
+    if file_path.name != "__init__.py":
+        module_name = file_path.stem
+        try:
+            module = importlib.import_module(f".{module_name}", package=__name__)
+            globals().update(vars(module))
+        except ImportError:
+            pass # Skip files that can't be imported
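
The rewrite above trades the explicit import list and __all__ for a glob-based auto-import, which means any provider module that raises ImportError now disappears silently instead of breaking the package. A quick runtime check of what actually loaded (the specific names are examples drawn from this diff, not guarantees):

# Sketch: list the public names merged into webscout.Provider by the auto-import.
import webscout.Provider as p

loaded = sorted(name for name in dir(p) if not name.startswith("_"))
print("Venice" in loaded)   # True only if Venice.py imported without error
print("K2Think" in loaded)  # new provider in 8.3.7, per the file list above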
webscout/Provider/ai4chat.py
CHANGED
webscout/Provider/akashgpt.py
CHANGED
@@ -27,17 +27,14 @@ class AkashGPT(Provider):
     >>> print(response)
     'The weather today depends on your location. I don't have access to real-time weather data.'
     """
-
+    required_auth = True
     AVAILABLE_MODELS = [
-        "Qwen3-
-        "
-        "
-        "
-        "
-        "
-        "AkashGen"
-
-
+        "Qwen3-Next-80B-A3B-Instruct",
+        "DeepSeek-V3.1",
+        "openai-gpt-oss-120b",
+        "Qwen3-235B-A22B-Instruct-2507-FP8"
+        "meta-llama-Llama-4-Maverick-17B-128E-Instruct-FP8"
+        "Meta-Llama-3-3-70B-Instruct"
     ]
 
     def __init__(