webscout-8.3.4-py3-none-any.whl → webscout-8.3.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +52 -1016
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
- webscout/Provider/AISEARCH/__init__.py +11 -10
- webscout/Provider/AISEARCH/felo_search.py +7 -3
- webscout/Provider/AISEARCH/scira_search.py +2 -0
- webscout/Provider/AISEARCH/stellar_search.py +53 -8
- webscout/Provider/Deepinfra.py +13 -1
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/GithubChat.py +1 -0
- webscout/Provider/GptOss.py +207 -0
- webscout/Provider/Kimi.py +445 -0
- webscout/Provider/Netwrck.py +3 -6
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +12 -8
- webscout/Provider/OPENAI/TwoAI.py +94 -1
- webscout/Provider/OPENAI/__init__.py +4 -4
- webscout/Provider/OPENAI/copilot.py +20 -4
- webscout/Provider/OPENAI/deepinfra.py +12 -0
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/gptoss.py +288 -0
- webscout/Provider/OPENAI/kimi.py +469 -0
- webscout/Provider/OPENAI/netwrck.py +8 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/scirachat.py +4 -0
- webscout/Provider/OPENAI/textpollinations.py +11 -10
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/OPENAI/venice.py +1 -0
- webscout/Provider/Perplexitylabs.py +163 -147
- webscout/Provider/Qodo.py +30 -6
- webscout/Provider/TTI/__init__.py +1 -0
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +11 -9
- webscout/Provider/TTI/venice.py +368 -0
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +0 -1
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TextPollinationsAI.py +11 -10
- webscout/Provider/TogetherAI.py +12 -4
- webscout/Provider/TwoAI.py +96 -2
- webscout/Provider/TypliAI.py +33 -27
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
- webscout/Provider/Venice.py +1 -0
- webscout/Provider/WiseCat.py +18 -20
- webscout/Provider/__init__.py +2 -96
- webscout/Provider/cerebras.py +83 -33
- webscout/Provider/copilot.py +42 -23
- webscout/Provider/scira_chat.py +4 -0
- webscout/Provider/toolbaz.py +6 -10
- webscout/Provider/typefully.py +1 -11
- webscout/__init__.py +3 -15
- webscout/auth/__init__.py +19 -4
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/auth_system.py +25 -40
- webscout/auth/config.py +105 -6
- webscout/auth/database.py +377 -22
- webscout/auth/models.py +185 -130
- webscout/auth/request_processing.py +175 -11
- webscout/auth/routes.py +99 -2
- webscout/auth/server.py +9 -2
- webscout/auth/simple_logger.py +236 -0
- webscout/conversation.py +22 -20
- webscout/sanitize.py +1078 -0
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/OPENAI/typegpt.py +0 -368
- webscout/Provider/OPENAI/uncovrAI.py +0 -477
- webscout/Provider/TTS/sthir.py +0 -94
- webscout/Provider/WritingMate.py +0 -273
- webscout/Provider/typegpt.py +0 -284
- webscout/Provider/uncovr.py +0 -333
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
webscout/Provider/Kimi.py
ADDED
@@ -0,0 +1,445 @@
from curl_cffi import CurlError
from curl_cffi.requests import Session
import json
import random
from typing import Any, Dict, Optional, Generator, Union, List
import uuid

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class Kimi(Provider):
    """
    A class to interact with the Kimi API (kimi.com).

    This provider uses the Kimi web interface API endpoints to provide
    access to Kimi's AI models.

    Examples:
        >>> from webscout.Provider.Kimi import Kimi
        >>> ai = Kimi()
        >>> response = ai.chat("What's the weather today?")
        >>> print(response)
        'The weather today is sunny...'
    """

    AVAILABLE_MODELS = ["k1.5", "k2", "k1.5-thinking"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "k2",
        system_prompt: str = "You are a helpful assistant.",
        browser: str = "chrome",
        web_search: bool = False,
    ):
        """
        Initializes the Kimi API client with given parameters.

        Args:
            is_conversation: Whether to maintain conversation history
            max_tokens: Maximum tokens for response
            timeout: Request timeout in seconds
            intro: Introduction message
            filepath: Path to conversation history file
            update_file: Whether to update conversation file
            proxies: Proxy configuration
            history_offset: History offset for conversation
            act: Act/persona for the assistant
            model: Model to use (k1.5, k2, kimi, kimi-plus)
            system_prompt: System prompt for the assistant
            browser: Browser to impersonate
            web_search: Whether to enable web search
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
            )

        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.web_search = web_search

        # Kimi API endpoints
        self.register_endpoint = "https://www.kimi.com/api/device/register"
        self.chat_create_endpoint = "https://www.kimi.com/api/chat"
        self.chat_completion_endpoint = "https://www.kimi.com/api/chat/{chat_id}/completion/stream"

        # Initialize LitAgent for browser fingerprinting
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Generate device ID
        self.device_id = str(random.randint(1000000000000000, 9999999999999999))

        # Headers for Kimi API
        self.headers = {
            "Accept": "text/event-stream",
            "Accept-Language": self.fingerprint["accept_language"],
            "Accept-Encoding": "gzip, deflate, br",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "DNT": "1",
            "Origin": "https://www.kimi.com",
            "Pragma": "no-cache",
            "Referer": "https://www.kimi.com/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "x-msh-device-id": self.device_id,
            "x-msh-platform": "web",
            "x-traffic-id": self.device_id,
        }

        # Initialize authentication
        self.access_token = None
        self.chat_id = None

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method))
            and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        # Update session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

    def _authenticate(self) -> str:
        """Authenticate with Kimi API and get access token."""
        if self.access_token:
            return self.access_token

        max_retries = 3
        last_exception = None

        for attempt in range(max_retries):
            try:
                response = self.session.post(
                    self.register_endpoint,
                    json={},
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                data = response.json()
                if not data.get("access_token"):
                    raise exceptions.FailedToGenerateResponseError("No access token received")

                self.access_token = data["access_token"]
                self.session.headers["Authorization"] = f"Bearer {self.access_token}"
                return self.access_token

            except CurlError as e:
                last_exception = e
                if attempt < max_retries - 1:
                    continue
                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts (CurlError): {e}")
            except Exception as e:
                last_exception = e
                if attempt < max_retries - 1:
                    continue
                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {e}")

        # This should never be reached, but just in case
        raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {last_exception}")

    def _create_chat(self) -> str:
        """Create a new chat session and return chat ID."""
        if self.chat_id:
            return self.chat_id

        self._authenticate()

        try:
            response = self.session.post(
                self.chat_create_endpoint,
                json={
                    "name": "Unnamed Chat",
                    "born_from": "home",
                    "kimiplus_id": "kimi",
                    "is_example": False,
                    "source": "web",
                    "tags": []
                },
                timeout=self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            data = response.json()
            self.chat_id = data.get("id")
            if not self.chat_id:
                raise exceptions.FailedToGenerateResponseError("No chat ID received")

            return self.chat_id

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed (CurlError): {e}")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed: {e}")

    @staticmethod
    def _kimi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extract content from Kimi SSE stream."""
        if isinstance(chunk, dict):
            if chunk.get("event") == "cmpl":
                return chunk.get("text")
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Send a prompt to Kimi API and return the response.

        Args:
            prompt: The prompt to send
            stream: Whether to stream the response
            raw: Whether to return raw response
            optimizer: Optimizer to use
            conversationally: Whether to generate conversationally

        Returns:
            Dict or Generator with the response
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        self._create_chat()

        # Fixed payload structure based on actual Kimi API requirements
        payload = {
            "kimiplus_id": "kimi",
            "extend": {"sidebar": True},
            "model": self.model,
            "use_search": self.web_search,
            "messages": [
                {
                    "role": "user",
                    "content": conversation_prompt
                }
            ],
            "refs": [],
            "history": [],
            "scene_labels": [],
            "use_semantic_memory": False,
            "use_deep_research": False
        }

        def for_stream():
            try:
                response = self.session.post(
                    self.chat_completion_endpoint.format(chat_id=self.chat_id),
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                streaming_text = ""
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=self._kimi_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            try:
                response = self.session.post(
                    self.chat_completion_endpoint.format(chat_id=self.chat_id),
                    json=payload,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                # Collect all streaming data
                full_text = ""
                processed_stream = sanitize_stream(
                    data=response.text,
                    to_json=True,
                    intro_value="data:",
                    skip_markers=["[DONE]"],
                    content_extractor=self._kimi_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        full_text += content_chunk

                self.last_response = {"text": full_text}
                self.conversation.update_chat_history(prompt, full_text)
                return self.last_response if not raw else full_text

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Chat with Kimi API.

        Args:
            prompt: The prompt to send
            stream: Whether to stream the response
            optimizer: Optimizer to use
            conversationally: Whether to generate conversationally
            raw: Whether to return raw response

        Returns:
            str or Generator with the response
        """
        def for_stream():
            for response in self.ask(
                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
                if raw:
                    yield response
                else:
                    yield self.get_message(response)

        def for_non_stream():
            result = self.ask(
                prompt,
                False,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
            )
            if raw:
                return result
            else:
                return self.get_message(result)

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract message from response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

    def refresh_identity(self, browser: str = None):
        """
        Refresh browser identity fingerprint.

        Args:
            browser: Specific browser to use
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        self.headers.update({
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        self.session.headers.update(self.headers)

        # Generate new device ID
        self.device_id = str(random.randint(1000000000000000, 9999999999999999))
        self.session.headers.update({
            "x-msh-device-id": self.device_id,
            "x-traffic-id": self.device_id,
        })

        return self.fingerprint

if __name__ == "__main__":
    # Test the Kimi provider
    print("-" * 80)
    print(f"{'Model':<20} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in Kimi.AVAILABLE_MODELS:
        try:
            ai = Kimi(model=model, timeout=30)
            response = ai.chat("Say 'Hello' in one word")

            if response and len(response.strip()) > 0:
                status = "✓"
                display_text = response.strip()[:50] + "..." if len(response.strip()) > 50 else response.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<20} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<20} {'✗':<10} {str(e)}")
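The added file is a complete Provider implementation. As a quick orientation for reviewers, here is a minimal usage sketch of the class exactly as it appears in this diff (model names taken from AVAILABLE_MODELS; no arguments beyond the __init__ signature above):

    from webscout.Provider.Kimi import Kimi

    ai = Kimi(model="k2", timeout=30)   # "k2" is the default per __init__

    # Non-streaming: chat() returns the assistant reply as a plain string
    print(ai.chat("Say 'Hello' in one word"))

    # Streaming: chat(stream=True) yields incremental text chunks
    for chunk in ai.chat("Count to 5", stream=True):
        print(chunk, end="", flush=True)

Under the hood, the first call registers a device (/api/device/register), creates a chat (/api/chat), then parses SSE "cmpl" events from /api/chat/{chat_id}/completion/stream, as the methods above show.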
webscout/Provider/Netwrck.py
CHANGED
@@ -18,16 +18,13 @@ class Netwrck(Provider):
         "sao10k/l3-euryale-70b",
         "deepseek/deepseek-chat",
         "deepseek/deepseek-r1",
-        "anthropic/claude-sonnet-4-20250514",
-        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
-        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
     ]

     def __init__(
         self,
-        model: str = "
+        model: str = "deepseek/deepseek-r1",
         is_conversation: bool = True,
         max_tokens: int = 4096,  # Note: max_tokens is not used by this API
         timeout: int = 30,
@@ -155,10 +152,10 @@ class Netwrck(Provider):
                 self.last_response = {"text": buffer}
                 self.conversation.update_chat_history(payload["query"], buffer)
             except CurlError as e:
-                raise exceptions.
+                raise exceptions.APIConnectionError(f"Network error (CurlError): {str(e)}") from e
             except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                raise exceptions.
+                raise exceptions.APIConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e

         def for_non_stream():
             try:
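Three Anthropic/OpenAI/Gemini entries leave the model list, the default model argument is now pinned to "deepseek/deepseek-r1", and both failure paths now raise exceptions.APIConnectionError with the original error chained via "from e". A minimal caller sketch, assuming Netwrck exposes the same chat() interface as the other providers in this package:

    from webscout import exceptions
    from webscout.Provider.Netwrck import Netwrck

    ai = Netwrck()  # defaults to "deepseek/deepseek-r1" as of this release
    try:
        print(ai.chat("Hello"))
    except exceptions.APIConnectionError as err:
        # the underlying CurlError/Exception is preserved on err.__cause__
        print(f"Netwrck request failed: {err}")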
webscout/Provider/OPENAI/TogetherAI.py
CHANGED
@@ -199,10 +199,6 @@ class Chat(BaseChat):
         self.completions = Completions(client)


-class TogetherAI(OpenAICompatibleProvider):
-    """
-    OpenAI-compatible client for TogetherAI API.
-    """
 class TogetherAI(OpenAICompatibleProvider):
     """
     OpenAI-compatible client for TogetherAI API.
@@ -214,29 +210,35 @@ class TogetherAI(OpenAICompatibleProvider):
         "Qwen/Qwen2-VL-72B-Instruct",
         "Qwen/Qwen2.5-72B-Instruct-Turbo",
         "Qwen/Qwen2.5-7B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
         "Qwen/Qwen2.5-VL-72B-Instruct",
+        "Qwen/Qwen3-235B-A22B-Instruct-2507-tput",
+        "Qwen/Qwen3-235B-A22B-Thinking-2507",
         "Qwen/Qwen3-235B-A22B-fp8-tput",
+        "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
         "Salesforce/Llama-Rank-V1",
-        "
-        "arcee-ai/
+        "Virtue-AI/VirtueGuard-Text-Lite",
+        "arcee-ai/AFM-4.5B",
         "arcee-ai/coder-large",
         "arcee-ai/maestro-reasoning",
         "arcee-ai/virtuoso-large",
-        "arcee-ai/virtuoso-medium-v2",
         "arcee_ai/arcee-spotlight",
         "blackbox/meta-llama-3-1-8b",
+        "deepcogito/cogito-v2-preview-deepseek-671b",
         "deepseek-ai/DeepSeek-R1",
+        "deepseek-ai/DeepSeek-R1-0528-tput",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
         "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
         "deepseek-ai/DeepSeek-V3",
         "google/gemma-2-27b-it",
+        "google/gemma-3n-E4B-it",
         "lgai/exaone-3-5-32b-instruct",
         "lgai/exaone-deep-32b",
         "marin-community/marin-8b-instruct",
-        "meta-llama-llama-2-70b-hf",
         "meta-llama/Llama-2-70b-hf",
+        "meta-llama/Llama-3-70b-chat-hf",
         "meta-llama/Llama-3-8b-chat-hf",
         "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
         "meta-llama/Llama-3.2-3B-Instruct-Turbo",
@@ -256,11 +258,13 @@ class TogetherAI(OpenAICompatibleProvider):
         "mistralai/Mistral-7B-Instruct-v0.3",
         "mistralai/Mistral-Small-24B-Instruct-2501",
         "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "moonshotai/Kimi-K2-Instruct",
         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "perplexity-ai/r1-1776",
         "scb10x/scb10x-llama3-1-typhoon2-70b-instruct",
         "scb10x/scb10x-typhoon-2-1-gemma3-12b",
         "togethercomputer/Refuel-Llm-V2-Small",
+        "zai-org/GLM-4.5-Air-FP8"
     ]

     def __init__(self, browser: str = "chrome"):
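Besides deleting a duplicated TogetherAI class definition, this release extends the model list (new Qwen3 variants, Kimi-K2, GLM-4.5-Air, and others). A minimal sketch of selecting one of the newly added models, assuming the OpenAI-style client.chat.completions.create(...) surface implied by the Chat/Completions wiring in the first hunk (the response shape is assumed to mirror the OpenAI SDK):

    from webscout.Provider.OPENAI.TogetherAI import TogetherAI

    client = TogetherAI()
    response = client.chat.completions.create(
        model="moonshotai/Kimi-K2-Instruct",  # added in this release
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)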
webscout/Provider/OPENAI/TwoAI.py
CHANGED
@@ -5,6 +5,9 @@ import time
 import uuid
 import re
 import urllib.parse
+import os
+import pickle
+import tempfile
 from typing import List, Dict, Optional, Union, Generator, Any

 from webscout.Extra.tempmail import get_random_email
@@ -208,6 +211,96 @@ class TwoAI(OpenAICompatibleProvider):
     """OpenAI-compatible client for the TwoAI API."""

     AVAILABLE_MODELS = ["sutra-v2", "sutra-r0"]
+
+    # Class-level cache for API keys
+    _api_key_cache = None
+    _cache_file = os.path.join(tempfile.gettempdir(), "webscout_twoai_openai_cache.pkl")
+
+    @classmethod
+    def _load_cached_api_key(cls) -> Optional[str]:
+        """Load cached API key from file."""
+        try:
+            if os.path.exists(cls._cache_file):
+                with open(cls._cache_file, 'rb') as f:
+                    cache_data = pickle.load(f)
+                # Check if cache is not too old (24 hours)
+                if time.time() - cache_data.get('timestamp', 0) < 86400:
+                    return cache_data.get('api_key')
+        except Exception:
+            # If cache is corrupted or unreadable, ignore and regenerate
+            pass
+        return None
+
+    @classmethod
+    def _save_cached_api_key(cls, api_key: str):
+        """Save API key to cache file."""
+        try:
+            cache_data = {
+                'api_key': api_key,
+                'timestamp': time.time()
+            }
+            with open(cls._cache_file, 'wb') as f:
+                pickle.dump(cache_data, f)
+        except Exception:
+            # If caching fails, continue without caching
+            pass
+
+    @classmethod
+    def _validate_api_key(cls, api_key: str) -> bool:
+        """Validate if an API key is still working."""
+        try:
+            session = Session()
+            headers = {
+                'User-Agent': LitAgent().random(),
+                'Accept': 'application/json',
+                'Content-Type': 'application/json',
+                'Authorization': f'Bearer {api_key}',
+            }
+
+            # Test with a simple request
+            test_payload = {
+                "messages": [{"role": "user", "content": "test"}],
+                "model": "sutra-v2",
+                "max_tokens": 1,
+                "stream": False
+            }
+
+            response = session.post(
+                "https://api.two.ai/v2/chat/completions",
+                headers=headers,
+                json=test_payload,
+                timeout=10,
+                impersonate="chrome120"
+            )
+
+            # If we get a 200 or 400 (bad request but auth worked), key is valid
+            # If we get 401/403, key is invalid
+            return response.status_code not in [401, 403]
+        except Exception:
+            # If validation fails, assume key is invalid
+            return False
+
+    @classmethod
+    def get_cached_api_key(cls) -> str:
+        """Get a cached API key or generate a new one if needed."""
+        # First check class-level cache
+        if cls._api_key_cache:
+            if cls._validate_api_key(cls._api_key_cache):
+                return cls._api_key_cache
+            else:
+                cls._api_key_cache = None
+
+        # Then check file cache
+        cached_key = cls._load_cached_api_key()
+        if cached_key and cls._validate_api_key(cached_key):
+            cls._api_key_cache = cached_key
+            return cached_key
+
+        # Generate new key if no valid cached key
+        new_key = cls.generate_api_key()
+        cls._api_key_cache = new_key
+        cls._save_cached_api_key(new_key)
+        return new_key

     @staticmethod
     def generate_api_key() -> str:
@@ -302,7 +395,7 @@ class TwoAI(OpenAICompatibleProvider):
         return api_key

     def __init__(self, browser: str = "chrome"):
-        api_key = self.
+        api_key = self.get_cached_api_key()
         self.timeout = 30
         self.base_url = "https://api.two.ai/v2/chat/completions"
         self.api_key = api_key