webscout-8.3.6-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl
This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +7 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +3 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/TogetherAI.py +2 -2
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -58
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +6 -6
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +1 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -11
- webscout/Provider/OPENAI/toolbaz.py +14 -11
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTS/__init__.py +18 -10
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -12
- webscout/Provider/TogetherAI.py +86 -87
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -86
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +115 -9
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -12
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
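
Several provider modules change import paths in this release (for example, webscout/Provider/OPENAI/monochat.py becomes webscout/Provider/OPENAI/K2Think.py, Llama3.py becomes Sambanova.py, and Kimi.py is deleted outright). A minimal sketch of a version-tolerant import, assuming only the module paths shown in the listing above; whether the package re-exports these names elsewhere is not visible in this diff:

# Sketch: guard imports across webscout 8.3.6 / 8.3.7.
# Module paths are taken from the file listing above; nothing else is assumed.
try:
    from webscout.Provider.OPENAI import K2Think as k2_module  # 8.3.7+
except ImportError:
    from webscout.Provider.OPENAI import monochat as k2_module  # <= 8.3.6
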
webscout/Provider/Kimi.py
DELETED
@@ -1,445 +0,0 @@
from curl_cffi import CurlError
from curl_cffi.requests import Session
import json
import random
from typing import Any, Dict, Optional, Generator, Union, List
import uuid

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class Kimi(Provider):
    """
    A class to interact with the Kimi API (kimi.com).

    This provider uses the Kimi web interface API endpoints to provide
    access to Kimi's AI models.

    Examples:
        >>> from webscout.Provider.Kimi import Kimi
        >>> ai = Kimi()
        >>> response = ai.chat("What's the weather today?")
        >>> print(response)
        'The weather today is sunny...'
    """

    AVAILABLE_MODELS = ["k1.5", "k2", "k1.5-thinking"]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 4000,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "k2",
        system_prompt: str = "You are a helpful assistant.",
        browser: str = "chrome",
        web_search: bool = False,
    ):
        """
        Initializes the Kimi API client with given parameters.

        Args:
            is_conversation: Whether to maintain conversation history
            max_tokens: Maximum tokens for response
            timeout: Request timeout in seconds
            intro: Introduction message
            filepath: Path to conversation history file
            update_file: Whether to update conversation file
            proxies: Proxy configuration
            history_offset: History offset for conversation
            act: Act/persona for the assistant
            model: Model to use (k1.5, k2, kimi, kimi-plus)
            system_prompt: System prompt for the assistant
            browser: Browser to impersonate
            web_search: Whether to enable web search
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(
                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
            )

        self.session = Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        self.system_prompt = system_prompt
        self.web_search = web_search

        # Kimi API endpoints
        self.register_endpoint = "https://www.kimi.com/api/device/register"
        self.chat_create_endpoint = "https://www.kimi.com/api/chat"
        self.chat_completion_endpoint = "https://www.kimi.com/api/chat/{chat_id}/completion/stream"

        # Initialize LitAgent for browser fingerprinting
        self.agent = LitAgent()
        self.fingerprint = self.agent.generate_fingerprint(browser)

        # Generate device ID
        self.device_id = str(random.randint(1000000000000000, 9999999999999999))

        # Headers for Kimi API
        self.headers = {
            "Accept": "text/event-stream",
            "Accept-Language": self.fingerprint["accept_language"],
            "Accept-Encoding": "gzip, deflate, br",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "DNT": "1",
            "Origin": "https://www.kimi.com",
            "Pragma": "no-cache",
            "Referer": "https://www.kimi.com/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "x-msh-device-id": self.device_id,
            "x-msh-platform": "web",
            "x-traffic-id": self.device_id,
        }

        # Initialize authentication
        self.access_token = None
        self.chat_id = None

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method))
            and not method.startswith("__")
        )

        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        # Update session headers and proxies
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

    def _authenticate(self) -> str:
        """Authenticate with Kimi API and get access token."""
        if self.access_token:
            return self.access_token

        max_retries = 3
        last_exception = None

        for attempt in range(max_retries):
            try:
                response = self.session.post(
                    self.register_endpoint,
                    json={},
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                data = response.json()
                if not data.get("access_token"):
                    raise exceptions.FailedToGenerateResponseError("No access token received")

                self.access_token = data["access_token"]
                self.session.headers["Authorization"] = f"Bearer {self.access_token}"
                return self.access_token

            except CurlError as e:
                last_exception = e
                if attempt < max_retries - 1:
                    continue
                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts (CurlError): {e}")
            except Exception as e:
                last_exception = e
                if attempt < max_retries - 1:
                    continue
                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {e}")

        # This should never be reached, but just in case
        raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {last_exception}")

    def _create_chat(self) -> str:
        """Create a new chat session and return chat ID."""
        if self.chat_id:
            return self.chat_id

        self._authenticate()

        try:
            response = self.session.post(
                self.chat_create_endpoint,
                json={
                    "name": "Unnamed Chat",
                    "born_from": "home",
                    "kimiplus_id": "kimi",
                    "is_example": False,
                    "source": "web",
                    "tags": []
                },
                timeout=self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            data = response.json()
            self.chat_id = data.get("id")
            if not self.chat_id:
                raise exceptions.FailedToGenerateResponseError("No chat ID received")

            return self.chat_id

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed (CurlError): {e}")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed: {e}")

    @staticmethod
    def _kimi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extract content from Kimi SSE stream."""
        if isinstance(chunk, dict):
            if chunk.get("event") == "cmpl":
                return chunk.get("text")
        return None

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """
        Send a prompt to Kimi API and return the response.

        Args:
            prompt: The prompt to send
            stream: Whether to stream the response
            raw: Whether to return raw response
            optimizer: Optimizer to use
            conversationally: Whether to generate conversationally

        Returns:
            Dict or Generator with the response
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        self._create_chat()

        # Fixed payload structure based on actual Kimi API requirements
        payload = {
            "kimiplus_id": "kimi",
            "extend": {"sidebar": True},
            "model": self.model,
            "use_search": self.web_search,
            "messages": [
                {
                    "role": "user",
                    "content": conversation_prompt
                }
            ],
            "refs": [],
            "history": [],
            "scene_labels": [],
            "use_semantic_memory": False,
            "use_deep_research": False
        }

        def for_stream():
            try:
                response = self.session.post(
                    self.chat_completion_endpoint.format(chat_id=self.chat_id),
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                streaming_text = ""
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),
                    intro_value="data:",
                    to_json=True,
                    skip_markers=["[DONE]"],
                    content_extractor=self._kimi_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = dict(text=content_chunk)
                        yield resp if not raw else content_chunk

                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, streaming_text)

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            try:
                response = self.session.post(
                    self.chat_completion_endpoint.format(chat_id=self.chat_id),
                    json=payload,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                # Collect all streaming data
                full_text = ""
                processed_stream = sanitize_stream(
                    data=response.text,
                    to_json=True,
                    intro_value="data:",
                    skip_markers=["[DONE]"],
                    content_extractor=self._kimi_extractor,
                    yield_raw_on_error=False
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        full_text += content_chunk

                self.last_response = {"text": full_text}
                self.conversation.update_chat_history(prompt, full_text)
                return self.last_response if not raw else full_text

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """
        Chat with Kimi API.

        Args:
            prompt: The prompt to send
            stream: Whether to stream the response
            optimizer: Optimizer to use
            conversationally: Whether to generate conversationally
            raw: Whether to return raw response

        Returns:
            str or Generator with the response
        """
        def for_stream():
            for response in self.ask(
                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
                if raw:
                    yield response
                else:
                    yield self.get_message(response)

        def for_non_stream():
            result = self.ask(
                prompt,
                False,
                raw=raw,
                optimizer=optimizer,
                conversationally=conversationally,
            )
            if raw:
                return result
            else:
                return self.get_message(result)

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract message from response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

    def refresh_identity(self, browser: str = None):
        """
        Refresh browser identity fingerprint.

        Args:
            browser: Specific browser to use
        """
        browser = browser or self.fingerprint.get("browser_type", "chrome")
        self.fingerprint = self.agent.generate_fingerprint(browser)

        self.headers.update({
            "Accept-Language": self.fingerprint["accept_language"],
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
        })

        self.session.headers.update(self.headers)

        # Generate new device ID
        self.device_id = str(random.randint(1000000000000000, 9999999999999999))
        self.session.headers.update({
            "x-msh-device-id": self.device_id,
            "x-traffic-id": self.device_id,
        })

        return self.fingerprint

if __name__ == "__main__":
    # Test the Kimi provider
    print("-" * 80)
    print(f"{'Model':<20} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in Kimi.AVAILABLE_MODELS:
        try:
            ai = Kimi(model=model, timeout=30)
            response = ai.chat("Say 'Hello' in one word")

            if response and len(response.strip()) > 0:
                status = "✓"
                display_text = response.strip()[:50] + "..." if len(response.strip()) > 50 else response.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            print(f"{model:<20} {status:<10} {display_text}")
        except Exception as e:
            print(f"{model:<20} {'✗':<10} {str(e)}")