webscout-8.3.5-py3-none-any.whl → webscout-8.3.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (63)
  1. webscout/Bard.py +12 -6
  2. webscout/DWEBS.py +66 -57
  3. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  4. webscout/Provider/AISEARCH/__init__.py +1 -1
  5. webscout/Provider/Deepinfra.py +6 -0
  6. webscout/Provider/Flowith.py +6 -1
  7. webscout/Provider/GithubChat.py +1 -0
  8. webscout/Provider/GptOss.py +207 -0
  9. webscout/Provider/Kimi.py +445 -0
  10. webscout/Provider/Netwrck.py +3 -6
  11. webscout/Provider/OPENAI/README.md +2 -1
  12. webscout/Provider/OPENAI/TogetherAI.py +50 -55
  13. webscout/Provider/OPENAI/__init__.py +4 -2
  14. webscout/Provider/OPENAI/copilot.py +20 -4
  15. webscout/Provider/OPENAI/deepinfra.py +6 -0
  16. webscout/Provider/OPENAI/e2b.py +60 -8
  17. webscout/Provider/OPENAI/flowith.py +4 -3
  18. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  19. webscout/Provider/OPENAI/gptoss.py +288 -0
  20. webscout/Provider/OPENAI/kimi.py +469 -0
  21. webscout/Provider/OPENAI/netwrck.py +8 -12
  22. webscout/Provider/OPENAI/refact.py +274 -0
  23. webscout/Provider/OPENAI/textpollinations.py +3 -6
  24. webscout/Provider/OPENAI/toolbaz.py +1 -0
  25. webscout/Provider/TTI/bing.py +14 -2
  26. webscout/Provider/TTI/together.py +10 -9
  27. webscout/Provider/TTS/README.md +0 -1
  28. webscout/Provider/TTS/__init__.py +0 -1
  29. webscout/Provider/TTS/base.py +479 -159
  30. webscout/Provider/TTS/deepgram.py +409 -156
  31. webscout/Provider/TTS/elevenlabs.py +425 -111
  32. webscout/Provider/TTS/freetts.py +317 -140
  33. webscout/Provider/TTS/gesserit.py +192 -128
  34. webscout/Provider/TTS/murfai.py +248 -113
  35. webscout/Provider/TTS/openai_fm.py +347 -129
  36. webscout/Provider/TTS/speechma.py +620 -586
  37. webscout/Provider/TextPollinationsAI.py +3 -6
  38. webscout/Provider/TogetherAI.py +50 -55
  39. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  40. webscout/Provider/__init__.py +2 -90
  41. webscout/Provider/cerebras.py +83 -33
  42. webscout/Provider/copilot.py +42 -23
  43. webscout/Provider/toolbaz.py +1 -0
  44. webscout/conversation.py +22 -20
  45. webscout/sanitize.py +14 -10
  46. webscout/scout/README.md +20 -23
  47. webscout/scout/core/crawler.py +125 -38
  48. webscout/scout/core/scout.py +26 -5
  49. webscout/version.py +1 -1
  50. webscout/webscout_search.py +13 -6
  51. webscout/webscout_search_async.py +10 -8
  52. webscout/yep_search.py +13 -5
  53. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/METADATA +2 -1
  54. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/RECORD +59 -56
  55. webscout/Provider/Glider.py +0 -225
  56. webscout/Provider/OPENAI/c4ai.py +0 -394
  57. webscout/Provider/OPENAI/glider.py +0 -330
  58. webscout/Provider/TTS/sthir.py +0 -94
  59. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  60. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
  61. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/webscout/Provider/GptOss.py
@@ -0,0 +1,207 @@
+
+import requests
+from typing import Any, Dict, Generator, Optional, Union, List
+from webscout.litagent import LitAgent
+from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class GptOss(Provider):
+    """
+    Provider for GPT-OSS API.
+    """
+    AVAILABLE_MODELS = ["gpt-oss-20b", "gpt-oss-120b"]
+
+    def __init__(
+        self,
+        model: str = "gpt-oss-120b",
+        is_conversation: bool = True,
+        max_tokens: int = 600,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        system_prompt: str = "You are a helpful assistant.",
+        reasoning_effort: str = "high"
+    ):
+        self.api_endpoint = "https://api.gpt-oss.com/chatkit"
+        self.model = model if model in self.AVAILABLE_MODELS else self.AVAILABLE_MODELS[0]
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.system_prompt = system_prompt
+        self.reasoning_effort = reasoning_effort
+        self.agent = LitAgent()
+        self.proxies = proxies
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        data = {
+            "op": "threads.create",
+            "params": {
+                "input": {
+                    "text": conversation_prompt,
+                    "content": [{"type": "input_text", "text": conversation_prompt}],
+                    "quoted_text": "",
+                    "attachments": []
+                }
+            }
+        }
+        headers = self.agent.generate_fingerprint()
+        headers.update({
+            "accept": "text/event-stream",
+            "x-reasoning-effort": self.reasoning_effort,
+            "x-selected-model": self.model,
+            "x-show-reasoning": "true"
+        })
+        cookies = {}
+
+        def for_stream():
+            full_response_content = ""
+            try:
+                with requests.post(
+                    self.api_endpoint,
+                    headers=headers,
+                    cookies=cookies,
+                    json=data,
+                    stream=True,
+                    proxies=self.proxies if self.proxies else None,
+                    timeout=self.timeout
+                ) as response:
+                    response.raise_for_status()
+                    for chunk in sanitize_stream(
+                        response.iter_lines(),
+                        intro_value="data: ",
+                        to_json=True,
+                        skip_markers=["[DONE]"],
+                        strip_chars=None,
+                        content_extractor=lambda d: d.get('update', {}).get('delta') if d.get('type') == 'thread.item_updated' and d.get('update', {}).get('type') == 'assistant_message.content_part.text_delta' else None,
+                        yield_raw_on_error=False,
+                        encoding="utf-8",
+                        raw=raw
+                    ):
+                        if chunk:
+                            yield chunk
+                            full_response_content += chunk
+                self.last_response.update(dict(text=full_response_content))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            result = ""
+            try:
+                with requests.post(
+                    self.api_endpoint,
+                    headers=headers,
+                    cookies=cookies,
+                    json=data,
+                    stream=False,
+                    proxies=self.proxies if self.proxies else None,
+                    timeout=self.timeout
+                ) as response:
+                    response.raise_for_status()
+                    # The API is event-stream only, so we simulate non-stream by joining all chunks
+                    for chunk in sanitize_stream(
+                        response.iter_lines(),
+                        intro_value="data: ",
+                        to_json=True,
+                        skip_markers=["[DONE]"],
+                        strip_chars=None,
+                        content_extractor=lambda d: d.get('update', {}).get('delta') if d.get('type') == 'thread.item_updated' and d.get('update', {}).get('type') == 'assistant_message.content_part.text_delta' else None,
+                        yield_raw_on_error=False,
+                        encoding="utf-8",
+                        raw=raw
+                    ):
+                        if chunk:
+                            result += chunk
+                self.last_response.update(dict(text=result))
+                self.conversation.update_chat_history(
+                    prompt, self.get_message(self.last_response)
+                )
+                return self.last_response
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield response
+
+        def for_non_stream():
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        text = response.get("text", "")
+        return text
+
+if __name__ == "__main__":
+    from webscout.AIutel import timeIt
+    from rich import print
+    ai = GptOss(timeout=30)
+    @timeIt
+    def get_response():
+        response = ai.chat("write a poem about AI", stream=True, raw=False)
+        for chunk in response:
+            print(chunk, end="", flush=True)
+    get_response()
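For reference, a minimal usage sketch for the new GptOss provider, modeled on the `__main__` block above. The import path simply mirrors the file location webscout/Provider/GptOss.py; whether the class is also re-exported from webscout.Provider should be verified against the release, since Provider/__init__.py changed in this version as well.

    from webscout.Provider.GptOss import GptOss

    # Stream a completion; "gpt-oss-120b" is the declared default model.
    ai = GptOss(timeout=30, reasoning_effort="high")
    for chunk in ai.chat("write a poem about AI", stream=True):
        print(chunk, end="", flush=True)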
--- /dev/null
+++ b/webscout/Provider/Kimi.py
@@ -0,0 +1,445 @@
+from curl_cffi import CurlError
+from curl_cffi.requests import Session
+import json
+import random
+from typing import Any, Dict, Optional, Generator, Union, List
+import uuid
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts, sanitize_stream
+from webscout.AIbase import Provider
+from webscout import exceptions
+from webscout.litagent import LitAgent
+
+class Kimi(Provider):
+    """
+    A class to interact with the Kimi API (kimi.com).
+
+    This provider uses the Kimi web interface API endpoints to provide
+    access to Kimi's AI models.
+
+    Examples:
+        >>> from webscout.Provider.Kimi import Kimi
+        >>> ai = Kimi()
+        >>> response = ai.chat("What's the weather today?")
+        >>> print(response)
+        'The weather today is sunny...'
+    """
+
+    AVAILABLE_MODELS = ["k1.5", "k2", "k1.5-thinking"]
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "k2",
+        system_prompt: str = "You are a helpful assistant.",
+        browser: str = "chrome",
+        web_search: bool = False,
+    ):
+        """
+        Initializes the Kimi API client with given parameters.
+
+        Args:
+            is_conversation: Whether to maintain conversation history
+            max_tokens: Maximum tokens for response
+            timeout: Request timeout in seconds
+            intro: Introduction message
+            filepath: Path to conversation history file
+            update_file: Whether to update conversation file
+            proxies: Proxy configuration
+            history_offset: History offset for conversation
+            act: Act/persona for the assistant
+            model: Model to use (k1.5, k2, k1.5-thinking)
+            system_prompt: System prompt for the assistant
+            browser: Browser to impersonate
+            web_search: Whether to enable web search
+        """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(
+                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
+            )
+
+        self.session = Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.web_search = web_search
+
+        # Kimi API endpoints
+        self.register_endpoint = "https://www.kimi.com/api/device/register"
+        self.chat_create_endpoint = "https://www.kimi.com/api/chat"
+        self.chat_completion_endpoint = "https://www.kimi.com/api/chat/{chat_id}/completion/stream"
+
+        # Initialize LitAgent for browser fingerprinting
+        self.agent = LitAgent()
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        # Generate device ID
+        self.device_id = str(random.randint(1000000000000000, 9999999999999999))
+
+        # Headers for Kimi API
+        self.headers = {
+            "Accept": "text/event-stream",
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Accept-Encoding": "gzip, deflate, br",
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "Content-Type": "application/json",
+            "DNT": "1",
+            "Origin": "https://www.kimi.com",
+            "Pragma": "no-cache",
+            "Referer": "https://www.kimi.com/",
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
+            "Sec-CH-UA-Mobile": "?0",
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+            "x-msh-device-id": self.device_id,
+            "x-msh-platform": "web",
+            "x-traffic-id": self.device_id,
+        }
+
+        # Initialize authentication
+        self.access_token = None
+        self.chat_id = None
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method))
+            and not method.startswith("__")
+        )
+
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Update session headers and proxies
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+    def _authenticate(self) -> str:
+        """Authenticate with Kimi API and get access token."""
+        if self.access_token:
+            return self.access_token
+
+        max_retries = 3
+        last_exception = None
+
+        for attempt in range(max_retries):
+            try:
+                response = self.session.post(
+                    self.register_endpoint,
+                    json={},
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+
+                data = response.json()
+                if not data.get("access_token"):
+                    raise exceptions.FailedToGenerateResponseError("No access token received")
+
+                self.access_token = data["access_token"]
+                self.session.headers["Authorization"] = f"Bearer {self.access_token}"
+                return self.access_token
+
+            except CurlError as e:
+                last_exception = e
+                if attempt < max_retries - 1:
+                    continue
+                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts (CurlError): {e}")
+            except Exception as e:
+                last_exception = e
+                if attempt < max_retries - 1:
+                    continue
+                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {e}")
+
+        # This should never be reached, but just in case
+        raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {last_exception}")
+
+    def _create_chat(self) -> str:
+        """Create a new chat session and return chat ID."""
+        if self.chat_id:
+            return self.chat_id
+
+        self._authenticate()
+
+        try:
+            response = self.session.post(
+                self.chat_create_endpoint,
+                json={
+                    "name": "Unnamed Chat",
+                    "born_from": "home",
+                    "kimiplus_id": "kimi",
+                    "is_example": False,
+                    "source": "web",
+                    "tags": []
+                },
+                timeout=self.timeout,
+                impersonate="chrome110"
+            )
+            response.raise_for_status()
+
+            data = response.json()
+            self.chat_id = data.get("id")
+            if not self.chat_id:
+                raise exceptions.FailedToGenerateResponseError("No chat ID received")
+
+            return self.chat_id
+
+        except CurlError as e:
+            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed (CurlError): {e}")
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed: {e}")
+
+    @staticmethod
+    def _kimi_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
+        """Extract content from Kimi SSE stream."""
+        if isinstance(chunk, dict):
+            if chunk.get("event") == "cmpl":
+                return chunk.get("text")
+        return None
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Union[Dict[str, Any], Generator]:
+        """
+        Send a prompt to Kimi API and return the response.
+
+        Args:
+            prompt: The prompt to send
+            stream: Whether to stream the response
+            raw: Whether to return raw response
+            optimizer: Optimizer to use
+            conversationally: Whether to generate conversationally
+
+        Returns:
+            Dict or Generator with the response
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        self._create_chat()
+
+        # Fixed payload structure based on actual Kimi API requirements
+        payload = {
+            "kimiplus_id": "kimi",
+            "extend": {"sidebar": True},
+            "model": self.model,
+            "use_search": self.web_search,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": conversation_prompt
+                }
+            ],
+            "refs": [],
+            "history": [],
+            "scene_labels": [],
+            "use_semantic_memory": False,
+            "use_deep_research": False
+        }
+
+        def for_stream():
+            try:
+                response = self.session.post(
+                    self.chat_completion_endpoint.format(chat_id=self.chat_id),
+                    json=payload,
+                    stream=True,
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+
+                streaming_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.iter_content(chunk_size=None),
+                    intro_value="data:",
+                    to_json=True,
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._kimi_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        streaming_text += content_chunk
+                        resp = dict(text=content_chunk)
+                        yield resp if not raw else content_chunk
+
+                self.last_response = {"text": streaming_text}
+                self.conversation.update_chat_history(prompt, streaming_text)
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            try:
+                response = self.session.post(
+                    self.chat_completion_endpoint.format(chat_id=self.chat_id),
+                    json=payload,
+                    timeout=self.timeout,
+                    impersonate="chrome110"
+                )
+                response.raise_for_status()
+
+                # Collect all streaming data
+                full_text = ""
+                processed_stream = sanitize_stream(
+                    data=response.text,
+                    to_json=True,
+                    intro_value="data:",
+                    skip_markers=["[DONE]"],
+                    content_extractor=self._kimi_extractor,
+                    yield_raw_on_error=False
+                )
+
+                for content_chunk in processed_stream:
+                    if content_chunk and isinstance(content_chunk, str):
+                        full_text += content_chunk
+
+                self.last_response = {"text": full_text}
+                self.conversation.update_chat_history(prompt, full_text)
+                return self.last_response if not raw else full_text
+
+            except CurlError as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}")
+            except Exception as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+        raw: bool = False,
+    ) -> Union[str, Generator[str, None, None]]:
+        """
+        Chat with Kimi API.
+
+        Args:
+            prompt: The prompt to send
+            stream: Whether to stream the response
+            optimizer: Optimizer to use
+            conversationally: Whether to generate conversationally
+            raw: Whether to return raw response
+
+        Returns:
+            str or Generator with the response
+        """
+        def for_stream():
+            for response in self.ask(
+                prompt, True, raw=raw, optimizer=optimizer, conversationally=conversationally
+            ):
+                if raw:
+                    yield response
+                else:
+                    yield self.get_message(response)
+
+        def for_non_stream():
+            result = self.ask(
+                prompt,
+                False,
+                raw=raw,
+                optimizer=optimizer,
+                conversationally=conversationally,
+            )
+            if raw:
+                return result
+            else:
+                return self.get_message(result)
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Extract message from response."""
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]
+
+    def refresh_identity(self, browser: str = None):
+        """
+        Refresh browser identity fingerprint.
+
+        Args:
+            browser: Specific browser to use
+        """
+        browser = browser or self.fingerprint.get("browser_type", "chrome")
+        self.fingerprint = self.agent.generate_fingerprint(browser)
+
+        self.headers.update({
+            "Accept-Language": self.fingerprint["accept_language"],
+            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
+            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
+            "User-Agent": self.fingerprint["user_agent"],
+        })
+
+        self.session.headers.update(self.headers)
+
+        # Generate new device ID
+        self.device_id = str(random.randint(1000000000000000, 9999999999999999))
+        self.session.headers.update({
+            "x-msh-device-id": self.device_id,
+            "x-traffic-id": self.device_id,
+        })
+
+        return self.fingerprint
+
+if __name__ == "__main__":
+    # Test the Kimi provider
+    print("-" * 80)
+    print(f"{'Model':<20} {'Status':<10} {'Response'}")
+    print("-" * 80)
+
+    for model in Kimi.AVAILABLE_MODELS:
+        try:
+            ai = Kimi(model=model, timeout=30)
+            response = ai.chat("Say 'Hello' in one word")
+
+            if response and len(response.strip()) > 0:
+                status = "✓"
+                display_text = response.strip()[:50] + "..." if len(response.strip()) > 50 else response.strip()
+            else:
+                status = "✗"
+                display_text = "Empty or invalid response"
+            print(f"{model:<20} {status:<10} {display_text}")
+        except Exception as e:
+            print(f"{model:<20} {'✗':<10} {str(e)}")
--- a/webscout/Provider/Netwrck.py
+++ b/webscout/Provider/Netwrck.py
@@ -18,16 +18,13 @@ class Netwrck(Provider):
         "sao10k/l3-euryale-70b",
         "deepseek/deepseek-chat",
         "deepseek/deepseek-r1",
-        "anthropic/claude-sonnet-4-20250514",
-        "openai/gpt-4.1-mini",
         "gryphe/mythomax-l2-13b",
-        "google/gemini-2.5-flash-preview-04-17",
         "nvidia/llama-3.1-nemotron-70b-instruct",
     ]
 
     def __init__(
         self,
-        model: str = "anthropic/claude-sonnet-4-20250514",
+        model: str = "deepseek/deepseek-r1",
         is_conversation: bool = True,
         max_tokens: int = 4096, # Note: max_tokens is not used by this API
         timeout: int = 30,
@@ -155,10 +152,10 @@ class Netwrck(Provider):
                 self.last_response = {"text": buffer}
                 self.conversation.update_chat_history(payload["query"], buffer)
             except CurlError as e:
-                raise exceptions.ProviderConnectionError(f"Network error (CurlError): {str(e)}") from e
+                raise exceptions.APIConnectionError(f"Network error (CurlError): {str(e)}") from e
             except Exception as e:
                 err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                raise exceptions.ProviderConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
+                raise exceptions.APIConnectionError(f"Unexpected error ({type(e).__name__}): {str(e)} - {err_text}") from e
 
         def for_non_stream():
             try:
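These hunks remove three models, change the default to "deepseek/deepseek-r1", and swap the exception raised on failure from exceptions.ProviderConnectionError to exceptions.APIConnectionError, so existing except clauses need updating. A minimal sketch, assuming Netwrck is imported from its file path as in the other providers:

    from webscout.Provider.Netwrck import Netwrck
    from webscout import exceptions

    ai = Netwrck()  # default model is now "deepseek/deepseek-r1"
    try:
        print(ai.chat("Hello"))
    except exceptions.APIConnectionError as e:  # was ProviderConnectionError before 8.3.6
        print(f"Netwrck request failed: {e}")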
--- a/webscout/Provider/OPENAI/README.md
+++ b/webscout/Provider/OPENAI/README.md
@@ -73,7 +73,8 @@ Currently, the following providers are implemented with OpenAI-compatible interfaces:
 - Friendli
 - MiniMax
 - QodoAI
-
+- Kimi
+- GptOss
 ## 💻 Usage Examples
 
 Here are examples of how to use the OpenAI-compatible providers in your code.
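With Kimi and GptOss now listed among the OpenAI-compatible providers, usage would presumably follow the same chat-completions pattern as the other entries. A sketch assuming webscout/Provider/OPENAI/kimi.py exports a Kimi class with that interface (the class name and import path are assumptions to verify against the release):

    from webscout.Provider.OPENAI.kimi import Kimi  # assumed export

    client = Kimi()
    response = client.chat.completions.create(
        model="k2",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)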