webscout 8.3.6__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -1,469 +0,0 @@
1
- import requests
2
- import json
3
- import time
4
- import uuid
5
- import random
6
- from typing import List, Dict, Optional, Union, Generator, Any
7
-
8
- # Import curl_cffi for improved request handling
9
- from curl_cffi.requests import Session
10
- from curl_cffi import CurlError
11
-
12
- # Import base classes and utility structures
13
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
14
- from .utils import (
15
- ChatCompletion,
16
- ChatCompletionChunk,
17
- Choice,
18
- ChatCompletionMessage,
19
- ChoiceDelta,
20
- CompletionUsage,
21
- format_prompt,
22
- get_system_prompt,
23
- count_tokens
24
- )
25
-
26
- # Attempt to import LitAgent, fallback if not available
27
- try:
28
- from webscout.litagent import LitAgent
29
- except ImportError:
30
- pass
31
-
32
- from webscout import exceptions
33
-
34
-
35
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` endpoint for the Kimi provider.

    Translates an OpenAI-style message list into Kimi's single-turn chat
    payload and parses the server-sent-event (SSE) response stream back
    into ``ChatCompletion`` / ``ChatCompletionChunk`` objects.
    """

    def __init__(self, client: 'Kimi'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = 4000,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Create a completion using the Kimi API.

        Args:
            model: The model to use (k1.5, k2, k1.5-thinking).
            messages: List of message dictionaries with 'role' and 'content'.
            max_tokens: Maximum tokens for response (not enforced by Kimi).
            stream: Whether to stream the response.
            temperature: Sampling temperature (not used by Kimi).
            top_p: Top-p sampling (not used by Kimi).
            timeout: Request timeout in seconds.
            proxies: Proxy configuration (unused here; the client session
                already carries the proxies set at construction time).

        Returns:
            ChatCompletion or a generator of ChatCompletionChunk.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
            IOError: If the underlying HTTP request fails.
        """
        if model not in self._client.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")

        # Lazily establish the device token and a chat session.
        self._client._authenticate()
        self._client._create_chat()

        # Kimi's endpoint takes a single user turn: keep the last system
        # and user messages only, folding the system prompt into the user
        # content (mirrors the legacy Kimi.py behaviour — earlier history
        # is intentionally dropped).
        user_content = ""
        system_content = ""
        for msg in messages:
            role = msg.get("role", "user")
            content = msg.get("content", "")
            if role == "system":
                system_content = content
            elif role == "user":
                user_content = content

        final_content = f"{system_content}\n\n{user_content}" if system_content else user_content

        payload = {
            "kimiplus_id": "kimi",
            "extend": {"sidebar": True},
            "model": model,
            "use_search": self._client.web_search,
            "messages": [
                {
                    "role": "user",
                    "content": final_content
                }
            ],
            "refs": [],
            "history": [],
            "scene_labels": [],
            "use_semantic_memory": False,
            "use_deep_research": False
        }

        request_id = f"chatcmpl-{uuid.uuid4().hex}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, messages, payload, timeout, proxies)
        return self._create_non_stream(request_id, created_time, model, messages, payload, timeout, proxies)

    def _iter_sse_text(
        self, payload: Dict[str, Any], timeout: Optional[int]
    ) -> Generator[str, None, None]:
        """POST ``payload`` and yield text fragments from 'cmpl' SSE events.

        Shared by the streaming and non-streaming paths so that both
        decode and parse the event stream identically (previously the
        streaming path never decoded ``bytes`` lines).
        """
        response = self._client.session.post(
            self._client.chat_completion_endpoint.format(chat_id=self._client.chat_id),
            json=payload,
            stream=True,
            timeout=timeout or self._client.timeout,
            impersonate="chrome110"
        )
        response.raise_for_status()

        for line in response.iter_lines():
            if not line:
                continue
            # curl_cffi may yield bytes; normalise to str before parsing.
            if isinstance(line, bytes):
                line = line.decode('utf-8')
            if not line.startswith("data: "):
                continue
            json_str = line[6:]
            if json_str == "[DONE]":
                break
            try:
                data = json.loads(json_str)
            except json.JSONDecodeError:
                # Skip malformed keep-alive / partial frames.
                continue
            if data.get("event") == "cmpl":
                text = data.get("text")
                if text:
                    yield text

    def _create_stream(
        self, request_id: str, created_time: int, model: str, messages: List[Dict[str, str]],
        payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Yield ``ChatCompletionChunk`` objects as Kimi streams its reply."""
        try:
            prompt_tokens = count_tokens(messages)
            completion_tokens = 0

            for content in self._iter_sse_text(payload, timeout):
                completion_tokens += count_tokens(content)

                delta = ChoiceDelta(content=content, role=None, tool_calls=None)
                choice = Choice(index=0, delta=delta, finish_reason=None, logprobs=None)
                chunk = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                    system_fingerprint=None
                )
                # Running usage totals are attached to every chunk.
                chunk.usage = {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": prompt_tokens + completion_tokens,
                    "estimated_cost": None
                }
                yield chunk

            # Terminal chunk signalling completion.
            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
            chunk = ChatCompletionChunk(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                system_fingerprint=None
            )
            chunk.usage = {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
                "estimated_cost": None
            }
            yield chunk

        except CurlError as e:
            raise IOError(f"Kimi request failed: {e}") from e

    def _create_non_stream(
        self, request_id: str, created_time: int, model: str, messages: List[Dict[str, str]],
        payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        """Collect the full streamed reply and wrap it as a ``ChatCompletion``."""
        try:
            # Kimi only speaks SSE, so the non-streaming path still
            # consumes the stream and concatenates the fragments.
            full_text = "".join(self._iter_sse_text(payload, timeout))

            message = ChatCompletionMessage(
                role="assistant",
                content=full_text
            )
            choice = Choice(
                index=0,
                message=message,
                finish_reason="stop"
            )

            prompt_tokens = count_tokens(messages)
            completion_tokens = count_tokens(full_text) if full_text else 0
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )

            return ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )

        except CurlError as e:
            raise IOError(f"Kimi request failed: {e}") from e
277
-
278
-
279
class Chat(BaseChat):
    """Thin namespace object so callers can use ``client.chat.completions``."""

    def __init__(self, client: 'Kimi'):
        self.completions = Completions(client)
        self._client = client
283
-
284
-
285
class Kimi(OpenAICompatibleProvider):
    """
    OpenAI-compatible Kimi provider.

    This provider implements the OpenAI API interface for Kimi models.
    It supports the following models:
    - k1.5
    - k2
    - k1.5-thinking

    Examples:
        >>> from webscout.Provider.OPENAI.kimi import Kimi
        >>> client = Kimi()
        >>> response = client.chat.completions.create(
        ...     model="k2",
        ...     messages=[{"role": "user", "content": "Hello!"}]
        ... )
        >>> print(response.choices[0].message.content)
    """

    AVAILABLE_MODELS = ["k1.5", "k2", "k1.5-thinking"]

    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        timeout: int = 30,
        proxies: Optional[Dict[str, str]] = None,
        browser: str = "chrome",
        web_search: bool = False,
        **kwargs
    ):
        """
        Initialize the Kimi provider.

        Args:
            api_key: Not used for Kimi (authentication is handled via device registration).
            base_url: Not used for Kimi.
            timeout: Request timeout in seconds.
            proxies: Proxy configuration.
            browser: Browser to impersonate for fingerprinting.
            web_search: Whether to enable web search.
            **kwargs: Additional arguments forwarded to the base class.
        """
        super().__init__(api_key=api_key, base_url=base_url, timeout=timeout, **kwargs)

        self.timeout = timeout
        self.proxies = proxies or {}
        self.web_search = web_search

        # Kimi API endpoints.
        self.register_endpoint = "https://www.kimi.com/api/device/register"
        self.chat_create_endpoint = "https://www.kimi.com/api/chat"
        self.chat_completion_endpoint = "https://www.kimi.com/api/chat/{chat_id}/completion/stream"

        # Session used for all HTTP traffic; proxies apply to every request.
        self.session = Session()
        self.session.proxies = self.proxies

        # Browser fingerprint via LitAgent, with a static Chrome fallback
        # when LitAgent is unavailable or fails.
        try:
            self.agent = LitAgent()
            self.fingerprint = self.agent.generate_fingerprint(browser)
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            self.fingerprint = {
                "user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
                "accept_language": "en-US,en;q=0.9",
                "sec_ch_ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
                "platform": "Windows"
            }

        # Random 16-digit device ID used for device registration.
        self.device_id = str(random.randint(1000000000000000, 9999999999999999))

        # Default headers for every Kimi API request.
        self.headers = {
            "Accept": "text/event-stream",
            "Accept-Language": self.fingerprint["accept_language"],
            "Accept-Encoding": "gzip, deflate, br",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "DNT": "1",
            "Origin": "https://www.kimi.com",
            "Pragma": "no-cache",
            "Referer": "https://www.kimi.com/",
            "Sec-CH-UA": self.fingerprint["sec_ch_ua"],
            "Sec-CH-UA-Mobile": "?0",
            "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
            "User-Agent": self.fingerprint["user_agent"],
            "x-msh-device-id": self.device_id,
            "x-msh-platform": "web",
            "x-traffic-id": self.device_id,
        }

        # Auth state, filled lazily on first request.
        self.access_token: Optional[str] = None
        self.chat_id: Optional[str] = None

        self.session.headers.update(self.headers)

        # OpenAI-style entry points.
        self.chat = Chat(self)
        self.completions = Completions(self)

    @property
    def models(self):
        """Property that returns an object with a .list() method returning available models."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

    def _authenticate(self) -> str:
        """Authenticate with the Kimi API and return (caching) an access token.

        Retries up to three times; raises FailedToGenerateResponseError
        once all attempts are exhausted.
        """
        if self.access_token:
            return self.access_token

        max_retries = 3
        for attempt in range(max_retries):
            try:
                response = self.session.post(
                    self.register_endpoint,
                    json={},
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                data = response.json()
                if not data.get("access_token"):
                    # Caught below and retried like any other failure.
                    raise exceptions.FailedToGenerateResponseError("No access token received")

                self.access_token = data["access_token"]
                self.session.headers["Authorization"] = f"Bearer {self.access_token}"
                return self.access_token

            except CurlError as e:
                if attempt < max_retries - 1:
                    continue
                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts (CurlError): {e}")
            except Exception as e:
                if attempt < max_retries - 1:
                    continue
                raise exceptions.FailedToGenerateResponseError(f"Authentication failed after {max_retries} attempts: {e}")

    def _create_chat(self) -> str:
        """Create a new chat session and return (caching) its chat ID.

        Raises:
            FailedToGenerateResponseError: If the request fails or the
                response carries no chat ID.
        """
        if self.chat_id:
            return self.chat_id

        self._authenticate()

        try:
            response = self.session.post(
                self.chat_create_endpoint,
                json={
                    "name": "Unnamed Chat",
                    "born_from": "home",
                    "kimiplus_id": "kimi",
                    "is_example": False,
                    "source": "web",
                    "tags": []
                },
                timeout=self.timeout,
                impersonate="chrome110"
            )
            response.raise_for_status()

            data = response.json()
            self.chat_id = data.get("id")
            if not self.chat_id:
                raise exceptions.FailedToGenerateResponseError("No chat ID received")

            return self.chat_id

        except CurlError as e:
            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed (CurlError): {e}")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Chat creation failed: {e}")