webscout 8.3.6-py3-none-any.whl → 8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (130)
  1. webscout/AIutel.py +2 -0
  2. webscout/Provider/AISEARCH/__init__.py +18 -11
  3. webscout/Provider/AISEARCH/scira_search.py +3 -1
  4. webscout/Provider/Aitopia.py +2 -3
  5. webscout/Provider/Andi.py +3 -3
  6. webscout/Provider/ChatGPTClone.py +1 -1
  7. webscout/Provider/ChatSandbox.py +1 -0
  8. webscout/Provider/Cloudflare.py +1 -1
  9. webscout/Provider/Cohere.py +1 -0
  10. webscout/Provider/Deepinfra.py +7 -10
  11. webscout/Provider/ExaAI.py +1 -1
  12. webscout/Provider/ExaChat.py +1 -80
  13. webscout/Provider/Flowith.py +1 -1
  14. webscout/Provider/Gemini.py +7 -5
  15. webscout/Provider/GeminiProxy.py +1 -0
  16. webscout/Provider/GithubChat.py +3 -1
  17. webscout/Provider/Groq.py +1 -1
  18. webscout/Provider/HeckAI.py +8 -4
  19. webscout/Provider/Jadve.py +23 -38
  20. webscout/Provider/K2Think.py +308 -0
  21. webscout/Provider/Koboldai.py +8 -186
  22. webscout/Provider/LambdaChat.py +2 -4
  23. webscout/Provider/Nemotron.py +3 -4
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OLLAMA.py +1 -0
  26. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  27. webscout/Provider/OPENAI/FalconH1.py +2 -7
  28. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  29. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  30. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  31. webscout/Provider/OPENAI/PI.py +5 -4
  32. webscout/Provider/OPENAI/Qwen3.py +2 -3
  33. webscout/Provider/OPENAI/TogetherAI.py +2 -2
  34. webscout/Provider/OPENAI/TwoAI.py +3 -4
  35. webscout/Provider/OPENAI/__init__.py +17 -58
  36. webscout/Provider/OPENAI/ai4chat.py +313 -303
  37. webscout/Provider/OPENAI/base.py +9 -29
  38. webscout/Provider/OPENAI/chatgpt.py +7 -2
  39. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  40. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  41. webscout/Provider/OPENAI/deepinfra.py +6 -6
  42. webscout/Provider/OPENAI/heckai.py +4 -1
  43. webscout/Provider/OPENAI/netwrck.py +1 -0
  44. webscout/Provider/OPENAI/scirachat.py +6 -0
  45. webscout/Provider/OPENAI/textpollinations.py +3 -11
  46. webscout/Provider/OPENAI/toolbaz.py +14 -11
  47. webscout/Provider/OpenGPT.py +1 -1
  48. webscout/Provider/Openai.py +150 -402
  49. webscout/Provider/PI.py +1 -0
  50. webscout/Provider/Perplexitylabs.py +1 -2
  51. webscout/Provider/QwenLM.py +107 -89
  52. webscout/Provider/STT/__init__.py +17 -2
  53. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  54. webscout/Provider/StandardInput.py +1 -1
  55. webscout/Provider/TTI/__init__.py +18 -12
  56. webscout/Provider/TTS/__init__.py +18 -10
  57. webscout/Provider/TeachAnything.py +1 -0
  58. webscout/Provider/TextPollinationsAI.py +5 -12
  59. webscout/Provider/TogetherAI.py +86 -87
  60. webscout/Provider/TwoAI.py +53 -309
  61. webscout/Provider/TypliAI.py +2 -1
  62. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  63. webscout/Provider/Venice.py +2 -1
  64. webscout/Provider/VercelAI.py +1 -0
  65. webscout/Provider/WiseCat.py +2 -1
  66. webscout/Provider/WrDoChat.py +2 -1
  67. webscout/Provider/__init__.py +18 -86
  68. webscout/Provider/ai4chat.py +1 -1
  69. webscout/Provider/akashgpt.py +7 -10
  70. webscout/Provider/cerebras.py +115 -9
  71. webscout/Provider/chatglm.py +170 -83
  72. webscout/Provider/cleeai.py +1 -2
  73. webscout/Provider/deepseek_assistant.py +1 -1
  74. webscout/Provider/elmo.py +1 -1
  75. webscout/Provider/geminiapi.py +1 -1
  76. webscout/Provider/granite.py +1 -1
  77. webscout/Provider/hermes.py +1 -3
  78. webscout/Provider/julius.py +1 -0
  79. webscout/Provider/learnfastai.py +1 -1
  80. webscout/Provider/llama3mitril.py +1 -1
  81. webscout/Provider/llmchat.py +1 -1
  82. webscout/Provider/llmchatco.py +1 -1
  83. webscout/Provider/meta.py +3 -3
  84. webscout/Provider/oivscode.py +2 -2
  85. webscout/Provider/scira_chat.py +51 -124
  86. webscout/Provider/searchchat.py +1 -0
  87. webscout/Provider/sonus.py +1 -1
  88. webscout/Provider/toolbaz.py +15 -12
  89. webscout/Provider/turboseek.py +31 -22
  90. webscout/Provider/typefully.py +2 -1
  91. webscout/Provider/x0gpt.py +1 -0
  92. webscout/Provider/yep.py +2 -1
  93. webscout/tempid.py +6 -0
  94. webscout/version.py +1 -1
  95. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
  96. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
  97. webscout/Provider/AllenAI.py +0 -440
  98. webscout/Provider/Blackboxai.py +0 -793
  99. webscout/Provider/FreeGemini.py +0 -250
  100. webscout/Provider/GptOss.py +0 -207
  101. webscout/Provider/Hunyuan.py +0 -283
  102. webscout/Provider/Kimi.py +0 -445
  103. webscout/Provider/MCPCore.py +0 -322
  104. webscout/Provider/MiniMax.py +0 -207
  105. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  106. webscout/Provider/OPENAI/MiniMax.py +0 -298
  107. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  108. webscout/Provider/OPENAI/copilot.py +0 -321
  109. webscout/Provider/OPENAI/gptoss.py +0 -288
  110. webscout/Provider/OPENAI/kimi.py +0 -469
  111. webscout/Provider/OPENAI/mcpcore.py +0 -431
  112. webscout/Provider/OPENAI/multichat.py +0 -378
  113. webscout/Provider/Reka.py +0 -214
  114. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  115. webscout/Provider/asksteve.py +0 -220
  116. webscout/Provider/copilot.py +0 -441
  117. webscout/Provider/freeaichat.py +0 -294
  118. webscout/Provider/koala.py +0 -182
  119. webscout/Provider/lmarena.py +0 -198
  120. webscout/Provider/monochat.py +0 -275
  121. webscout/Provider/multichat.py +0 -375
  122. webscout/Provider/scnet.py +0 -244
  123. webscout/Provider/talkai.py +0 -194
  124. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  125. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  126. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  127. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  128. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  129. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  130. {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
--- a/webscout/Provider/OPENAI/MiniMax.py
+++ /dev/null
@@ -1,298 +0,0 @@
- import os
- import requests
- import json
- import time
- import uuid
- from typing import List, Dict, Optional, Union, Generator, Any
-
- from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from webscout.Provider.OPENAI.utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, count_tokens
- )
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'MiniMax'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[Dict[str, str]] = None,
-         stop: Optional[Union[str, List[str]]] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         api_key = self._client.api_key
-         if not api_key:
-             raise Exception("MINIMAX_API_KEY not set in environment.")
-         model_name = self._client.convert_model_name(model)
-         payload = {
-             "model": model_name,
-             "messages": messages,
-             "stream": stream,
-         }
-         if max_tokens is not None:
-             payload["max_tokens"] = max_tokens
-         if temperature is not None:
-             payload["temperature"] = temperature
-         if top_p is not None:
-             payload["top_p"] = top_p
-         if stop is not None:
-             payload["stop"] = stop
-         payload.update(kwargs)
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-         if stream:
-             return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
-         else:
-             return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             headers = {
-                 'Content-Type': 'application/json',
-                 'Authorization': f'Bearer {self._client.api_key}',
-             }
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=headers,
-                 data=json.dumps(payload),
-                 stream=True,
-                 timeout=timeout or self._client.timeout,
-                 proxies=proxies
-             )
-             response.raise_for_status()
-             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-             completion_tokens = 0
-             total_tokens = prompt_tokens
-             streaming_response = ""
-             last_content = ""
-             last_reasoning = ""
-             in_think = False
-             for line in response.iter_lines():
-                 if line:
-                     line = line.decode('utf-8')
-                     if line.startswith('data: '):
-                         line = line[6:]
-                     if line.strip() == '[DONE]':
-                         break
-                     try:
-                         chunk_data = json.loads(line)
-                         if 'choices' in chunk_data and chunk_data['choices']:
-                             choice_data = chunk_data['choices'][0]
-                             delta = choice_data.get('delta', {})
-                             content = delta.get('content')
-                             reasoning_content = delta.get('reasoning_content')
-                             finish_reason = choice_data.get('finish_reason')
-                             # Only yield <think> and reasoning_content if reasoning_content is not empty
-                             if reasoning_content and reasoning_content.strip() and reasoning_content != last_reasoning:
-                                 if not in_think:
-                                     yield ChatCompletionChunk(
-                                         id=request_id,
-                                         choices=[Choice(index=0, delta=ChoiceDelta(content='<think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
-                                         created=created_time,
-                                         model=model
-                                     )
-                                     in_think = True
-                                 yield ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[Choice(index=0, delta=ChoiceDelta(content=reasoning_content, role=None, tool_calls=None), finish_reason=None, logprobs=None)],
-                                     created=created_time,
-                                     model=model
-                                 )
-                                 last_reasoning = reasoning_content
-                             # Only yield </think> if we were in <think> and now have new content
-                             if in_think and content and content.strip() and content != last_content:
-                                 yield ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[Choice(index=0, delta=ChoiceDelta(content='</think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
-                                     created=created_time,
-                                     model=model
-                                 )
-                                 in_think = False
-                             # Only yield content if it is not empty
-                             if content and content.strip() and content != last_content:
-                                 completion_tokens += count_tokens(content)
-                                 total_tokens = prompt_tokens + completion_tokens
-                                 choice_delta = ChoiceDelta(
-                                     content=content,
-                                     role=delta.get('role', 'assistant'),
-                                     tool_calls=delta.get('tool_calls')
-                                 )
-                                 choice = Choice(
-                                     index=0,
-                                     delta=choice_delta,
-                                     finish_reason=finish_reason,
-                                     logprobs=None
-                                 )
-                                 chunk = ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[choice],
-                                     created=created_time,
-                                     model=model
-                                 )
-                                 chunk.usage = {
-                                     "prompt_tokens": prompt_tokens,
-                                     "completion_tokens": completion_tokens,
-                                     "total_tokens": total_tokens,
-                                     "estimated_cost": None
-                                 }
-                                 yield chunk
-                                 streaming_response += content
-                                 last_content = content
-                     except Exception:
-                         continue
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(content=None, role=None, tool_calls=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model
-             )
-             chunk.usage = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "estimated_cost": None
-             }
-             yield chunk
-         except Exception as e:
-             raise IOError(f"MiniMax stream request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-     ) -> ChatCompletion:
-         try:
-             headers = {
-                 'Content-Type': 'application/json',
-                 'Authorization': f'Bearer {self._client.api_key}',
-             }
-             payload_copy = payload.copy()
-             payload_copy["stream"] = False
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=headers,
-                 data=json.dumps(payload_copy),
-                 timeout=timeout or self._client.timeout,
-                 proxies=proxies
-             )
-             response.raise_for_status()
-             data = response.json()
-             full_text = ""
-             finish_reason = "stop"
-             if 'choices' in data and data['choices']:
-                 choice_data = data['choices'][0]
-                 # MiniMax returns content in 'message' or directly in 'delta' for streaming
-                 reasoning_content = ""
-                 if 'message' in choice_data and choice_data['message']:
-                     full_text = choice_data['message'].get('content', '')
-                     reasoning_content = choice_data['message'].get('reasoning_content', '')
-                 elif 'delta' in choice_data and choice_data['delta']:
-                     full_text = choice_data['delta'].get('content', '')
-                     reasoning_content = choice_data['delta'].get('reasoning_content', '')
-                 finish_reason = choice_data.get('finish_reason', 'stop')
-                 # If both are present, concatenate with <think> ... </think>
-                 if reasoning_content and reasoning_content.strip():
-                     if full_text and full_text.strip():
-                         full_text = f"<think>\n\n{reasoning_content}</think>\n\n{full_text}"
-                     else:
-                         full_text = f"<think>\n\n{reasoning_content}</think>\n\n"
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text
-             )
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason=finish_reason
-             )
-             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-             completion_tokens = count_tokens(full_text)
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=prompt_tokens + completion_tokens
-             )
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-             return completion
-         except Exception as e:
-             raise IOError(f"MiniMax non-stream request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'MiniMax'):
-         self.completions = Completions(client)
-
- class MiniMax(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for MiniMax API.
-     """
-     AVAILABLE_MODELS = [
-         "MiniMax-Reasoning-01"
-     ]
-     def __init__(self, timeout: int = 30):
-         self.timeout = timeout
-         self.api_endpoint = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
-         self.session = requests.Session()
-         self.api_key = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return MiniMax.AVAILABLE_MODELS
-         return _ModelList()
-
-     def convert_model_name(self, model: str) -> str:
-         if model in self.AVAILABLE_MODELS:
-             return model
-         return self.AVAILABLE_MODELS[0]
-
- if __name__ == "__main__":
-     from rich import print
-     client = MiniMax()
-     messages = [
-         {"role": "user", "content": "What is the capital of France?"}
-     ]
-     # Non-streaming example
-     response = client.chat.completions.create(
-         model="MiniMax-Reasoning-01",
-         messages=messages,
-         max_tokens=5000,
-         stream=False
-     )
-     print("Non-streaming response:")
-     print(response)
-     # Streaming example
-     print("\nStreaming response:")
-     stream = client.chat.completions.create(
-         model="MiniMax-Reasoning-01",
-         messages=messages,
-         max_tokens=5000,
-         stream=True
-     )
-     for chunk in stream:
-         if chunk.choices[0].delta and chunk.choices[0].delta.content:
-             print(chunk.choices[0].delta.content, end="")
-     print()
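
For context, the deleted MiniMax.py above was a thin wrapper around MiniMax's OpenAI-style chat-completion endpoint, with a bearer token hardcoded in the source. A minimal sketch of the same non-streaming call without the webscout wrapper might look like the following; the endpoint URL, payload shape, model name, and response parsing are taken from the deleted file, while the MINIMAX_API_KEY environment variable (referenced in the deleted file's error message) and the helper name are illustrative assumptions.

import os
import requests

# Endpoint and default model come from the deleted MiniMax.py.
API_ENDPOINT = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"

def chat(messages, model="MiniMax-Reasoning-01", max_tokens=5000, timeout=30):
    # Read the key from the environment instead of hardcoding it in source.
    api_key = os.environ["MINIMAX_API_KEY"]
    response = requests.post(
        API_ENDPOINT,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        json={
            "model": model,
            "messages": messages,
            "stream": False,
            "max_tokens": max_tokens,
        },
        timeout=timeout,
    )
    response.raise_for_status()
    # Same response shape the deleted provider parsed: choices[0].message.content
    return response.json()["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(chat([{"role": "user", "content": "What is the capital of France?"}]))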