webscout-8.3.5-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
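
A diff like this can be reproduced locally. Below is a minimal sketch, assuming both wheels have already been downloaded into the working directory (for example via pip download webscout==8.3.5 --no-deps); the wheel filenames are inferred from the title above and are not guaranteed to match your download exactly. Wheels are ordinary zip archives, so their members can be compared with the standard library alone:

import difflib
import zipfile

old = zipfile.ZipFile("webscout-8.3.5-py3-none-any.whl")
new = zipfile.ZipFile("webscout-8.3.7-py3-none-any.whl")

# The union of member names covers added, removed, and changed files.
for name in sorted(set(old.namelist()) | set(new.namelist())):
    a = old.read(name).decode("utf-8", "replace").splitlines() if name in old.namelist() else []
    b = new.read(name).decode("utf-8", "replace").splitlines() if name in new.namelist() else []
    for line in difflib.unified_diff(a, b, fromfile=f"8.3.5/{name}", tofile=f"8.3.7/{name}", lineterm=""):
        print(line)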

Potentially problematic release: this version of webscout has been flagged as possibly problematic.

Files changed (159)
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
--- webscout/Provider/OPENAI/MiniMax.py
+++ /dev/null
@@ -1,298 +0,0 @@
- import os
- import requests
- import json
- import time
- import uuid
- from typing import List, Dict, Optional, Union, Generator, Any
-
- from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
- from webscout.Provider.OPENAI.utils import (
-     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-     ChatCompletionMessage, CompletionUsage, count_tokens
- )
-
- class Completions(BaseCompletions):
-     def __init__(self, client: 'MiniMax'):
-         self._client = client
-
-     def create(
-         self,
-         *,
-         model: str,
-         messages: List[Dict[str, str]],
-         max_tokens: Optional[int] = None,
-         stream: bool = False,
-         temperature: Optional[float] = None,
-         top_p: Optional[float] = None,
-         timeout: Optional[int] = None,
-         proxies: Optional[Dict[str, str]] = None,
-         stop: Optional[Union[str, List[str]]] = None,
-         **kwargs: Any
-     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-         """
-         Creates a model response for the given chat conversation.
-         Mimics openai.chat.completions.create
-         """
-         api_key = self._client.api_key
-         if not api_key:
-             raise Exception("MINIMAX_API_KEY not set in environment.")
-         model_name = self._client.convert_model_name(model)
-         payload = {
-             "model": model_name,
-             "messages": messages,
-             "stream": stream,
-         }
-         if max_tokens is not None:
-             payload["max_tokens"] = max_tokens
-         if temperature is not None:
-             payload["temperature"] = temperature
-         if top_p is not None:
-             payload["top_p"] = top_p
-         if stop is not None:
-             payload["stop"] = stop
-         payload.update(kwargs)
-         request_id = f"chatcmpl-{uuid.uuid4()}"
-         created_time = int(time.time())
-         if stream:
-             return self._create_stream(request_id, created_time, model_name, payload, timeout, proxies)
-         else:
-             return self._create_non_stream(request_id, created_time, model_name, payload, timeout, proxies)
-
-     def _create_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-     ) -> Generator[ChatCompletionChunk, None, None]:
-         try:
-             headers = {
-                 'Content-Type': 'application/json',
-                 'Authorization': f'Bearer {self._client.api_key}',
-             }
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=headers,
-                 data=json.dumps(payload),
-                 stream=True,
-                 timeout=timeout or self._client.timeout,
-                 proxies=proxies
-             )
-             response.raise_for_status()
-             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-             completion_tokens = 0
-             total_tokens = prompt_tokens
-             streaming_response = ""
-             last_content = ""
-             last_reasoning = ""
-             in_think = False
-             for line in response.iter_lines():
-                 if line:
-                     line = line.decode('utf-8')
-                     if line.startswith('data: '):
-                         line = line[6:]
-                     if line.strip() == '[DONE]':
-                         break
-                     try:
-                         chunk_data = json.loads(line)
-                         if 'choices' in chunk_data and chunk_data['choices']:
-                             choice_data = chunk_data['choices'][0]
-                             delta = choice_data.get('delta', {})
-                             content = delta.get('content')
-                             reasoning_content = delta.get('reasoning_content')
-                             finish_reason = choice_data.get('finish_reason')
-                             # Only yield <think> and reasoning_content if reasoning_content is not empty
-                             if reasoning_content and reasoning_content.strip() and reasoning_content != last_reasoning:
-                                 if not in_think:
-                                     yield ChatCompletionChunk(
-                                         id=request_id,
-                                         choices=[Choice(index=0, delta=ChoiceDelta(content='<think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
-                                         created=created_time,
-                                         model=model
-                                     )
-                                     in_think = True
-                                 yield ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[Choice(index=0, delta=ChoiceDelta(content=reasoning_content, role=None, tool_calls=None), finish_reason=None, logprobs=None)],
-                                     created=created_time,
-                                     model=model
-                                 )
-                                 last_reasoning = reasoning_content
-                             # Only yield </think> if we were in <think> and now have new content
-                             if in_think and content and content.strip() and content != last_content:
-                                 yield ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[Choice(index=0, delta=ChoiceDelta(content='</think>\n\n', role=None, tool_calls=None), finish_reason=None, logprobs=None)],
-                                     created=created_time,
-                                     model=model
-                                 )
-                                 in_think = False
-                             # Only yield content if it is not empty
-                             if content and content.strip() and content != last_content:
-                                 completion_tokens += count_tokens(content)
-                                 total_tokens = prompt_tokens + completion_tokens
-                                 choice_delta = ChoiceDelta(
-                                     content=content,
-                                     role=delta.get('role', 'assistant'),
-                                     tool_calls=delta.get('tool_calls')
-                                 )
-                                 choice = Choice(
-                                     index=0,
-                                     delta=choice_delta,
-                                     finish_reason=finish_reason,
-                                     logprobs=None
-                                 )
-                                 chunk = ChatCompletionChunk(
-                                     id=request_id,
-                                     choices=[choice],
-                                     created=created_time,
-                                     model=model
-                                 )
-                                 chunk.usage = {
-                                     "prompt_tokens": prompt_tokens,
-                                     "completion_tokens": completion_tokens,
-                                     "total_tokens": total_tokens,
-                                     "estimated_cost": None
-                                 }
-                                 yield chunk
-                                 streaming_response += content
-                                 last_content = content
-                     except Exception:
-                         continue
-             # Final chunk with finish_reason="stop"
-             delta = ChoiceDelta(content=None, role=None, tool_calls=None)
-             choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
-             chunk = ChatCompletionChunk(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model
-             )
-             chunk.usage = {
-                 "prompt_tokens": prompt_tokens,
-                 "completion_tokens": completion_tokens,
-                 "total_tokens": total_tokens,
-                 "estimated_cost": None
-             }
-             yield chunk
-         except Exception as e:
-             raise IOError(f"MiniMax stream request failed: {e}") from e
-
-     def _create_non_stream(
-         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
-     ) -> ChatCompletion:
-         try:
-             headers = {
-                 'Content-Type': 'application/json',
-                 'Authorization': f'Bearer {self._client.api_key}',
-             }
-             payload_copy = payload.copy()
-             payload_copy["stream"] = False
-             response = self._client.session.post(
-                 self._client.api_endpoint,
-                 headers=headers,
-                 data=json.dumps(payload_copy),
-                 timeout=timeout or self._client.timeout,
-                 proxies=proxies
-             )
-             response.raise_for_status()
-             data = response.json()
-             full_text = ""
-             finish_reason = "stop"
-             if 'choices' in data and data['choices']:
-                 choice_data = data['choices'][0]
-                 # MiniMax returns content in 'message' or directly in 'delta' for streaming
-                 reasoning_content = ""
-                 if 'message' in choice_data and choice_data['message']:
-                     full_text = choice_data['message'].get('content', '')
-                     reasoning_content = choice_data['message'].get('reasoning_content', '')
-                 elif 'delta' in choice_data and choice_data['delta']:
-                     full_text = choice_data['delta'].get('content', '')
-                     reasoning_content = choice_data['delta'].get('reasoning_content', '')
-                 finish_reason = choice_data.get('finish_reason', 'stop')
-                 # If both are present, concatenate with <think> ... </think>
-                 if reasoning_content and reasoning_content.strip():
-                     if full_text and full_text.strip():
-                         full_text = f"<think>\n\n{reasoning_content}</think>\n\n{full_text}"
-                     else:
-                         full_text = f"<think>\n\n{reasoning_content}</think>\n\n"
-             message = ChatCompletionMessage(
-                 role="assistant",
-                 content=full_text
-             )
-             choice = Choice(
-                 index=0,
-                 message=message,
-                 finish_reason=finish_reason
-             )
-             prompt_tokens = count_tokens([msg.get("content", "") for msg in payload.get("messages", [])])
-             completion_tokens = count_tokens(full_text)
-             usage = CompletionUsage(
-                 prompt_tokens=prompt_tokens,
-                 completion_tokens=completion_tokens,
-                 total_tokens=prompt_tokens + completion_tokens
-             )
-             completion = ChatCompletion(
-                 id=request_id,
-                 choices=[choice],
-                 created=created_time,
-                 model=model,
-                 usage=usage,
-             )
-             return completion
-         except Exception as e:
-             raise IOError(f"MiniMax non-stream request failed: {e}") from e
-
- class Chat(BaseChat):
-     def __init__(self, client: 'MiniMax'):
-         self.completions = Completions(client)
-
- class MiniMax(OpenAICompatibleProvider):
-     """
-     OpenAI-compatible client for MiniMax API.
-     """
-     AVAILABLE_MODELS = [
-         "MiniMax-Reasoning-01"
-     ]
-     def __init__(self, timeout: int = 30):
-         self.timeout = timeout
-         self.api_endpoint = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
-         self.session = requests.Session()
-         self.api_key = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"
-         self.chat = Chat(self)
-
-     @property
-     def models(self):
-         class _ModelList:
-             def list(inner_self):
-                 return MiniMax.AVAILABLE_MODELS
-         return _ModelList()
-
-     def convert_model_name(self, model: str) -> str:
-         if model in self.AVAILABLE_MODELS:
-             return model
-         return self.AVAILABLE_MODELS[0]
-
- if __name__ == "__main__":
-     from rich import print
-     client = MiniMax()
-     messages = [
-         {"role": "user", "content": "What is the capital of France?"}
-     ]
-     # Non-streaming example
-     response = client.chat.completions.create(
-         model="MiniMax-Reasoning-01",
-         messages=messages,
-         max_tokens=5000,
-         stream=False
-     )
-     print("Non-streaming response:")
-     print(response)
-     # Streaming example
-     print("\nStreaming response:")
-     stream = client.chat.completions.create(
-         model="MiniMax-Reasoning-01",
-         messages=messages,
-         max_tokens=5000,
-         stream=True
-     )
-     for chunk in stream:
-         if chunk.choices[0].delta and chunk.choices[0].delta.content:
-             print(chunk.choices[0].delta.content, end="")
-     print()
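
The deleted _create_stream above is, at its core, a standard SSE parse: strip the "data: " prefix, stop at the [DONE] sentinel, and fence any reasoning_content deltas in <think> ... </think> markers before passing ordinary content deltas through. A minimal standalone sketch of that pattern follows; sse_lines and emit are hypothetical stand-ins for the HTTP byte stream and chunk construction, not webscout APIs:

import json
from typing import Callable, Iterable

def parse_think_stream(sse_lines: Iterable[str], emit: Callable[[str], None]) -> None:
    # Distilled from the deleted _create_stream: reasoning deltas are
    # wrapped in <think> tags, answer deltas pass through unchanged.
    in_think = False
    for raw in sse_lines:
        line = raw[6:] if raw.startswith("data: ") else raw
        if line.strip() == "[DONE]":
            break
        try:
            delta = json.loads(line)["choices"][0].get("delta", {})
        except (ValueError, KeyError, IndexError):
            continue  # skip malformed chunks and non-JSON keep-alive lines
        reasoning = delta.get("reasoning_content")
        content = delta.get("content")
        if reasoning and reasoning.strip():
            if not in_think:
                emit("<think>\n\n")
                in_think = True
            emit(reasoning)
        if content and content.strip():
            if in_think:
                emit("</think>\n\n")
                in_think = False
            emit(content)

parse_think_stream(
    ['data: {"choices":[{"delta":{"reasoning_content":"Recalling geography..."}}]}',
     'data: {"choices":[{"delta":{"content":"Paris."}}]}',
     'data: [DONE]'],
    emit=lambda text: print(text, end=""),
)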