webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; review the changes listed below for details.

Files changed (159)
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -1,207 +0,0 @@
1
- import os
2
- import json
3
- import requests
4
- from typing import Any, Dict, Optional, Union, Generator
5
- from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
6
- from webscout.AIbase import Provider
7
- from webscout import exceptions
8
-
9
- class MiniMax(Provider):
10
- """
11
- Provider for MiniMax-Reasoning-01 API, following the standard provider interface.
12
- """
13
- AVAILABLE_MODELS = ["MiniMax-Reasoning-01"]
14
- API_URL = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
15
- # TODO: Move API_KEY to env/config for security
16
- API_KEY = os.environ.get("MINIMAX_API_KEY") or """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"""
17
- MODEL_CONTROL_DEFAULTS = {"tokens_to_generate": 40000, "temperature": 1, "top_p": 0.95}
18
-
19
- def __init__(
20
- self,
21
- is_conversation: bool = True,
22
- max_tokens: int = 2049,
23
- timeout: int = 30,
24
- intro: str = None,
25
- filepath: str = None,
26
- update_file: bool = True,
27
- proxies: dict = {},
28
- history_offset: int = 10250,
29
- act: str = None,
30
- model: str = "MiniMax-Reasoning-01",
31
- system_prompt: str = "You are a helpful assistant, always respond in english",
32
- ):
33
- if model not in self.AVAILABLE_MODELS:
34
- raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
35
- self.model = model
36
- self.api_url = self.API_URL
37
- self.api_key = self.API_KEY
38
- self.timeout = timeout
39
- self.is_conversation = is_conversation
40
- self.max_tokens_to_sample = max_tokens
41
- self.last_response = {}
42
- self.system_prompt = system_prompt
43
- self.proxies = proxies
44
- self.__available_optimizers = tuple(
45
- method for method in dir(Optimizers)
46
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
47
- )
48
- Conversation.intro = (
49
- AwesomePrompts().get_act(
50
- act, raise_not_found=True, default=None, case_insensitive=True
51
- )
52
- if act
53
- else intro or Conversation.intro
54
- )
55
- self.conversation = Conversation(
56
- is_conversation, self.max_tokens_to_sample, filepath, update_file
57
- )
58
- self.conversation.history_offset = history_offset
59
-
60
- @staticmethod
61
- def _extract_content(chunk: Any) -> Optional[dict]:
62
- if not isinstance(chunk, dict):
63
- return None
64
- choice = chunk.get('choices', [{}])[0]
65
- delta = choice.get('delta', {})
66
- content = delta.get('content')
67
- reasoning = delta.get('reasoning_content')
68
- result = {}
69
- if content:
70
- result['content'] = content
71
- if reasoning:
72
- result['reasoning_content'] = reasoning
73
- return result if result else None
74
-
75
- def ask(
76
- self,
77
- prompt: str,
78
- stream: bool = True,
79
- raw: bool = False,
80
- optimizer: str = None,
81
- conversationally: bool = False,
82
- ) -> Union[Dict[str, Any], Generator]:
83
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
84
- if optimizer:
85
- if optimizer in self.__available_optimizers:
86
- conversation_prompt = getattr(Optimizers, optimizer)(
87
- conversation_prompt if conversationally else prompt
88
- )
89
- else:
90
- raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
91
- messages = [
92
- {'role': 'system', 'content': self.system_prompt},
93
- {'role': 'user', 'content': conversation_prompt}
94
- ]
95
- data = {
96
- 'model': self.model,
97
- 'messages': messages,
98
- 'stream': True,
99
- 'max_tokens': self.MODEL_CONTROL_DEFAULTS.get('tokens_to_generate', 512),
100
- 'temperature': self.MODEL_CONTROL_DEFAULTS.get('temperature', 1.0),
101
- 'top_p': self.MODEL_CONTROL_DEFAULTS.get('top_p', 1.0),
102
- }
103
- headers = {
104
- 'Content-Type': 'application/json',
105
- 'Authorization': f'Bearer {self.api_key}',
106
- }
107
- def for_stream():
108
- try:
109
- response = requests.post(
110
- self.api_url,
111
- headers=headers,
112
- data=json.dumps(data),
113
- stream=True,
114
- timeout=self.timeout,
115
- proxies=self.proxies if self.proxies else None
116
- )
117
- if not response.ok:
118
- raise exceptions.FailedToGenerateResponseError(
119
- f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
120
- )
121
- streaming_response = ""
122
- last_content = ""
123
- last_reasoning = ""
124
- in_think = False
125
- processed_stream = sanitize_stream(
126
- response.iter_lines(),
127
- intro_value="data:",
128
- to_json=True,
129
- content_extractor=self._extract_content,
130
- raw=False # always process as dict for logic
131
- )
132
- for chunk in processed_stream:
133
- if not chunk:
134
- continue
135
- content = chunk.get('content') if isinstance(chunk, dict) else None
136
- reasoning = chunk.get('reasoning_content') if isinstance(chunk, dict) else None
137
- # Handle reasoning_content with <think> tags
138
- if reasoning and reasoning != last_reasoning:
139
- if not in_think:
140
- yield "<think>\n\n"
141
- in_think = True
142
- yield reasoning
143
- last_reasoning = reasoning
144
- # If we were in <think> and now have new content, close <think>
145
- if in_think and content and content != last_content:
146
- yield "</think>\n\n"
147
- in_think = False
148
- # Handle normal content
149
- if content and content != last_content:
150
- yield content
151
- streaming_response += content
152
- last_content = content
153
- if not raw:
154
- self.last_response = {"text": streaming_response}
155
- self.conversation.update_chat_history(prompt, streaming_response)
156
- except Exception as e:
157
- raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
158
- def for_non_stream():
159
- full_response = ""
160
- for chunk in for_stream():
161
- if isinstance(chunk, dict) and "text" in chunk:
162
- full_response += chunk["text"]
163
- elif isinstance(chunk, str):
164
- full_response += chunk
165
- if not raw:
166
- self.last_response = {"text": full_response}
167
- self.conversation.update_chat_history(prompt, full_response)
168
- return {"text": full_response}
169
- else:
170
- return full_response
171
- return for_stream() if stream else for_non_stream()
172
-
173
- def chat(
174
- self,
175
- prompt: str,
176
- stream: bool = True,
177
- optimizer: str = None,
178
- conversationally: bool = False,
179
- raw: bool = False,
180
- ) -> Union[str, Generator[str, None, None]]:
181
- def for_stream():
182
- for response in self.ask(
183
- prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
184
- ):
185
- if raw:
186
- yield response
187
- else:
188
- yield response
189
- def for_non_stream():
190
- result = self.ask(
191
- prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
192
- )
193
- if raw:
194
- return result
195
- else:
196
- return self.get_message(result)
197
- return for_stream() if stream else for_non_stream()
198
-
199
- def get_message(self, response: dict) -> str:
200
- assert isinstance(response, dict), "Response should be of dict data-type only"
201
- return response.get("text", "")
202
-
203
- if __name__ == "__main__":
204
- ai = MiniMax()
205
- resp = ai.chat("What is the capital of France?", stream=True, raw=False)
206
- for chunk in resp:
207
- print(chunk, end="", flush=True)