webscout 8.3.5__py3-none-any.whl → 8.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; consult the registry's security advisory for details.

Files changed (159):
  1. webscout/AIutel.py +2 -0
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
  5. webscout/Provider/AISEARCH/__init__.py +18 -11
  6. webscout/Provider/AISEARCH/scira_search.py +3 -1
  7. webscout/Provider/Aitopia.py +2 -3
  8. webscout/Provider/Andi.py +3 -3
  9. webscout/Provider/ChatGPTClone.py +1 -1
  10. webscout/Provider/ChatSandbox.py +1 -0
  11. webscout/Provider/Cloudflare.py +1 -1
  12. webscout/Provider/Cohere.py +1 -0
  13. webscout/Provider/Deepinfra.py +13 -10
  14. webscout/Provider/ExaAI.py +1 -1
  15. webscout/Provider/ExaChat.py +1 -80
  16. webscout/Provider/Flowith.py +6 -1
  17. webscout/Provider/Gemini.py +7 -5
  18. webscout/Provider/GeminiProxy.py +1 -0
  19. webscout/Provider/GithubChat.py +4 -1
  20. webscout/Provider/Groq.py +1 -1
  21. webscout/Provider/HeckAI.py +8 -4
  22. webscout/Provider/Jadve.py +23 -38
  23. webscout/Provider/K2Think.py +308 -0
  24. webscout/Provider/Koboldai.py +8 -186
  25. webscout/Provider/LambdaChat.py +2 -4
  26. webscout/Provider/Nemotron.py +3 -4
  27. webscout/Provider/Netwrck.py +6 -8
  28. webscout/Provider/OLLAMA.py +1 -0
  29. webscout/Provider/OPENAI/Cloudflare.py +6 -7
  30. webscout/Provider/OPENAI/FalconH1.py +2 -7
  31. webscout/Provider/OPENAI/FreeGemini.py +6 -8
  32. webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
  33. webscout/Provider/OPENAI/NEMOTRON.py +3 -6
  34. webscout/Provider/OPENAI/PI.py +5 -4
  35. webscout/Provider/OPENAI/Qwen3.py +2 -3
  36. webscout/Provider/OPENAI/README.md +2 -1
  37. webscout/Provider/OPENAI/TogetherAI.py +52 -57
  38. webscout/Provider/OPENAI/TwoAI.py +3 -4
  39. webscout/Provider/OPENAI/__init__.py +17 -56
  40. webscout/Provider/OPENAI/ai4chat.py +313 -303
  41. webscout/Provider/OPENAI/base.py +9 -29
  42. webscout/Provider/OPENAI/chatgpt.py +7 -2
  43. webscout/Provider/OPENAI/chatgptclone.py +4 -7
  44. webscout/Provider/OPENAI/chatsandbox.py +84 -59
  45. webscout/Provider/OPENAI/deepinfra.py +12 -6
  46. webscout/Provider/OPENAI/e2b.py +60 -8
  47. webscout/Provider/OPENAI/flowith.py +4 -3
  48. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  49. webscout/Provider/OPENAI/heckai.py +4 -1
  50. webscout/Provider/OPENAI/netwrck.py +9 -12
  51. webscout/Provider/OPENAI/refact.py +274 -0
  52. webscout/Provider/OPENAI/scirachat.py +6 -0
  53. webscout/Provider/OPENAI/textpollinations.py +3 -14
  54. webscout/Provider/OPENAI/toolbaz.py +14 -10
  55. webscout/Provider/OpenGPT.py +1 -1
  56. webscout/Provider/Openai.py +150 -402
  57. webscout/Provider/PI.py +1 -0
  58. webscout/Provider/Perplexitylabs.py +1 -2
  59. webscout/Provider/QwenLM.py +107 -89
  60. webscout/Provider/STT/__init__.py +17 -2
  61. webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
  62. webscout/Provider/StandardInput.py +1 -1
  63. webscout/Provider/TTI/__init__.py +18 -12
  64. webscout/Provider/TTI/bing.py +14 -2
  65. webscout/Provider/TTI/together.py +10 -9
  66. webscout/Provider/TTS/README.md +0 -1
  67. webscout/Provider/TTS/__init__.py +18 -11
  68. webscout/Provider/TTS/base.py +479 -159
  69. webscout/Provider/TTS/deepgram.py +409 -156
  70. webscout/Provider/TTS/elevenlabs.py +425 -111
  71. webscout/Provider/TTS/freetts.py +317 -140
  72. webscout/Provider/TTS/gesserit.py +192 -128
  73. webscout/Provider/TTS/murfai.py +248 -113
  74. webscout/Provider/TTS/openai_fm.py +347 -129
  75. webscout/Provider/TTS/speechma.py +620 -586
  76. webscout/Provider/TeachAnything.py +1 -0
  77. webscout/Provider/TextPollinationsAI.py +5 -15
  78. webscout/Provider/TogetherAI.py +136 -142
  79. webscout/Provider/TwoAI.py +53 -309
  80. webscout/Provider/TypliAI.py +2 -1
  81. webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
  82. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  83. webscout/Provider/Venice.py +2 -1
  84. webscout/Provider/VercelAI.py +1 -0
  85. webscout/Provider/WiseCat.py +2 -1
  86. webscout/Provider/WrDoChat.py +2 -1
  87. webscout/Provider/__init__.py +18 -174
  88. webscout/Provider/ai4chat.py +1 -1
  89. webscout/Provider/akashgpt.py +7 -10
  90. webscout/Provider/cerebras.py +194 -38
  91. webscout/Provider/chatglm.py +170 -83
  92. webscout/Provider/cleeai.py +1 -2
  93. webscout/Provider/deepseek_assistant.py +1 -1
  94. webscout/Provider/elmo.py +1 -1
  95. webscout/Provider/geminiapi.py +1 -1
  96. webscout/Provider/granite.py +1 -1
  97. webscout/Provider/hermes.py +1 -3
  98. webscout/Provider/julius.py +1 -0
  99. webscout/Provider/learnfastai.py +1 -1
  100. webscout/Provider/llama3mitril.py +1 -1
  101. webscout/Provider/llmchat.py +1 -1
  102. webscout/Provider/llmchatco.py +1 -1
  103. webscout/Provider/meta.py +3 -3
  104. webscout/Provider/oivscode.py +2 -2
  105. webscout/Provider/scira_chat.py +51 -124
  106. webscout/Provider/searchchat.py +1 -0
  107. webscout/Provider/sonus.py +1 -1
  108. webscout/Provider/toolbaz.py +15 -11
  109. webscout/Provider/turboseek.py +31 -22
  110. webscout/Provider/typefully.py +2 -1
  111. webscout/Provider/x0gpt.py +1 -0
  112. webscout/Provider/yep.py +2 -1
  113. webscout/conversation.py +22 -20
  114. webscout/sanitize.py +14 -10
  115. webscout/scout/README.md +20 -23
  116. webscout/scout/core/crawler.py +125 -38
  117. webscout/scout/core/scout.py +26 -5
  118. webscout/tempid.py +6 -0
  119. webscout/version.py +1 -1
  120. webscout/webscout_search.py +13 -6
  121. webscout/webscout_search_async.py +10 -8
  122. webscout/yep_search.py +13 -5
  123. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/METADATA +3 -1
  124. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/RECORD +132 -155
  125. webscout/Provider/AllenAI.py +0 -440
  126. webscout/Provider/Blackboxai.py +0 -793
  127. webscout/Provider/FreeGemini.py +0 -250
  128. webscout/Provider/Glider.py +0 -225
  129. webscout/Provider/Hunyuan.py +0 -283
  130. webscout/Provider/MCPCore.py +0 -322
  131. webscout/Provider/MiniMax.py +0 -207
  132. webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
  133. webscout/Provider/OPENAI/MiniMax.py +0 -298
  134. webscout/Provider/OPENAI/autoproxy.py +0 -1067
  135. webscout/Provider/OPENAI/c4ai.py +0 -394
  136. webscout/Provider/OPENAI/copilot.py +0 -305
  137. webscout/Provider/OPENAI/glider.py +0 -330
  138. webscout/Provider/OPENAI/mcpcore.py +0 -431
  139. webscout/Provider/OPENAI/multichat.py +0 -378
  140. webscout/Provider/Reka.py +0 -214
  141. webscout/Provider/TTS/sthir.py +0 -94
  142. webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
  143. webscout/Provider/asksteve.py +0 -220
  144. webscout/Provider/copilot.py +0 -422
  145. webscout/Provider/freeaichat.py +0 -294
  146. webscout/Provider/koala.py +0 -182
  147. webscout/Provider/lmarena.py +0 -198
  148. webscout/Provider/monochat.py +0 -275
  149. webscout/Provider/multichat.py +0 -375
  150. webscout/Provider/scnet.py +0 -244
  151. webscout/Provider/talkai.py +0 -194
  152. /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
  153. /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
  154. /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
  155. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  156. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
  157. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
  158. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
  159. {webscout-8.3.5.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
@@ -1,90 +0,0 @@
1
- import requests
2
- import json
3
-
4
- def fetch_together_models():
5
- """Fetch models from Together.xyz API"""
6
- api_key = "56c8eeff9971269d7a7e625ff88e8a83a34a556003a5c87c289ebe9a3d8a3d2c"
7
- endpoint = "https://api.together.xyz/v1/models"
8
-
9
- headers = {
10
- "Authorization": f"Bearer {api_key}",
11
- "Accept": "application/json"
12
- }
13
-
14
- try:
15
- response = requests.get(endpoint, headers=headers, timeout=30)
16
- response.raise_for_status()
17
-
18
- models_data = response.json()
19
-
20
- # Extract and categorize models
21
- chat_models = []
22
- image_models = []
23
- language_models = []
24
- all_models = []
25
-
26
- print(f"Total models found: {len(models_data)}")
27
- print("\n" + "="*80)
28
-
29
- for model in models_data:
30
- if isinstance(model, dict):
31
- model_id = model.get("id", "")
32
- model_type = model.get("type", "").lower()
33
- context_length = model.get("context_length", 0)
34
-
35
- if not model_id:
36
- continue
37
-
38
- all_models.append(model_id)
39
-
40
- # Categorize by type
41
- if model_type == "chat":
42
- chat_models.append(model_id)
43
- elif model_type == "image":
44
- image_models.append(model_id)
45
- elif model_type == "language":
46
- language_models.append(model_id)
47
-
48
- # Print model details
49
- print(f"Model: {model_id}")
50
- print(f" Type: {model_type}")
51
- print(f" Context Length: {context_length}")
52
- # if model.get("config"):
53
- # config = model["config"]
54
- # if config.get("stop"):
55
- # print(f" Stop Tokens: {config['stop']}")
56
- # print("-" * 40)
57
-
58
- print(f"\nSUMMARY:")
59
- print(f"Chat Models: {len(chat_models)}")
60
- print(f"Image Models: {len(image_models)}")
61
- print(f"Language Models: {len(language_models)}")
62
- print(f"Total Models: {len(all_models)}")
63
-
64
- # Generate Python list for code
65
- print("\n" + "="*80)
66
- print("AVAILABLE_MODELS = [")
67
- for model in sorted(all_models):
68
- print(f' "{model}",')
69
- print("]")
70
-
71
- return {
72
- "all_models": all_models,
73
- "chat_models": chat_models,
74
- "image_models": image_models,
75
- "language_models": language_models,
76
- "raw_data": models_data
77
- }
78
-
79
- except requests.exceptions.RequestException as e:
80
- print(f"Error fetching models: {e}")
81
- return None
82
- except json.JSONDecodeError as e:
83
- print(f"Error parsing JSON response: {e}")
84
- return None
85
-
86
- if __name__ == "__main__":
87
- result = fetch_together_models()
88
-
89
- if result:
90
- print(f"\n📊 Successfully fetched {len(result['all_models'])} models from Together.xyz")
@@ -1,220 +0,0 @@
1
- from typing import Any, Dict, Optional, Union
2
- from curl_cffi import CurlError
3
- from curl_cffi.requests import Session
4
- from webscout import exceptions
5
- from webscout.AIutel import Optimizers
6
- from webscout.AIutel import Conversation
7
- from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
8
- from webscout.AIbase import Provider
9
- from webscout.litagent import LitAgent
10
-
11
- class AskSteve(Provider):
12
- """
13
- A class to interact with the AskSteve API.
14
- """
15
- AVAILABLE_MODELS = ["Gemini"]
16
- def __init__(
17
- self,
18
- is_conversation: bool = True,
19
- max_tokens: int = 600,
20
- timeout: int = 30,
21
- intro: str = None,
22
- filepath: str = None,
23
- update_file: bool = True,
24
- proxies: dict = {},
25
- history_offset: int = 10250,
26
- act: str = None,
27
- ) -> None:
28
- """Instantiates AskSteve
29
-
30
- Args:
31
- is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
32
- max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
33
- timeout (int, optional): Http request timeout. Defaults to 30.
34
- intro (str, optional): Conversation introductory prompt. Defaults to None.
35
- filepath (str, optional): Path to file containing conversation history. Defaults to None.
36
- update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
37
- proxies (dict, optional): Http request proxies. Defaults to {}.
38
- history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
39
- act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
40
- system_prompt (str, optional): System prompt for AskSteve. Defaults to the provided string.
41
- """
42
- self.session = Session() # Use curl_cffi Session
43
- self.is_conversation = is_conversation
44
- self.max_tokens_to_sample = max_tokens
45
- self.api_endpoint = "https://quickstart.asksteve.to/quickStartRequest"
46
- self.timeout = timeout
47
- self.last_response = {}
48
- self.headers = {
49
- "accept": "*/*",
50
- "accept-encoding": "gzip, deflate, br, zstd",
51
- "accept-language": "en-US,en;q=0.9",
52
- "content-type": "text/plain;charset=UTF-8",
53
- "origin": "chrome-extension://gldebcpkoojijledacjeboaehblhfbjg",
54
- "priority": "u=1, i",
55
- "sec-fetch-dest": "empty",
56
- "sec-fetch-mode": "cors",
57
- "sec-fetch-site": "none",
58
- "sec-fetch-storage-access": "active",
59
- "user-agent": LitAgent().random(),
60
- }
61
-
62
- self.__available_optimizers = (
63
- method
64
- for method in dir(Optimizers)
65
- if callable(getattr(Optimizers, method)) and not method.startswith("__")
66
- )
67
- self.session.headers.update(self.headers)
68
- Conversation.intro = (
69
- AwesomePrompts().get_act(
70
- act, raise_not_found=True, default=None, case_insensitive=True
71
- )
72
- if act
73
- else intro or Conversation.intro
74
- )
75
- self.conversation = Conversation(
76
- is_conversation, self.max_tokens_to_sample, filepath, update_file
77
- )
78
- self.conversation.history_offset = history_offset
79
- self.session.proxies = proxies # Assign proxies directly
80
- @staticmethod
81
- def _asksteve_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
82
- """Extracts content from AskSteve JSON response."""
83
- if isinstance(chunk, dict) and "candidates" in chunk and len(chunk["candidates"]) > 0:
84
- parts = chunk["candidates"][0].get("content", {}).get("parts", [])
85
- if parts and isinstance(parts[0].get("text"), str):
86
- return parts[0]["text"]
87
- return None
88
-
89
- def ask(
90
- self,
91
- prompt: str,
92
- stream: bool = False,
93
- raw: bool = False,
94
- optimizer: str = None,
95
- conversationally: bool = False,
96
- ) -> dict:
97
- """Chat with AI
98
-
99
- Args:
100
- prompt (str): Prompt to be send.
101
- stream (bool, optional): Flag for streaming response. Defaults to False.
102
- raw (bool, optional): Stream back raw response as received. Defaults to False.
103
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
104
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
105
- Returns:
106
- dict : {}
107
- ```json
108
- {
109
- "text" : "How may I assist you today?"
110
- }
111
- ```
112
- """
113
- conversation_prompt = self.conversation.gen_complete_prompt(prompt)
114
- if optimizer:
115
- if optimizer in self.__available_optimizers:
116
- conversation_prompt = getattr(Optimizers, optimizer)(
117
- conversation_prompt if conversationally else prompt
118
- )
119
- else:
120
- raise Exception(
121
- f"Optimizer is not one of {self.__available_optimizers}"
122
- )
123
-
124
- payload = {
125
- "key": "asksteve",
126
- "prompt": conversation_prompt
127
- }
128
-
129
-
130
- # This API doesn't stream, so we process the full response
131
- try:
132
- response = self.session.post(
133
- self.api_endpoint,
134
- headers=self.headers,
135
- json=payload,
136
- stream=False, # API doesn't stream
137
- timeout=self.timeout,
138
- impersonate="chrome120" # Add impersonate
139
- )
140
- response.raise_for_status()
141
- response_text_raw = response.text # Get raw text
142
-
143
- # Process the full JSON text using sanitize_stream
144
- processed_stream = sanitize_stream(
145
- data=response_text_raw,
146
- to_json=True, # Parse the whole text as JSON
147
- intro_value=None,
148
- content_extractor=self._asksteve_extractor, # Use the specific extractor
149
- yield_raw_on_error=False
150
- )
151
- # Extract the single result
152
- text = next(processed_stream, None)
153
- text = text if isinstance(text, str) else "" # Ensure it's a string
154
-
155
- self.last_response.update(dict(text=text))
156
- self.conversation.update_chat_history(
157
- prompt, self.get_message(self.last_response)
158
- )
159
- # Always return a dict for consistency
160
- return {"text": text} if raw else self.last_response
161
-
162
- except CurlError as e:
163
- raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
164
- except Exception as e: # Catch other potential errors
165
- raise exceptions.FailedToGenerateResponseError(f"Failed to get response ({type(e).__name__}): {e}") from e
166
-
167
- def chat(
168
- self,
169
- prompt: str,
170
- stream: bool = False,
171
- optimizer: str = None,
172
- conversationally: bool = False,
173
- ) -> str:
174
- """Generate response `str`
175
- Args:
176
- prompt (str): Prompt to be send.
177
- stream (bool, optional): Flag for streaming response. Defaults to False.
178
- optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
179
- conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
180
- Returns:
181
- str: Response generated
182
- """
183
-
184
- response_data = self.ask(
185
- prompt,
186
- stream=False, # Always False for this API
187
- raw=False, # Get the dict back
188
- optimizer=optimizer,
189
- conversationally=conversationally,
190
- )
191
- if stream:
192
- def stream_wrapper():
193
- yield self.get_message(response_data)
194
- return stream_wrapper()
195
- else:
196
- return self.get_message(response_data)
197
-
198
- def get_message(self, response) -> str:
199
- """Retrieves message only from response
200
-
201
- Args:
202
- response (dict or str): Response generated by `self.ask` or a string
203
-
204
- Returns:
205
- str: Message extracted
206
- """
207
- if isinstance(response, dict):
208
- return response.get("text", "") # Use .get for safety
209
- elif isinstance(response, str):
210
- return response
211
- else:
212
- raise TypeError(f"Unsupported response type: {type(response)}")
213
-
214
-
215
- if __name__ == "__main__":
216
- from rich import print
217
- ai = AskSteve()
218
- response = ai.chat("write a short poem about AI", stream=True)
219
- for chunk in response:
220
- print(chunk, end="", flush=True)