webscout-7.8-py3-none-any.whl → webscout-7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout has been flagged as potentially problematic.

Files changed (41)
  1. webscout/Bard.py +5 -25
  2. webscout/DWEBS.py +476 -476
  3. webscout/Extra/__init__.py +2 -0
  4. webscout/Extra/autocoder/__init__.py +1 -1
  5. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
  6. webscout/Extra/tempmail/__init__.py +26 -0
  7. webscout/Extra/tempmail/async_utils.py +141 -0
  8. webscout/Extra/tempmail/base.py +156 -0
  9. webscout/Extra/tempmail/cli.py +187 -0
  10. webscout/Extra/tempmail/mail_tm.py +361 -0
  11. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  12. webscout/Provider/Deepinfra.py +288 -286
  13. webscout/Provider/ElectronHub.py +709 -716
  14. webscout/Provider/ExaChat.py +20 -5
  15. webscout/Provider/Gemini.py +167 -165
  16. webscout/Provider/Groq.py +38 -24
  17. webscout/Provider/LambdaChat.py +2 -1
  18. webscout/Provider/TextPollinationsAI.py +232 -230
  19. webscout/Provider/__init__.py +0 -4
  20. webscout/Provider/copilot.py +427 -427
  21. webscout/Provider/freeaichat.py +8 -1
  22. webscout/Provider/uncovr.py +312 -299
  23. webscout/Provider/yep.py +64 -12
  24. webscout/__init__.py +38 -36
  25. webscout/cli.py +293 -293
  26. webscout/conversation.py +350 -17
  27. webscout/litprinter/__init__.py +59 -667
  28. webscout/optimizers.py +419 -419
  29. webscout/update_checker.py +14 -12
  30. webscout/version.py +1 -1
  31. webscout/webscout_search.py +1282 -1282
  32. webscout/webscout_search_async.py +813 -813
  33. {webscout-7.8.dist-info → webscout-7.9.dist-info}/METADATA +44 -39
  34. {webscout-7.8.dist-info → webscout-7.9.dist-info}/RECORD +38 -35
  35. webscout/Provider/DARKAI.py +0 -225
  36. webscout/Provider/EDITEE.py +0 -192
  37. webscout/litprinter/colors.py +0 -54
  38. {webscout-7.8.dist-info → webscout-7.9.dist-info}/LICENSE.md +0 -0
  39. {webscout-7.8.dist-info → webscout-7.9.dist-info}/WHEEL +0 -0
  40. {webscout-7.8.dist-info → webscout-7.9.dist-info}/entry_points.txt +0 -0
  41. {webscout-7.8.dist-info → webscout-7.9.dist-info}/top_level.txt +0 -0
webscout/Provider/ExaChat.py CHANGED
@@ -18,6 +18,7 @@ MODEL_CONFIGS = {
         "endpoint": "https://exa-chat.vercel.app/api/gemini",
         "models": [
             "gemini-2.0-flash",
+            "gemini-2.0-flash-exp-image-generation",
             "gemini-2.0-flash-thinking-exp-01-21",
             "gemini-2.5-pro-exp-03-25",
             "gemini-2.0-pro-exp-02-05",
@@ -51,6 +52,13 @@ MODEL_CONFIGS = {
             "qwen-qwq-32b"
         ],
     },
+    "cerebras": {
+        "endpoint": "https://exa-chat.vercel.app/api/cerebras",
+        "models": [
+            "llama3.1-8b",
+            "llama-3.3-70b"
+        ],
+    },
 }
 
 class ExaChat(Provider):
@@ -87,7 +95,11 @@ class ExaChat(Provider):
         "llama3-8b-8192",
         "qwen-2.5-32b",
         "qwen-2.5-coder-32b",
-        "qwen-qwq-32b"
+        "qwen-qwq-32b",
+
+        # Cerebras Models
+        "llama3.1-8b",
+        "llama-3.3-70b"
     ]
 
     def __init__(
@@ -206,6 +218,12 @@ class ExaChat(Provider):
                 "model": self.model,
                 "messages": []
             }
+        elif self.provider == "cerebras":
+            return {
+                "query": conversation_prompt,
+                "model": self.model,
+                "messages": []
+            }
         else:  # openrouter or groq
             return {
                 "query": conversation_prompt + "\n",  # Add newline for openrouter and groq models
@@ -246,10 +264,7 @@ class ExaChat(Provider):
                         full_response += content
                     except json.JSONDecodeError:
                         continue
-
-        if not raw:
-            print()  # New line after response
-
+
         self.last_response = {"text": full_response}
         self.conversation.update_chat_history(prompt, full_response)
         return self.last_response
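
For orientation, a minimal usage sketch of the new cerebras route follows. It assumes ExaChat exposes the same chat(prompt) interface as the other Provider subclasses in this release and that the constructor's model argument selects the matching MODEL_CONFIGS entry; neither detail is confirmed by the hunks above.

    # Hypothetical sketch -- exercises the new "cerebras" entry in MODEL_CONFIGS.
    # The chat() call and the model keyword are assumptions, not shown in this diff.
    from webscout.Provider.ExaChat import ExaChat

    bot = ExaChat(model="llama-3.3-70b")  # listed under "# Cerebras Models" above
    print(bot.chat("Summarize wafer-scale inference in one sentence."))
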
webscout/Provider/Gemini.py CHANGED
@@ -1,165 +1,167 @@
-from os import path
-from json import load, dumps
-import warnings
-from typing import Union, Any, Dict
-
-# Import internal modules and dependencies
-from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from ..Bard import Chatbot, Model
-
-warnings.simplefilter("ignore", category=UserWarning)
-
-# Define model aliases for easy usage
-MODEL_ALIASES: Dict[str, Model] = {
-    "unspecified": Model.UNSPECIFIED,
-    "flash": Model.G_2_0_FLASH,
-    "flash-exp": Model.G_2_0_FLASH_EXP,
-    "thinking": Model.G_2_0_FLASH_THINKING,
-    "thinking-with-apps": Model.G_2_0_FLASH_THINKING_WITH_APPS,
-    "exp-advanced": Model.G_2_0_EXP_ADVANCED,
-    "2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
-    "1.5-flash": Model.G_1_5_FLASH,
-    "1.5-pro": Model.G_1_5_PRO,
-    "1.5-pro-research": Model.G_1_5_PRO_RESEARCH,
-}
-
-# List of available models (friendly names)
-AVAILABLE_MODELS = list(MODEL_ALIASES.keys())
-
-class GEMINI(Provider):
-    def __init__(
-        self,
-        cookie_file: str,
-        model: str = "flash",  # Accepts either a Model enum or a str alias.
-        proxy: dict = {},
-        timeout: int = 30,
-    ):
-        """
-        Initializes GEMINI with model support.
-
-        Args:
-            cookie_file (str): Path to the cookies JSON file.
-            model (Model or str): Selected model for the session. Can be a Model enum
-                or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
-                exp-advanced, 2.5-exp-advanced, 2.5-pro, 1.5-flash, 1.5-pro, 1.5-pro-research.
-            proxy (dict, optional): HTTP request proxy. Defaults to {}.
-            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
-        """
-        self.conversation = Conversation(False)
-
-        # Ensure cookie_file existence.
-        if not isinstance(cookie_file, str):
-            raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
-        if not path.isfile(cookie_file):
-            raise Exception(f"{cookie_file} is not a valid file path")
-
-        # If model is provided as alias (str), convert to Model enum.
-        if isinstance(model, str):
-            alias = model.lower()
-            if alias in MODEL_ALIASES:
-                selected_model = MODEL_ALIASES[alias]
-            else:
-                raise Exception(f"Unknown model alias: '{model}'. Available aliases: {', '.join(AVAILABLE_MODELS)}")
-        elif isinstance(model, Model):
-            selected_model = model
-        else:
-            raise TypeError("model must be a string alias or an instance of Model")
-
-        # Initialize the Chatbot session using the cookie file.
-        self.session = Chatbot(cookie_file, proxy, timeout, selected_model)
-        self.last_response = {}
-        self.__available_optimizers = (
-            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        # Store cookies from Chatbot for later use (e.g. image generation)
-        self.session_auth1 = self.session.secure_1psid
-        self.session_auth2 = self.session.secure_1psidts
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI.
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name (e.g., 'code', 'shell_command'). Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-        Returns:
-            dict: Response generated by the underlying Chatbot.
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {', '.join(self.__available_optimizers)}")
-
-        def for_stream():
-            response = self.session.ask(prompt)
-            self.last_response.update(response)
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-            yield dumps(response) if raw else response
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response text.
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-        Returns:
-            str: Response generated.
-        """
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message content from the response.
-
-        Args:
-            response (dict): Response generated by `self.ask`.
-
-        Returns:
-            str: Extracted message content.
-        """
-        if not isinstance(response, dict):
-            raise TypeError("Response should be of type dict")
-        return response["content"]
-
-    def reset(self):
-        """Reset the current conversation."""
-        self.session.async_chatbot.conversation_id = ""
-        self.session.async_chatbot.response_id = ""
-        self.session.async_chatbot.choice_id = ""
+from os import path
+from json import load, dumps
+import warnings
+from typing import Union, Any, Dict
+
+# Import internal modules and dependencies
+from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
+from ..AIbase import Provider, AsyncProvider
+from ..Bard import Chatbot, Model
+
+warnings.simplefilter("ignore", category=UserWarning)
+
+# Define model aliases for easy usage
+MODEL_ALIASES: Dict[str, Model] = {
+    "unspecified": Model.UNSPECIFIED,
+    "gemini-2.0-flash": Model.G_2_0_FLASH,
+    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
+    "gemini-2.5-pro": Model.G_2_5_PRO,
+    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
+    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
+    # Add shorter aliases for convenience
+    "flash": Model.G_2_0_FLASH,
+    "thinking": Model.G_2_0_FLASH_THINKING,
+    "pro": Model.G_2_5_PRO,
+    "advanced": Model.G_2_0_EXP_ADVANCED,
+    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
+}
+
+# List of available models (friendly names)
+AVAILABLE_MODELS = list(MODEL_ALIASES.keys())
+
+class GEMINI(Provider):
+    def __init__(
+        self,
+        cookie_file: str,
+        model: str = "flash",  # Accepts either a Model enum or a str alias.
+        proxy: dict = {},
+        timeout: int = 30,
+    ):
+        """
+        Initializes GEMINI with model support.
+
+        Args:
+            cookie_file (str): Path to the cookies JSON file.
+            model (Model or str): Selected model for the session. Can be a Model enum
+                or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
+                exp-advanced, 2.5-exp-advanced, 2.5-pro, 1.5-flash, 1.5-pro, 1.5-pro-research.
+            proxy (dict, optional): HTTP request proxy. Defaults to {}.
+            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
+        """
+        self.conversation = Conversation(False)
+
+        # Ensure cookie_file existence.
+        if not isinstance(cookie_file, str):
+            raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
+        if not path.isfile(cookie_file):
+            raise Exception(f"{cookie_file} is not a valid file path")
+
+        # If model is provided as alias (str), convert to Model enum.
+        if isinstance(model, str):
+            alias = model.lower()
+            if alias in MODEL_ALIASES:
+                selected_model = MODEL_ALIASES[alias]
+            else:
+                raise Exception(f"Unknown model alias: '{model}'. Available aliases: {', '.join(AVAILABLE_MODELS)}")
+        elif isinstance(model, Model):
+            selected_model = model
+        else:
+            raise TypeError("model must be a string alias or an instance of Model")
+
+        # Initialize the Chatbot session using the cookie file.
+        self.session = Chatbot(cookie_file, proxy, timeout, selected_model)
+        self.last_response = {}
+        self.__available_optimizers = (
+            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        # Store cookies from Chatbot for later use (e.g. image generation)
+        self.session_auth1 = self.session.secure_1psid
+        self.session_auth2 = self.session.secure_1psidts
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name (e.g., 'code', 'shell_command'). Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            dict: Response generated by the underlying Chatbot.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(f"Optimizer is not one of {', '.join(self.__available_optimizers)}")
+
+        def for_stream():
+            response = self.session.ask(prompt)
+            self.last_response.update(response)
+            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
+            yield dumps(response) if raw else response
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response text.
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+
+        Returns:
+            str: Response generated.
+        """
+        def for_stream():
+            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message content from the response.
+
+        Args:
+            response (dict): Response generated by `self.ask`.
+
+        Returns:
+            str: Extracted message content.
+        """
+        if not isinstance(response, dict):
+            raise TypeError("Response should be of type dict")
+        return response["content"]
+
+    def reset(self):
+        """Reset the current conversation."""
+        self.session.async_chatbot.conversation_id = ""
+        self.session.async_chatbot.response_id = ""
+        self.session.async_chatbot.choice_id = ""
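
The rewritten alias table means both long and short names resolve to the same Model enum members. A short usage example grounded in the code above (the import path is inferred from the file layout, and cookies.json is a placeholder):

    from webscout.Provider.Gemini import GEMINI  # path inferred from webscout/Provider/Gemini.py

    # "pro" and "gemini-2.5-pro" both resolve to Model.G_2_5_PRO in the new table.
    ai = GEMINI(cookie_file="cookies.json", model="pro")  # placeholder cookie file
    print(ai.chat("Hello, Gemini!"))

    # Streaming: chat(stream=True) yields message chunks as they arrive.
    for chunk in ai.chat("Tell me a joke.", stream=True):
        print(chunk, end="", flush=True)
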
webscout/Provider/Groq.py CHANGED
@@ -16,26 +16,31 @@ class GROQ(Provider):
     """
 
     AVAILABLE_MODELS = [
-        # "whisper-large-v3",
+        "distil-whisper-large-v3-en",
+        "gemma2-9b-it",
+        "llama-3.3-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b",
         "llama3-70b-8192",
-        "qwen-2.5-32b",
+        "llama3-8b-8192",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "playai-tts",
+        "playai-tts-arabic",
+        "qwen-qwq-32b",
+        "mistral-saba-24b",
         "qwen-2.5-coder-32b",
+        "qwen-2.5-32b",
         "deepseek-r1-distill-qwen-32b",
         "deepseek-r1-distill-llama-70b",
+        "llama-3.3-70b-specdec",
+        "llama-3.2-1b-preview",
         "llama-3.2-3b-preview",
-        "gemma2-9b-it",
-        "llama-3.2-11b-vision-preview",
-        "llama3-8b-8192",
-        "llama-3.3-70b-versatile",
         "llama-3.2-11b-vision-preview",
-        # "distil-whisper-large-v3-en",
-        "mixtral-8x7b-32768",
-        "llama-3.3-70b-specdec",
         "llama-3.2-90b-vision-preview",
-        "llama-3.2-1b-preview",
-        # "whisper-large-v3-turbo",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b"
+        "mixtral-8x7b-32768"
     ]
 
     def __init__(
@@ -345,22 +350,31 @@ class AsyncGROQ(AsyncProvider):
     """
 
     AVAILABLE_MODELS = [
-        # "whisper-large-v3",
-        "llama3-70b-8192",
-        "llama-3.2-3b-preview",
+        "distil-whisper-large-v3-en",
         "gemma2-9b-it",
-        "llama-3.2-11b-vision-preview",
-        "llama3-8b-8192",
         "llama-3.3-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-guard-3-8b",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+        "meta-llama/llama-4-scout-17b-16e-instruct",
+        "meta-llama/llama-4-maverick-17b-128e-instruct",
+        "playai-tts",
+        "playai-tts-arabic",
+        "qwen-qwq-32b",
+        "mistral-saba-24b",
+        "qwen-2.5-coder-32b",
+        "qwen-2.5-32b",
+        "deepseek-r1-distill-qwen-32b",
         "deepseek-r1-distill-llama-70b",
-        # "distil-whisper-large-v3-en",
-        "mixtral-8x7b-32768",
         "llama-3.3-70b-specdec",
-        "llama-3.2-90b-vision-preview",
         "llama-3.2-1b-preview",
-        # "whisper-large-v3-turbo",
-        "llama-3.1-8b-instant",
-        "llama-guard-3-8b"
+        "llama-3.2-3b-preview",
+        "llama-3.2-11b-vision-preview",
+        "llama-3.2-90b-vision-preview",
+        "mixtral-8x7b-32768"
    ]
 
     def __init__(
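
The sync and async lists are now identical: the previously commented-out Whisper entries become first-class models, and Llama 4, PlayAI TTS, and mistral-saba-24b are added. A hedged sketch of validating a model against the class-level list (AVAILABLE_MODELS is the only attribute this diff shows; the constructor signature is not):

    from webscout.Provider.Groq import GROQ  # path inferred from webscout/Provider/Groq.py

    wanted = "meta-llama/llama-4-scout-17b-16e-instruct"  # new in 7.9
    if wanted in GROQ.AVAILABLE_MODELS:
        print(f"{wanted} is advertised by this release")
    else:
        raise ValueError(f"pick one of: {', '.join(GROQ.AVAILABLE_MODELS)}")
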
webscout/Provider/LambdaChat.py CHANGED
@@ -24,7 +24,8 @@ class LambdaChat(Provider):
         "hermes-3-llama-3.1-405b-fp8",
         "llama3.1-nemotron-70b-instruct",
         "lfm-40b",
-        "llama3.3-70b-instruct-fp8"
+        "llama3.3-70b-instruct-fp8",
+        "qwen25-coder-32b-instruct"
     ]
 
     def __init__(
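
The same class-attribute check applies to LambdaChat's single addition (import path inferred; nothing beyond AVAILABLE_MODELS is shown in this hunk):

    from webscout.Provider.LambdaChat import LambdaChat

    # qwen25-coder-32b-instruct is the only model LambdaChat gains in 7.9.
    assert "qwen25-coder-32b-instruct" in LambdaChat.AVAILABLE_MODELS
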