webscout 7.9-py3-none-any.whl → 8.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registry.

Potentially problematic release.

This version of webscout might be problematic.
Files changed (69)
  1. webscout/Extra/GitToolkit/__init__.py +10 -0
  2. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  3. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  4. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  5. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  6. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  7. webscout/Provider/AISEARCH/DeepFind.py +1 -1
  8. webscout/Provider/AISEARCH/ISou.py +1 -1
  9. webscout/Provider/AISEARCH/__init__.py +6 -1
  10. webscout/Provider/AISEARCH/felo_search.py +1 -1
  11. webscout/Provider/AISEARCH/genspark_search.py +1 -1
  12. webscout/Provider/AISEARCH/hika_search.py +194 -0
  13. webscout/Provider/AISEARCH/iask_search.py +436 -0
  14. webscout/Provider/AISEARCH/monica_search.py +246 -0
  15. webscout/Provider/AISEARCH/scira_search.py +320 -0
  16. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  17. webscout/Provider/AllenAI.py +255 -122
  18. webscout/Provider/DeepSeek.py +1 -2
  19. webscout/Provider/Deepinfra.py +17 -9
  20. webscout/Provider/ExaAI.py +261 -0
  21. webscout/Provider/ExaChat.py +8 -1
  22. webscout/Provider/GithubChat.py +2 -1
  23. webscout/Provider/Jadve.py +2 -2
  24. webscout/Provider/Netwrck.py +3 -2
  25. webscout/Provider/OPENAI/__init__.py +17 -0
  26. webscout/Provider/OPENAI/base.py +46 -0
  27. webscout/Provider/OPENAI/c4ai.py +347 -0
  28. webscout/Provider/OPENAI/chatgptclone.py +460 -0
  29. webscout/Provider/OPENAI/deepinfra.py +284 -0
  30. webscout/Provider/OPENAI/exaai.py +419 -0
  31. webscout/Provider/OPENAI/exachat.py +421 -0
  32. webscout/Provider/OPENAI/freeaichat.py +355 -0
  33. webscout/Provider/OPENAI/glider.py +314 -0
  34. webscout/Provider/OPENAI/heckai.py +337 -0
  35. webscout/Provider/OPENAI/llmchatco.py +325 -0
  36. webscout/Provider/OPENAI/netwrck.py +348 -0
  37. webscout/Provider/OPENAI/scirachat.py +459 -0
  38. webscout/Provider/OPENAI/sonus.py +294 -0
  39. webscout/Provider/OPENAI/typegpt.py +361 -0
  40. webscout/Provider/OPENAI/utils.py +211 -0
  41. webscout/Provider/OPENAI/venice.py +428 -0
  42. webscout/Provider/OPENAI/wisecat.py +381 -0
  43. webscout/Provider/OPENAI/x0gpt.py +389 -0
  44. webscout/Provider/OPENAI/yep.py +329 -0
  45. webscout/Provider/OpenGPT.py +199 -0
  46. webscout/Provider/PI.py +39 -24
  47. webscout/Provider/Venice.py +1 -1
  48. webscout/Provider/Youchat.py +326 -296
  49. webscout/Provider/__init__.py +16 -6
  50. webscout/Provider/ai4chat.py +58 -56
  51. webscout/Provider/akashgpt.py +34 -22
  52. webscout/Provider/freeaichat.py +1 -1
  53. webscout/Provider/labyrinth.py +121 -20
  54. webscout/Provider/llmchatco.py +306 -0
  55. webscout/Provider/scira_chat.py +274 -0
  56. webscout/Provider/typefully.py +280 -0
  57. webscout/Provider/typegpt.py +3 -184
  58. webscout/prompt_manager.py +2 -1
  59. webscout/version.py +1 -1
  60. webscout/webscout_search.py +118 -54
  61. webscout/webscout_search_async.py +109 -45
  62. webscout-8.1.dist-info/METADATA +683 -0
  63. {webscout-7.9.dist-info → webscout-8.1.dist-info}/RECORD +67 -33
  64. webscout/Provider/flowith.py +0 -207
  65. webscout-7.9.dist-info/METADATA +0 -995
  66. {webscout-7.9.dist-info → webscout-8.1.dist-info}/LICENSE.md +0 -0
  67. {webscout-7.9.dist-info → webscout-8.1.dist-info}/WHEEL +0 -0
  68. {webscout-7.9.dist-info → webscout-8.1.dist-info}/entry_points.txt +0 -0
  69. {webscout-7.9.dist-info → webscout-8.1.dist-info}/top_level.txt +0 -0
webscout/Provider/ExaAI.py
@@ -0,0 +1,261 @@
+ from typing import Union, Any, Dict, Generator
+ from uuid import uuid4
+ import requests
+ import json
+ import re
+
+ from webscout.AIutel import Optimizers
+ from webscout.AIutel import Conversation
+ from webscout.AIutel import AwesomePrompts
+ from webscout.AIbase import Provider
+ from webscout import exceptions
+ from webscout.litagent import LitAgent
+
+ class ExaAI(Provider):
+     """
+     A class to interact with the o3minichat.exa.ai API.
+
+     Attributes:
+         system_prompt (str): The system prompt to define the assistant's role.
+
+     Examples:
+         >>> from webscout.Provider.ExaAI import ExaAI
+         >>> ai = ExaAI()
+         >>> response = ai.chat("What's the weather today?")
+         >>> print(response)
+         'The weather today depends on your location...'
+     """
+     AVAILABLE_MODELS = ["O3-Mini"]
+
+     def __init__(
+         self,
+         is_conversation: bool = True,
+         max_tokens: int = 600,
+         timeout: int = 30,
+         intro: str = None,
+         filepath: str = None,
+         update_file: bool = True,
+         proxies: dict = {},
+         history_offset: int = 10250,
+         act: str = None,
+         # system_prompt: str = "You are a helpful assistant.",
+         model: str = "O3-Mini",  # >>> THIS FLAG IS NOT USED <<<
+     ):
+         """
+         Initializes the ExaAI API with given parameters.
+
+         Args:
+             is_conversation (bool): Whether the provider is in conversation mode.
+             max_tokens (int): Maximum number of tokens to sample.
+             timeout (int): Timeout for API requests.
+             intro (str): Introduction message for the conversation.
+             filepath (str): Filepath for storing conversation history.
+             update_file (bool): Whether to update the conversation history file.
+             proxies (dict): Proxies for the API requests.
+             history_offset (int): Offset for conversation history.
+             act (str): Act for the conversation.
+             system_prompt (str): The system prompt to define the assistant's role.
+
+         Examples:
+             >>> ai = ExaAI(system_prompt="You are a friendly assistant.")
+             >>> print(ai.system_prompt)
+             'You are a friendly assistant.'
+         """
+         self.session = requests.Session()
+         self.is_conversation = is_conversation
+         self.max_tokens_to_sample = max_tokens
+         self.api_endpoint = "https://o3minichat.exa.ai/api/chat"
+         self.timeout = timeout
+         self.last_response = {}
+         # self.system_prompt = system_prompt
+
+         # Initialize LitAgent for user agent generation
+         self.agent = LitAgent()
+
+         self.headers = {
+             "authority": "o3minichat.exa.ai",
+             "accept": "*/*",
+             "accept-encoding": "gzip, deflate, br, zstd",
+             "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
+             "content-type": "application/json",
+             "dnt": "1",
+             "origin": "https://o3minichat.exa.ai",
+             "priority": "u=1, i",
+             "referer": "https://o3minichat.exa.ai/",
+             "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+             "sec-ch-ua-mobile": "?0",
+             "sec-ch-ua-platform": '"Windows"',
+             "sec-fetch-dest": "empty",
+             "sec-fetch-mode": "cors",
+             "sec-fetch-site": "same-origin",
+             "sec-gpc": "1",
+             "user-agent": self.agent.random()  # Use LitAgent to generate a random user agent
+         }
+
+         self.__available_optimizers = (
+             method
+             for method in dir(Optimizers)
+             if callable(getattr(Optimizers, method)) and not method.startswith("__")
+         )
+         self.session.headers.update(self.headers)
+         Conversation.intro = (
+             AwesomePrompts().get_act(
+                 act, raise_not_found=True, default=None, case_insensitive=True
+             )
+             if act
+             else intro or Conversation.intro
+         )
+         self.conversation = Conversation(
+             is_conversation, self.max_tokens_to_sample, filepath, update_file
+         )
+         self.conversation.history_offset = history_offset
+         self.session.proxies = proxies
+
+     def ask(
+         self,
+         prompt: str,
+         stream: bool = False,
+         raw: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Dict[str, Any]:
+         """
+         Sends a prompt to the o3minichat.exa.ai API and returns the response.
+
+         Args:
+             prompt (str): The prompt to send to the API.
+             stream (bool): Whether to stream the response.
+             raw (bool): Whether to return the raw response.
+             optimizer (str): Optimizer to use for the prompt.
+             conversationally (bool): Whether to generate the prompt conversationally.
+
+         Returns:
+             Dict[str, Any]: The API response.
+
+         Examples:
+             >>> ai = ExaAI()
+             >>> response = ai.ask("Tell me a joke!")
+             >>> print(response)
+             {'text': 'Why did the scarecrow win an award? Because he was outstanding in his field!'}
+         """
+         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+         if optimizer:
+             if optimizer in self.__available_optimizers:
+                 conversation_prompt = getattr(Optimizers, optimizer)(
+                     conversation_prompt if conversationally else prompt
+                 )
+             else:
+                 raise Exception(
+                     f"Optimizer is not one of {self.__available_optimizers}"
+                 )
+
+         # Generate a unique ID for the conversation
+         conversation_id = uuid4().hex[:16]
+
+         payload = {
+             "id": conversation_id,
+             "messages": [
+                 # {"role": "system", "content": self.system_prompt},  # system role not supported by this provider
+                 {"role": "user", "content": conversation_prompt}
+             ]
+         }
+
+         def for_stream():
+             response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout)
+             if not response.ok:
+                 raise exceptions.FailedToGenerateResponseError(
+                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
+                 )
+
+             streaming_response = ""
+             for line in response.iter_lines(decode_unicode=True):
+                 if line:
+                     match = re.search(r'0:"(.*?)"', line)
+                     if match:
+                         content = match.group(1)
+                         streaming_response += content
+                         yield content if raw else dict(text=content)
+
+             self.last_response.update(dict(text=streaming_response))
+             self.conversation.update_chat_history(
+                 prompt, self.get_message(self.last_response)
+             )
+
+         def for_non_stream():
+             for _ in for_stream():
+                 pass
+             return self.last_response
+
+         return for_stream() if stream else for_non_stream()
+
+     def chat(
+         self,
+         prompt: str,
+         stream: bool = False,
+         optimizer: str = None,
+         conversationally: bool = False,
+     ) -> Union[str, Generator[str, None, None]]:
+         """
+         Generates a response from the ExaAI API.
+
+         Args:
+             prompt (str): The prompt to send to the API.
+             stream (bool): Whether to stream the response.
+             optimizer (str): Optimizer to use for the prompt.
+             conversationally (bool): Whether to generate the prompt conversationally.
+
+         Returns:
+             Union[str, Generator[str, None, None]]: The API response as a string or a generator of string chunks.
+
+         Examples:
+             >>> ai = ExaAI()
+             >>> response = ai.chat("What's the weather today?")
+             >>> print(response)
+             'The weather today depends on your location...'
+         """
+
+         def for_stream():
+             for response in self.ask(
+                 prompt, True, optimizer=optimizer, conversationally=conversationally
+             ):
+                 yield self.get_message(response)
+
+         def for_non_stream():
+             return self.get_message(
+                 self.ask(
+                     prompt,
+                     False,
+                     optimizer=optimizer,
+                     conversationally=conversationally,
+                 )
+             )
+
+         return for_stream() if stream else for_non_stream()
+
+     def get_message(self, response: dict) -> str:
+         """
+         Extracts the message from the API response.
+
+         Args:
+             response (dict): The API response.
+
+         Returns:
+             str: The message content.
+
+         Examples:
+             >>> ai = ExaAI()
+             >>> response = ai.ask("Tell me a joke!")
+             >>> message = ai.get_message(response)
+             >>> print(message)
+             'Why did the scarecrow win an award? Because he was outstanding in his field!'
+         """
+         assert isinstance(response, dict), "Response should be of dict data-type only"
+         formatted_text = response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+         return formatted_text
+
+ if __name__ == "__main__":
+     from rich import print
+     ai = ExaAI(timeout=5000)
+     response = ai.chat("Tell me about HelpingAI", stream=True)
+     for chunk in response:
+         print(chunk, end="", flush=True)
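For context, here is a minimal usage sketch of the new ExaAI provider, based only on the code added above; the availability of the upstream o3minichat.exa.ai endpoint is outside the scope of this diff.

# Minimal usage sketch (assumes webscout 8.1 is installed; endpoint availability not guaranteed).
from webscout.Provider.ExaAI import ExaAI

ai = ExaAI(timeout=30)

# Non-streaming: chat() returns the full reply as a single string.
print(ai.chat("Give me a one-line summary of Exa."))

# Streaming: chat(stream=True) yields decoded text chunks as they arrive.
for chunk in ai.chat("Tell me a short joke.", stream=True):
    print(chunk, end="", flush=True)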
webscout/Provider/ExaChat.py
@@ -22,6 +22,7 @@ MODEL_CONFIGS = {
  "gemini-2.0-flash-thinking-exp-01-21",
  "gemini-2.5-pro-exp-03-25",
  "gemini-2.0-pro-exp-02-05",
+
  ],
  },
  "openrouter": {
@@ -31,6 +32,7 @@ MODEL_CONFIGS = {
  "deepseek/deepseek-r1:free",
  "deepseek/deepseek-chat-v3-0324:free",
  "google/gemma-3-27b-it:free",
+ "meta-llama/llama-4-maverick:free",
  ],
  },
  "groq": {
@@ -49,7 +51,8 @@ MODEL_CONFIGS = {
  "llama3-8b-8192",
  "qwen-2.5-32b",
  "qwen-2.5-coder-32b",
- "qwen-qwq-32b"
+ "qwen-qwq-32b",
+ "meta-llama/llama-4-scout-17b-16e-instruct"
  ],
  },
  "cerebras": {
@@ -71,6 +74,7 @@ class ExaChat(Provider):

  # Gemini Models
  "gemini-2.0-flash",
+ "gemini-2.0-flash-exp-image-generation",
  "gemini-2.0-flash-thinking-exp-01-21",
  "gemini-2.5-pro-exp-03-25",
  "gemini-2.0-pro-exp-02-05",
@@ -80,6 +84,7 @@ class ExaChat(Provider):
  "deepseek/deepseek-r1:free",
  "deepseek/deepseek-chat-v3-0324:free",
  "google/gemma-3-27b-it:free",
+ "meta-llama/llama-4-maverick:free",

  # Groq Models
  "deepseek-r1-distill-llama-70b",
@@ -96,6 +101,8 @@ class ExaChat(Provider):
  "qwen-2.5-32b",
  "qwen-2.5-coder-32b",
  "qwen-qwq-32b",
+ "meta-llama/llama-4-scout-17b-16e-instruct",
+

  # Cerebras Models
  "llama3.1-8b",
webscout/Provider/GithubChat.py
@@ -24,7 +24,8 @@ class GithubChat(Provider):
  "claude-3.5-sonnet",
  "claude-3.7-sonnet",
  "claude-3.7-sonnet-thought",
- "gemini-2.0-flash-001"
+ "gemini-2.0-flash-001",
+ "gemini-2.5-pro"

  ]

webscout/Provider/Jadve.py
@@ -13,7 +13,7 @@ class JadveOpenAI(Provider):
  A class to interact with the OpenAI API through jadve.com using the streaming endpoint.
  """

- AVAILABLE_MODELS = ["gpt-4o", "gpt-4o-mini", "claude-3-7-sonnet-20250219", "claude-3-5-sonnet-20240620", "o1-mini", "deepseek-chat", "o1-mini", "claude-3-5-haiku-20241022"]
+ AVAILABLE_MODELS = ["gpt-4o-mini"]

  def __init__(
  self,
@@ -26,7 +26,7 @@ class JadveOpenAI(Provider):
  proxies: dict = {},
  history_offset: int = 10250,
  act: str = None,
- model: str = "claude-3-7-sonnet-20250219",
+ model: str = "gpt-4o-mini",
  system_prompt: str = "You are a helpful AI assistant."
  ):
  """
webscout/Provider/Netwrck.py
@@ -25,8 +25,9 @@ class Netwrck(Provider):
  "gryphe/mythomax-l2-13b",
  "google/gemini-pro-1.5",
  "nvidia/llama-3.1-nemotron-70b-instruct",
- "deepseek-r1",
- "deepseek",
+ "deepseek/deepseek-r1",
+ "deepseek/deepseek-chat"
+
  ]

  def __init__(
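Several providers in this release only gain or change model identifiers. Assuming those providers accept a model keyword in their constructors, as the other webscout providers in this diff do (the constructor signatures are not part of these hunks), selecting the new entries might look like the sketch below; only the model strings are taken from the diff.

# Hedged sketch: the constructor signatures are assumed; the model names come from this diff.
from webscout.Provider.ExaChat import ExaChat
from webscout.Provider.GithubChat import GithubChat

exa = ExaChat(model="meta-llama/llama-4-maverick:free")   # new OpenRouter entry in ExaChat
gh = GithubChat(model="gemini-2.5-pro")                   # newly listed GithubChat model
print(exa.chat("Hello"))
print(gh.chat("Hello"))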
webscout/Provider/OPENAI/__init__.py
@@ -0,0 +1,17 @@
+ # This file marks the directory as a Python package.
+ from .deepinfra import *
+ from .glider import *
+ from .chatgptclone import *
+ from .x0gpt import *
+ from .wisecat import *
+ from .venice import *
+ from .exaai import *
+ from .typegpt import *
+ from .scirachat import *
+ from .freeaichat import *
+ from .llmchatco import *
+ from .yep import *  # Add YEPCHAT
+ from .heckai import *
+ from .sonus import *
+ from .exachat import *
+ from .netwrck import *
webscout/Provider/OPENAI/base.py
@@ -0,0 +1,46 @@
+ from abc import ABC, abstractmethod
+ from typing import List, Dict, Optional, Union, Generator, Any
+
+ # Re-define or import necessary response structure classes (like ChatCompletion, ChatCompletionChunk)
+ # For simplicity, we'll assume they are defined elsewhere or passed directly.
+ # You might want to define base versions of these classes here as well.
+
+ class BaseChatCompletionChunk:  # Placeholder
+     pass
+ class BaseChatCompletion:  # Placeholder
+     pass
+
+
+ class BaseCompletions(ABC):
+     @abstractmethod
+     def create(
+         self,
+         *,
+         model: str,
+         messages: List[Dict[str, str]],
+         max_tokens: Optional[int] = None,
+         stream: bool = False,
+         temperature: Optional[float] = None,
+         top_p: Optional[float] = None,
+         **kwargs: Any
+     ) -> Union[BaseChatCompletion, Generator[BaseChatCompletionChunk, None, None]]:
+         """Abstract method to create chat completions."""
+         raise NotImplementedError
+
+
+ class BaseChat(ABC):
+     completions: BaseCompletions
+
+
+ class OpenAICompatibleProvider(ABC):
+     """
+     Abstract Base Class for providers mimicking the OpenAI Python client structure.
+     Requires a nested 'chat.completions' structure.
+     """
+     chat: BaseChat
+
+     @abstractmethod
+     def __init__(self, api_key: Optional[str] = None, **kwargs: Any):
+         """Initialize the provider, potentially with an API key."""
+         raise NotImplementedError
+
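The base classes above only fix the client shape (provider.chat.completions.create(...)); the concrete modules added in this release (deepinfra.py, exaai.py, and so on) supply the backends. As a rough illustration of how the pieces compose, the sketch below wires a hypothetical EchoProvider into this structure; EchoProvider, EchoChat, EchoCompletions, and the .text attribute are invented for the example and are not part of webscout.

# Illustrative sketch only: a made-up provider built on the new base classes.
from typing import Any, Dict, Generator, List, Optional, Union
from webscout.Provider.OPENAI.base import (
    BaseChat, BaseChatCompletion, BaseChatCompletionChunk,
    BaseCompletions, OpenAICompatibleProvider,
)

class EchoCompletions(BaseCompletions):
    def create(
        self, *, model: str, messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None, stream: bool = False,
        temperature: Optional[float] = None, top_p: Optional[float] = None,
        **kwargs: Any,
    ) -> Union[BaseChatCompletion, Generator[BaseChatCompletionChunk, None, None]]:
        # A real provider would call its HTTP backend here; this one just echoes the last user message.
        completion = BaseChatCompletion()
        completion.text = messages[-1]["content"] if messages else ""  # invented attribute, for the demo only
        return completion

class EchoChat(BaseChat):
    def __init__(self):
        self.completions = EchoCompletions()

class EchoProvider(OpenAICompatibleProvider):
    def __init__(self, api_key: Optional[str] = None, **kwargs: Any):
        self.chat = EchoChat()

client = EchoProvider()
print(client.chat.completions.create(model="echo", messages=[{"role": "user", "content": "hi"}]).text)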