webscout-7.8-py3-none-any.whl → webscout-8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (66)
  1. webscout/Bard.py +5 -25
  2. webscout/DWEBS.py +476 -476
  3. webscout/Extra/GitToolkit/__init__.py +10 -0
  4. webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
  5. webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
  6. webscout/Extra/GitToolkit/gitapi/user.py +96 -0
  7. webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
  8. webscout/Extra/YTToolkit/ytapi/video.py +232 -103
  9. webscout/Extra/__init__.py +2 -0
  10. webscout/Extra/autocoder/__init__.py +1 -1
  11. webscout/Extra/autocoder/{rawdog.py → autocoder.py} +849 -849
  12. webscout/Extra/tempmail/__init__.py +26 -0
  13. webscout/Extra/tempmail/async_utils.py +141 -0
  14. webscout/Extra/tempmail/base.py +156 -0
  15. webscout/Extra/tempmail/cli.py +187 -0
  16. webscout/Extra/tempmail/mail_tm.py +361 -0
  17. webscout/Extra/tempmail/temp_mail_io.py +292 -0
  18. webscout/Provider/AISEARCH/__init__.py +5 -1
  19. webscout/Provider/AISEARCH/hika_search.py +194 -0
  20. webscout/Provider/AISEARCH/monica_search.py +246 -0
  21. webscout/Provider/AISEARCH/scira_search.py +320 -0
  22. webscout/Provider/AISEARCH/webpilotai_search.py +281 -0
  23. webscout/Provider/AllenAI.py +255 -122
  24. webscout/Provider/DeepSeek.py +1 -2
  25. webscout/Provider/Deepinfra.py +296 -286
  26. webscout/Provider/ElectronHub.py +709 -716
  27. webscout/Provider/ExaAI.py +261 -0
  28. webscout/Provider/ExaChat.py +28 -6
  29. webscout/Provider/Gemini.py +167 -165
  30. webscout/Provider/GithubChat.py +2 -1
  31. webscout/Provider/Groq.py +38 -24
  32. webscout/Provider/LambdaChat.py +2 -1
  33. webscout/Provider/Netwrck.py +3 -2
  34. webscout/Provider/OpenGPT.py +199 -0
  35. webscout/Provider/PI.py +39 -24
  36. webscout/Provider/TextPollinationsAI.py +232 -230
  37. webscout/Provider/Youchat.py +326 -296
  38. webscout/Provider/__init__.py +10 -4
  39. webscout/Provider/ai4chat.py +58 -56
  40. webscout/Provider/akashgpt.py +34 -22
  41. webscout/Provider/copilot.py +427 -427
  42. webscout/Provider/freeaichat.py +9 -2
  43. webscout/Provider/labyrinth.py +121 -20
  44. webscout/Provider/llmchatco.py +306 -0
  45. webscout/Provider/scira_chat.py +271 -0
  46. webscout/Provider/typefully.py +280 -0
  47. webscout/Provider/uncovr.py +312 -299
  48. webscout/Provider/yep.py +64 -12
  49. webscout/__init__.py +38 -36
  50. webscout/cli.py +293 -293
  51. webscout/conversation.py +350 -17
  52. webscout/litprinter/__init__.py +59 -667
  53. webscout/optimizers.py +419 -419
  54. webscout/update_checker.py +14 -12
  55. webscout/version.py +1 -1
  56. webscout/webscout_search.py +1346 -1282
  57. webscout/webscout_search_async.py +877 -813
  58. {webscout-7.8.dist-info → webscout-8.0.dist-info}/METADATA +44 -39
  59. {webscout-7.8.dist-info → webscout-8.0.dist-info}/RECORD +63 -46
  60. webscout/Provider/DARKAI.py +0 -225
  61. webscout/Provider/EDITEE.py +0 -192
  62. webscout/litprinter/colors.py +0 -54
  63. {webscout-7.8.dist-info → webscout-8.0.dist-info}/LICENSE.md +0 -0
  64. {webscout-7.8.dist-info → webscout-8.0.dist-info}/WHEEL +0 -0
  65. {webscout-7.8.dist-info → webscout-8.0.dist-info}/entry_points.txt +0 -0
  66. {webscout-7.8.dist-info → webscout-8.0.dist-info}/top_level.txt +0 -0
@@ -15,12 +15,12 @@ from .Phind import Phindv2
15
15
  from .ai4chat import *
16
16
  from .Gemini import GEMINI
17
17
  from .Deepinfra import DeepInfra
18
+ from .typefully import *
18
19
  from .cleeai import *
19
20
  from .OLLAMA import OLLAMA
20
21
  from .Andi import AndiSearch
21
22
  from .PizzaGPT import *
22
23
  from .Llama3 import *
23
- from .DARKAI import *
24
24
  from .koala import *
25
25
  from .meta import *
26
26
  from .julius import *
@@ -29,7 +29,6 @@ from .yep import *
29
29
  from .Cloudflare import *
30
30
  from .turboseek import *
31
31
  from .Free2GPT import *
32
- from .EDITEE import *
33
32
  from .TeachAnything import *
34
33
  from .AI21 import *
35
34
  from .Chatify import *
@@ -49,6 +48,7 @@ from .aimathgpt import *
49
48
  from .gaurish import *
50
49
  from .geminiprorealtime import *
51
50
  from .llmchat import *
51
+ from .llmchatco import LLMChatCo # Add new LLMChat.co provider
52
52
  from .talkai import *
53
53
  from .askmyai import *
54
54
  from .llama3mitril import *
@@ -89,12 +89,18 @@ from .ExaChat import *
89
89
  from .asksteve import *
90
90
  from .Aitopia import *
91
91
  from .searchchat import *
92
+ from .ExaAI import ExaAI
93
+ from .OpenGPT import OpenGPT
94
+ from .scira_chat import *
92
95
  __all__ = [
93
96
  'LLAMA',
97
+ 'SciraAI',
94
98
  'LabyrinthAI',
99
+ 'OpenGPT',
95
100
  'Flowith',
96
101
  'C4ai',
97
102
  'Venice',
103
+ 'ExaAI',
98
104
  'Copilot',
99
105
  'HuggingFaceChat',
100
106
  'TwoAI',
@@ -128,7 +134,6 @@ __all__ = [
128
134
  'AndiSearch',
129
135
  'PIZZAGPT',
130
136
  'Sambanova',
131
- 'DARKAI',
132
137
  'KOALA',
133
138
  'Meta',
134
139
  'AskMyAI',
@@ -138,7 +143,6 @@ __all__ = [
138
143
  'YEPCHAT',
139
144
  'Cloudflare',
140
145
  'TurboSeek',
141
- 'Editee',
142
146
  'TeachAnything',
143
147
  'AI21',
144
148
  'Chatify',
@@ -150,6 +154,7 @@ __all__ = [
150
154
  'Cleeai',
151
155
  'Elmo',
152
156
  'ChatGPTClone',
157
+ 'TypefullyAI',
153
158
  'Free2GPT',
154
159
  'GPTWeb',
155
160
  'Netwrck',
@@ -162,6 +167,7 @@ __all__ = [
162
167
  'GaurishCerebras',
163
168
  'GeminiPro',
164
169
  'LLMChat',
170
+ 'LLMChatCo',
165
171
  'Talkai',
166
172
  'Llama3Mitril',
167
173
  'Marcus',
@@ -1,7 +1,5 @@
1
1
  import requests
2
- import json
3
- import html
4
- import re
2
+ import urllib.parse
5
3
  from typing import Union, Any, Dict
6
4
 
7
5
  from webscout.AIutel import Optimizers
@@ -11,7 +9,7 @@ from webscout.AIbase import Provider
11
9
 
12
10
  class AI4Chat(Provider):
13
11
  """
14
- A class to interact with the AI4Chat API.
12
+ A class to interact with the AI4Chat Riddle API.
15
13
  """
16
14
 
17
15
  def __init__(
@@ -26,6 +24,8 @@ class AI4Chat(Provider):
26
24
  history_offset: int = 10250,
27
25
  act: str = None,
28
26
  system_prompt: str = "You are a helpful and informative AI assistant.",
27
+ country: str = "Asia",
28
+ user_id: str = "usersmjb2oaz7y"
29
29
  ) -> None:
30
30
  """
31
31
  Initializes the AI4Chat API with given parameters.
@@ -41,34 +41,30 @@ class AI4Chat(Provider):
41
41
  history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
42
42
  act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
43
43
  system_prompt (str, optional): System prompt to guide the AI's behavior. Defaults to "You are a helpful and informative AI assistant.".
44
+ country (str, optional): Country parameter for API. Defaults to "Asia".
45
+ user_id (str, optional): User ID for API. Defaults to "usersmjb2oaz7y".
44
46
  """
45
47
  self.session = requests.Session()
46
48
  self.is_conversation = is_conversation
47
49
  self.max_tokens_to_sample = max_tokens
48
- self.api_endpoint = "https://www.ai4chat.co/generate-response"
50
+ self.api_endpoint = "https://yw85opafq6.execute-api.us-east-1.amazonaws.com/default/boss_mode_15aug"
49
51
  self.timeout = timeout
50
52
  self.last_response = {}
53
+ self.country = country
54
+ self.user_id = user_id
51
55
  self.headers = {
52
- "authority": "www.ai4chat.co",
53
- "method": "POST",
54
- "path": "/generate-response",
55
- "scheme": "https",
56
- "accept": "*/*",
57
- "accept-encoding": "gzip, deflate, br, zstd",
58
- "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
59
- "content-type": "application/json",
60
- "cookie": "messageCount=1",
61
- "dnt": "1",
62
- "origin": "https://www.ai4chat.co",
63
- "priority": "u=1, i",
64
- "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
65
- "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
66
- "sec-ch-ua-mobile": "?0",
67
- "sec-ch-ua-platform": '"Windows"',
68
- "sec-fetch-dest": "empty",
69
- "sec-fetch-mode": "cors",
70
- "sec-fetch-site": "same-origin",
71
- "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0"
56
+ "Accept": "*/*",
57
+ "Accept-Language": "id-ID,id;q=0.9",
58
+ "Origin": "https://www.ai4chat.co",
59
+ "Priority": "u=1, i",
60
+ "Referer": "https://www.ai4chat.co/",
61
+ "Sec-CH-UA": '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
62
+ "Sec-CH-UA-Mobile": "?1",
63
+ "Sec-CH-UA-Platform": '"Android"',
64
+ "Sec-Fetch-Dest": "empty",
65
+ "Sec-Fetch-Mode": "cors",
66
+ "Sec-Fetch-Site": "cross-site",
67
+ "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36"
72
68
  }
73
69
 
74
70
  self.__available_optimizers = (
@@ -98,16 +94,20 @@ class AI4Chat(Provider):
98
94
  raw: bool = False,
99
95
  optimizer: str = None,
100
96
  conversationally: bool = False,
97
+ country: str = None,
98
+ user_id: str = None,
101
99
  ) -> Dict[str, Any]:
102
100
  """
103
101
  Sends a prompt to the AI4Chat API and returns the response.
104
102
 
105
103
  Args:
106
104
  prompt: The text prompt to generate text from.
107
- stream (bool, optional): Not used (AI4Chat doesn't support streaming).
105
+ stream (bool, optional): Not supported. Defaults to False.
108
106
  raw (bool, optional): Whether to return the raw response. Defaults to False.
109
107
  optimizer (str, optional): The name of the optimizer to use. Defaults to None.
110
108
  conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
109
+ country (str, optional): Country parameter for API. Defaults to None.
110
+ user_id (str, optional): User ID for API. Defaults to None.
111
111
 
112
112
  Returns:
113
113
  dict: A dictionary containing the AI's response.
@@ -123,51 +123,52 @@ class AI4Chat(Provider):
123
123
  f"Optimizer is not one of {self.__available_optimizers}"
124
124
  )
125
125
 
126
- payload = {
127
- "messages": [
128
- {"role": "system", "content": self.system_prompt},
129
- {"role": "user", "content": conversation_prompt}
130
- ]
131
- }
132
-
133
- response = self.session.post(self.api_endpoint, headers=self.headers, json=payload, timeout=self.timeout)
126
+ # Use provided values or defaults
127
+ country_param = country or self.country
128
+ user_id_param = user_id or self.user_id
129
+
130
+ # Build the URL with parameters
131
+ encoded_text = urllib.parse.quote(conversation_prompt)
132
+ encoded_country = urllib.parse.quote(country_param)
133
+ encoded_user_id = urllib.parse.quote(user_id_param)
134
+
135
+ url = f"{self.api_endpoint}?text={encoded_text}&country={encoded_country}&user_id={encoded_user_id}"
136
+
137
+ response = self.session.get(url, headers=self.headers, timeout=self.timeout)
134
138
  if not response.ok:
135
139
  raise Exception(f"Failed to generate response: {response.status_code} - {response.reason}")
136
140
 
137
- response_data = response.json()
138
- message_content = response_data.get('message', 'No message found')
139
-
140
- # Decode HTML entities
141
- decoded_message = html.unescape(message_content)
142
-
143
- # Remove HTML tags while preserving newlines and list structure
144
- cleaned_text = re.sub(r'<p>(.*?)</p>', r'\1\n\n', decoded_message)
145
- cleaned_text = re.sub(r'<ol>|</ol>', '', cleaned_text)
146
- cleaned_text = re.sub(r'<li><p>(.*?)</p></li>', r'• \1\n', cleaned_text)
147
- cleaned_text = re.sub(r'</?[^>]+>', '', cleaned_text)
141
+ response_text = response.text
148
142
 
149
- # Remove extra newlines
150
- cleaned_text = re.sub(r'\n{3,}', '\n\n', cleaned_text.strip())
151
-
152
- self.last_response.update(dict(text=cleaned_text))
153
- self.conversation.update_chat_history(prompt, cleaned_text)
143
+ # Remove quotes from the start and end of the response
144
+ if response_text.startswith('"'):
145
+ response_text = response_text[1:]
146
+ if response_text.endswith('"'):
147
+ response_text = response_text[:-1]
148
+
149
+ self.last_response.update(dict(text=response_text))
150
+ self.conversation.update_chat_history(prompt, response_text)
154
151
  return self.last_response
155
152
 
156
153
  def chat(
157
154
  self,
158
155
  prompt: str,
159
- stream: bool = False, # Streaming is not supported by AI4Chat
156
+ stream: bool = False,
160
157
  optimizer: str = None,
161
158
  conversationally: bool = False,
159
+ country: str = None,
160
+ user_id: str = None,
162
161
  ) -> str:
163
162
  """
164
163
  Generates a response from the AI4Chat API.
165
164
 
166
165
  Args:
167
166
  prompt (str): The prompt to send to the AI.
168
- stream (bool, optional): Not used (AI4Chat doesn't support streaming).
167
+ stream (bool, optional): Not supported.
169
168
  optimizer (str, optional): The name of the optimizer to use. Defaults to None.
170
169
  conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
170
+ country (str, optional): Country parameter for API. Defaults to None.
171
+ user_id (str, optional): User ID for API. Defaults to None.
171
172
 
172
173
  Returns:
173
174
  str: The response generated by the AI.
@@ -177,6 +178,8 @@ class AI4Chat(Provider):
177
178
  prompt,
178
179
  optimizer=optimizer,
179
180
  conversationally=conversationally,
181
+ country=country,
182
+ user_id=user_id,
180
183
  )
181
184
  )
182
185
 
@@ -190,11 +193,10 @@ class AI4Chat(Provider):
190
193
  str: Message extracted
191
194
  """
192
195
  assert isinstance(response, dict), "Response should be of dict data-type only"
193
- return response["text"]
196
+ return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
194
197
 
195
198
  if __name__ == "__main__":
196
199
  from rich import print
197
200
  ai = AI4Chat()
198
- response = ai.chat("write me poem about AI", stream=True)
199
- for chunk in response:
200
- print(chunk, end="", flush=True)
201
+ response = ai.chat("Tell me something interesting")
202
+ print(response)
@@ -1,6 +1,6 @@
1
1
  from typing import Union, Any, Dict, Generator
2
2
  from uuid import uuid4
3
- import requests
3
+ import cloudscraper
4
4
  import re
5
5
  import json
6
6
  import time
@@ -29,16 +29,18 @@ class AkashGPT(Provider):
29
29
  """
30
30
 
31
31
  AVAILABLE_MODELS = [
32
- "Meta-Llama-3-3-70B-Instruct",
33
- "DeepSeek-R1",
34
- "Meta-Llama-3-1-405B-Instruct-FP8",
35
- # "Meta-Llama-3-2-3B-Instruct",
36
- # "Meta-Llama-3-1-8B-Instruct-FP8",
32
+ "meta-llama-3-3-70b-instruct",
33
+ "deepseek-r1",
34
+ "meta-llama-3-1-405b-instruct-fp8",
35
+ "meta-llama-llama-4-maverick-17b-128e-instruct-fp8",
36
+ "nvidia-llama-3-3-nemotron-super-49b-v1",
37
+
38
+ # "meta-llama-3-2-3b-instruct",
39
+ # "meta-llama-3-1-8b-instruct-fp8",
37
40
  # "mistral",
38
- # "nous-hermes2-mixtral",
41
+ # "nous-hermes2-mixtral",
39
42
  # "dolphin-mixtral",
40
- "Qwen-QwQ-32B"
41
-
43
+ "qwen-qwq-32b"
42
44
  ]
43
45
 
44
46
  def __init__(
@@ -53,7 +55,7 @@ class AkashGPT(Provider):
53
55
  history_offset: int = 10250,
54
56
  act: str = None,
55
57
  system_prompt: str = "You are a helpful assistant.",
56
- model: str = "Meta-Llama-3-3-70B-Instruct",
58
+ model: str = "meta-llama-3-3-70b-instruct",
57
59
  temperature: float = 0.6,
58
60
  top_p: float = 0.9,
59
61
  session_token: str = None
@@ -81,7 +83,7 @@ class AkashGPT(Provider):
81
83
  if model not in self.AVAILABLE_MODELS:
82
84
  raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
83
85
 
84
- self.session = requests.Session()
86
+ self.session = cloudscraper.create_scraper()
85
87
  self.is_conversation = is_conversation
86
88
  self.max_tokens_to_sample = max_tokens
87
89
  self.api_endpoint = "https://chat.akash.network/api/chat"
@@ -107,17 +109,20 @@ class AkashGPT(Provider):
107
109
  "scheme": "https",
108
110
  "accept": "*/*",
109
111
  "accept-encoding": "gzip, deflate, br, zstd",
110
- "accept-language": "en-US,en;q=0.9",
112
+ "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
111
113
  "content-type": "application/json",
112
114
  "dnt": "1",
113
115
  "origin": "https://chat.akash.network",
114
116
  "priority": "u=1, i",
115
117
  "referer": "https://chat.akash.network/",
116
- "sec-ch-ua": '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
118
+ "sec-ch-ua": '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
117
119
  "sec-ch-ua-mobile": "?0",
118
120
  "sec-ch-ua-platform": '"Windows"',
121
+ "sec-fetch-dest": "empty",
122
+ "sec-fetch-mode": "cors",
123
+ "sec-fetch-site": "same-origin",
124
+ "sec-gpc": "1",
119
125
  "user-agent": self.agent.random()
120
-
121
126
  }
122
127
 
123
128
  # Set cookies with the session token
@@ -181,15 +186,15 @@ class AkashGPT(Provider):
181
186
  )
182
187
 
183
188
  payload = {
184
- "id": str(uuid4()), # Generate a unique request ID
189
+ "id": str(uuid4()).replace("-", ""), # Generate a unique request ID in the correct format
185
190
  "messages": [
186
- {"role": "system", "content": self.system_prompt},
187
191
  {"role": "user", "content": conversation_prompt}
188
192
  ],
189
193
  "model": self.model,
190
- "temperature": self.temperature,
191
194
  "system": self.system_prompt,
192
- "topP": self.top_p
195
+ "temperature": self.temperature,
196
+ "topP": self.top_p,
197
+ "context": []
193
198
  }
194
199
 
195
200
  def for_stream():
@@ -218,17 +223,24 @@ class AkashGPT(Provider):
218
223
  # Parse content chunks
219
224
  if line.startswith('0:'):
220
225
  try:
221
- # Extract content between quotes
222
- content = line[2:].strip('"')
226
+ content = line[2:]
227
+ # Remove surrounding quotes if they exist
223
228
  if content.startswith('"') and content.endswith('"'):
224
229
  content = content[1:-1]
225
230
  streaming_response += content
226
231
  yield content if raw else dict(text=content)
227
- except Exception:
232
+ except Exception as e:
228
233
  continue
229
234
 
230
- # End of stream
235
+ # End of stream markers
231
236
  if line.startswith('e:') or line.startswith('d:'):
237
+ try:
238
+ finish_data = json.loads(line[2:])
239
+ finish_reason = finish_data.get("finishReason", "stop")
240
+ # Could store usage data if needed:
241
+ # usage = finish_data.get("usage", {})
242
+ except json.JSONDecodeError:
243
+ pass
232
244
  break
233
245
 
234
246
  self.last_response.update(dict(text=streaming_response, message_id=message_id))