webscout 6.0__py3-none-any.whl → 6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (63)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/Onlinesearcher.py +22 -10
  3. webscout/Agents/functioncall.py +2 -2
  4. webscout/Bard.py +21 -21
  5. webscout/Extra/autollama.py +37 -20
  6. webscout/Local/__init__.py +6 -7
  7. webscout/Local/formats.py +406 -194
  8. webscout/Local/model.py +1074 -477
  9. webscout/Local/samplers.py +108 -144
  10. webscout/Local/thread.py +251 -410
  11. webscout/Local/ui.py +401 -0
  12. webscout/Local/utils.py +338 -136
  13. webscout/Provider/Amigo.py +51 -38
  14. webscout/Provider/Deepseek.py +7 -6
  15. webscout/Provider/EDITEE.py +2 -2
  16. webscout/Provider/GPTWeb.py +1 -1
  17. webscout/Provider/Llama3.py +1 -1
  18. webscout/Provider/NinjaChat.py +200 -0
  19. webscout/Provider/OLLAMA.py +1 -1
  20. webscout/Provider/Perplexity.py +1 -1
  21. webscout/Provider/Reka.py +12 -5
  22. webscout/Provider/TTI/AIuncensored.py +103 -0
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +4 -2
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/TTI/talkai.py +116 -0
  28. webscout/Provider/TeachAnything.py +0 -3
  29. webscout/Provider/Youchat.py +1 -1
  30. webscout/Provider/__init__.py +16 -12
  31. webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
  32. webscout/Provider/cerebras.py +143 -123
  33. webscout/Provider/cleeai.py +1 -1
  34. webscout/Provider/felo_search.py +1 -1
  35. webscout/Provider/gaurish.py +207 -0
  36. webscout/Provider/geminiprorealtime.py +160 -0
  37. webscout/Provider/genspark.py +1 -1
  38. webscout/Provider/julius.py +8 -3
  39. webscout/Provider/learnfastai.py +1 -1
  40. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  41. webscout/Provider/promptrefine.py +3 -1
  42. webscout/Provider/talkai.py +196 -0
  43. webscout/Provider/turboseek.py +3 -8
  44. webscout/Provider/tutorai.py +1 -1
  45. webscout/__init__.py +2 -43
  46. webscout/exceptions.py +5 -1
  47. webscout/tempid.py +4 -73
  48. webscout/utils.py +3 -0
  49. webscout/version.py +1 -1
  50. webscout/webai.py +1 -1
  51. webscout/webscout_search.py +154 -123
  52. {webscout-6.0.dist-info → webscout-6.2.dist-info}/METADATA +164 -245
  53. {webscout-6.0.dist-info → webscout-6.2.dist-info}/RECORD +57 -55
  54. webscout/Local/rawdog.py +0 -946
  55. webscout/Provider/BasedGPT.py +0 -214
  56. webscout/Provider/TTI/amigo.py +0 -148
  57. webscout/Provider/bixin.py +0 -264
  58. webscout/Provider/xdash.py +0 -182
  59. webscout/websx_search.py +0 -19
  60. {webscout-6.0.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  61. {webscout-6.0.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  62. {webscout-6.0.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  63. {webscout-6.0.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,116 @@
1
+ import uuid
2
+ import requests
3
+ import json
4
+ import os
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ from webscout.AIbase import ImageProvider
8
+ from webscout import exceptions
9
+
10
+
11
class TalkaiImager(ImageProvider):
    """Image provider for Talkai.info.

    Sends an image-generation request to the Talkai chat endpoint and
    returns the URL(s) of the generated image(s).
    """

    def __init__(self, timeout: int = 60, proxies: Optional[Dict[str, str]] = None):
        """Initializes the TalkaiImager class.

        Args:
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 60.
            proxies (dict, optional): HTTP request proxies. Defaults to None (no proxies).
        """
        self.api_endpoint = "https://talkai.info/chat/send/"
        self.headers = {
            'accept': 'application/json',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': 'https://talkai.info',
            'referer': 'https://talkai.info/image/',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # `proxies` defaults to None rather than {} to avoid the
        # shared-mutable-default-argument pitfall.
        self.session.proxies.update(proxies or {})
        self.timeout = timeout
        self.prompt: str = "AI-generated image - webscout"
        self.image_extension: str = "png"

    def generate(self, prompt: str, amount: int = 1) -> List[str]:
        """Generates image URLs from a prompt.

        Args:
            prompt (str): Non-empty text description of the desired image.
            amount (int, optional): Number of images to request; one API call
                is issued per image. Defaults to 1.

        Returns:
            List[str]: URLs of the generated images.

        Raises:
            ValueError: If `prompt` is empty or `amount` is not a positive int.
            exceptions.APIConnectionError: On any HTTP/network failure.
            exceptions.InvalidResponseError: If the response is not valid JSON
                or contains no image URL.
            exceptions.FailedToGenerateResponseError: On any other unexpected error.
        """
        # Validate with real exceptions: `assert` is stripped under `python -O`.
        if not prompt:
            raise ValueError("Prompt cannot be empty.")
        if not isinstance(amount, int) or amount <= 0:
            raise ValueError("Amount must be a positive integer.")

        self.prompt = prompt
        image_urls: List[str] = []

        # The endpoint returns a single image per request, so honour `amount`
        # by issuing one request per requested image (the previous version
        # accepted `amount` but always made exactly one request).
        for _ in range(amount):
            try:
                with self.session.post(
                    self.api_endpoint,
                    json=self._create_payload(prompt),
                    timeout=self.timeout,
                ) as response:
                    response.raise_for_status()
                    data = response.json()

                if 'data' in data and len(data['data']) > 0 and 'url' in data['data'][0]:
                    image_urls.append(data['data'][0]['url'])
                else:
                    raise exceptions.InvalidResponseError("No image URL found in API response.")

            except requests.exceptions.RequestException as e:
                raise exceptions.APIConnectionError(f"Error making API request: {e}") from e
            except json.JSONDecodeError as e:
                raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}") from e
            except exceptions.InvalidResponseError:
                # Bug fix: previously this fell through to the broad
                # `except Exception` below and was re-wrapped as
                # FailedToGenerateResponseError, masking the intended type.
                raise
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred: {e}") from e

        return image_urls

    def _create_payload(self, prompt: str) -> Dict[str, Any]:
        """Builds the JSON request body for an image-generation request."""
        return {
            "type": "image",
            "messagesHistory": [
                {
                    "id": str(uuid.uuid4()),
                    "from": "you",
                    "content": prompt
                }
            ],
            "settings": {
                "model": "gpt-4o-mini"  # Or another suitable model if available
            }
        }

    def save(
        self,
        response: List[str],
        name: Optional[str] = None,
        dir: Optional[str] = None,
        filenames_prefix: str = "",
    ) -> List[Optional[str]]:
        """Downloads the images at the given URLs to disk.

        Args:
            response (List[str]): Image URLs as returned by `generate`.
            name (str, optional): Base filename; defaults to the last prompt.
            dir (str, optional): Target directory; defaults to the current
                working directory *at call time* (the previous default
                evaluated `os.getcwd()` once, at import time).
            filenames_prefix (str, optional): Prefix prepended to each filename.

        Returns:
            List[Optional[str]]: Saved filenames; a failed download yields
            None in its slot (best-effort behaviour kept from the original).
        """
        if not isinstance(response, list):
            raise TypeError(f"Response should be a list, not {type(response)}")
        name = self.prompt if name is None else name
        dir = dir if dir is not None else os.getcwd()

        filenames: List[Optional[str]] = []
        for i, url in enumerate(response):
            try:
                with self.session.get(url, stream=True, timeout=self.timeout) as r:
                    r.raise_for_status()
                    filename = f"{filenames_prefix}{name}_{i}.{self.image_extension}"
                    filepath = os.path.join(dir, filename)
                    with open(filepath, 'wb') as f:
                        for chunk in r.iter_content(chunk_size=8192):
                            f.write(chunk)
                    filenames.append(filename)
            except requests.exceptions.RequestException as e:
                # Deliberate best-effort: report and continue with the rest.
                print(f"Error downloading image from {url}: {e}")
                filenames.append(None)  # Indicate failure to download

        return filenames
108
+
109
+
110
if __name__ == "__main__":
    # Quick manual smoke test: generate one image URL and download it.
    imager = TalkaiImager()
    try:
        urls = imager.generate("A shiny red sports car speeding down a scenic mountain road", 1)
        saved = imager.save(urls)
        print(saved)
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -1,9 +1,6 @@
1
1
  import requests
2
2
  from requests.exceptions import RequestException
3
3
  from typing import Any, Dict
4
- import logging
5
- import random
6
-
7
4
  from webscout.AIutel import Conversation, Optimizers
8
5
 
9
6
  class TeachAnything:
@@ -224,6 +224,6 @@ class YouChat(Provider):
224
224
  if __name__ == '__main__':
225
225
  from rich import print
226
226
  ai = YouChat(timeout=5000)
227
- response = ai.chat("write a poem about AI", stream=True)
227
+ response = ai.chat("Who is Abhay Koul in AI?", stream=True)
228
228
  for chunk in response:
229
229
  print(chunk, end="", flush=True)
@@ -16,7 +16,6 @@ from .Phind import PhindSearch
16
16
  from .Phind import Phindv2
17
17
  from .ai4chat import *
18
18
  from .Gemini import GEMINI
19
- from .BasedGPT import BasedGPT
20
19
  from .Deepseek import DeepSeek
21
20
  from .Deepinfra import DeepInfra
22
21
  from .Farfalle import *
@@ -31,7 +30,6 @@ from .RUBIKSAI import *
31
30
  from .meta import *
32
31
  from .DiscordRocks import *
33
32
  from .felo_search import *
34
- from .xdash import *
35
33
  from .julius import *
36
34
  from .Youchat import *
37
35
  from .yep import *
@@ -51,17 +49,22 @@ from .genspark import *
51
49
  from .upstage import *
52
50
  from .Bing import *
53
51
  from .GPTWeb import *
54
- from .aigames import *
52
+ # from .UNFINISHED.aigames import *
55
53
  from .llamatutor import *
56
54
  from .promptrefine import *
57
55
  from .twitterclone import *
58
56
  from .tutorai import *
59
- from .bixin import *
60
57
  from .ChatGPTES import *
61
58
  from .Amigo import *
62
59
  from .prefind import *
63
60
  from .bagoodex import *
64
- from .ChatHub import *
61
+ # from .UNFINISHED.ChatHub import *
62
+ from .aimathgpt import *
63
+ from .gaurish import *
64
+ from .geminiprorealtime import *
65
+ from .NinjaChat import *
66
+ from .llmchat import *
67
+ from .talkai import *
65
68
  __all__ = [
66
69
  'Farfalle',
67
70
  'LLAMA',
@@ -79,7 +82,6 @@ __all__ = [
79
82
  'PhindSearch',
80
83
  'Felo',
81
84
  'GEMINI',
82
- 'BasedGPT',
83
85
  'DeepSeek',
84
86
  'DeepInfra',
85
87
  'AI4Chat',
@@ -94,7 +96,6 @@ __all__ = [
94
96
  'Meta',
95
97
  'DiscordRocks',
96
98
  'PiAI',
97
- 'XDASH',
98
99
  'Julius',
99
100
  'YouChat',
100
101
  'YEPCHAT',
@@ -115,18 +116,21 @@ __all__ = [
115
116
  'Free2GPT',
116
117
  'Bing',
117
118
  'GPTWeb',
118
- 'AIGameIO',
119
+ # 'AIGameIO',
119
120
  'LlamaTutor',
120
121
  'PromptRefine',
121
122
  'AIUncensored',
122
123
  'TutorAI',
123
- 'Bixin',
124
124
  'ChatGPTES',
125
125
  'AmigoChat',
126
126
  'PrefindAI',
127
127
  'Bagoodex',
128
- 'ChatHub',
129
- # 'LearnFast',
130
-
128
+ # 'ChatHub',
129
+ 'AIMathGPT',
130
+ 'GaurishCerebras',
131
+ 'GeminiPro',
132
+ 'NinjaChat',
133
+ 'LLMChat',
134
+ 'Talkai'
131
135
 
132
136
  ]
@@ -9,29 +9,16 @@ from webscout.AIutel import AwesomePrompts, sanitize_stream
9
9
  from webscout.AIbase import Provider, AsyncProvider
10
10
  from webscout import exceptions
11
11
 
12
- class ChatHub(Provider):
12
+
13
+ class AIMathGPT(Provider):
13
14
  """
14
- A class to interact with the ChatHub API.
15
+ A class to interact with the AIMathGPT API.
15
16
  """
16
17
 
17
- AVAILABLE_MODELS = [
18
- 'meta/llama3.1-8b',
19
- 'mistral/mixtral-8x7b',
20
- 'google/gemma-2',
21
- 'perplexity/sonar-online',
22
- ]
23
- model_aliases = { # Aliases for shorter model names
24
- "llama3.1-8b": 'meta/llama3.1-8b',
25
- "mixtral-8x7b": 'mistral/mixtral-8x7b',
26
- "gemma-2": 'google/gemma-2',
27
- "sonar-online": 'perplexity/sonar-online',
28
- }
29
-
30
-
31
18
  def __init__(
32
19
  self,
33
20
  is_conversation: bool = True,
34
- max_tokens: int = 2049,
21
+ max_tokens: int = 2049,
35
22
  timeout: int = 30,
36
23
  intro: str = None,
37
24
  filepath: str = None,
@@ -39,25 +26,51 @@ class ChatHub(Provider):
39
26
  proxies: dict = {},
40
27
  history_offset: int = 10250,
41
28
  act: str = None,
42
- model: str = "sonar-online",
29
+ model: str = "llama3", # Default model
30
+ system_prompt: str = "You are a helpful AI assistant.",
43
31
  ):
44
- """Initializes the ChatHub API client."""
45
- self.url = "https://app.chathub.gg"
46
- self.api_endpoint = "https://app.chathub.gg/api/v3/chat/completions"
32
+ """
33
+ Initializes the AIMathGPT API with the given parameters.
34
+ """
35
+ self.url = "https://aimathgpt.forit.ai/api/ai"
47
36
  self.headers = {
48
- 'Accept': '*/*',
49
- 'Accept-Language': 'en-US,en;q=0.9',
50
- 'Content-Type': 'application/json',
51
- 'Origin': self.url,
52
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
53
- 'X-App-Id': 'web'
37
+ "authority": "aimathgpt.forit.ai",
38
+ "method": "POST",
39
+ "path": "/api/ai",
40
+ "scheme": "https",
41
+ "accept": "*/*",
42
+ "accept-encoding": "gzip, deflate, br, zstd",
43
+ "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
44
+ "content-type": "application/json",
45
+ "cookie": (
46
+ "NEXT_LOCALE=en; _ga=GA1.1.1515823701.1726936796; "
47
+ "_ga_1F3ZVN96B1=GS1.1.1726936795.1.1.1726936833.0.0.0"
48
+ ),
49
+ "dnt": "1",
50
+ "origin": "https://aimathgpt.forit.ai",
51
+ "priority": "u=1, i",
52
+ "referer": "https://aimathgpt.forit.ai/?ref=taaft&utm_source=taaft&utm_medium=referral",
53
+ "sec-ch-ua": (
54
+ "\"Microsoft Edge\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\""
55
+ ),
56
+ "sec-ch-ua-mobile": "?0",
57
+ "sec-ch-ua-platform": "\"Windows\"",
58
+ "sec-fetch-dest": "empty",
59
+ "sec-fetch-mode": "cors",
60
+ "sec-fetch-site": "same-origin",
61
+ "user-agent": (
62
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
63
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
64
+ "Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"
65
+ ),
54
66
  }
55
67
  self.session = requests.Session()
56
- self.session.headers.update(self.headers)
57
- self.session.proxies.update(proxies)
68
+ self.session.headers.update(self.headers)
69
+ self.session.proxies.update(proxies)
58
70
  self.timeout = timeout
59
71
  self.last_response = {}
60
-
72
+ self.model = model
73
+ self.system_prompt = system_prompt
61
74
  self.is_conversation = is_conversation
62
75
  self.max_tokens_to_sample = max_tokens
63
76
  self.__available_optimizers = (
@@ -72,29 +85,11 @@ class ChatHub(Provider):
72
85
  if act
73
86
  else intro or Conversation.intro
74
87
  )
75
-
76
88
  self.conversation = Conversation(
77
89
  is_conversation, self.max_tokens_to_sample, filepath, update_file
78
90
  )
79
91
  self.conversation.history_offset = history_offset
80
92
 
81
- #Resolve the model
82
- self.model = self.get_model(model)
83
-
84
-
85
- def get_model(self, model: str) -> str:
86
- """
87
- Resolves the model name using aliases or defaults.
88
- """
89
-
90
- if model in self.AVAILABLE_MODELS:
91
- return model
92
- elif model in self.model_aliases:
93
- return self.model_aliases[model]
94
- else:
95
- print(f"Model '{model}' not found. Using default model '{self.default_model}'.")
96
- return self.default_model # Use class-level default
97
-
98
93
  def ask(
99
94
  self,
100
95
  prompt: str,
@@ -102,9 +97,10 @@ class ChatHub(Provider):
102
97
  raw: bool = False,
103
98
  optimizer: str = None,
104
99
  conversationally: bool = False,
105
- ) -> Union[Dict[str, Any], Generator]:
106
-
100
+ ) -> Union[Dict, Generator]:
101
+ """Sends a chat completion request to the AIMathGPT API."""
107
102
  conversation_prompt = self.conversation.gen_complete_prompt(prompt)
103
+
108
104
  if optimizer:
109
105
  if optimizer in self.__available_optimizers:
110
106
  conversation_prompt = getattr(Optimizers, optimizer)(
@@ -114,44 +110,39 @@ class ChatHub(Provider):
114
110
  raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
115
111
 
116
112
 
117
- data = {
113
+ payload = {
114
+ "messages": [
115
+ {"role": "system", "content": self.system_prompt},
116
+ {"role": "user", "content": conversation_prompt},
117
+ ],
118
118
  "model": self.model,
119
- "messages": [{"role": "user", "content": conversation_prompt}],
120
- "tools": []
121
119
  }
122
120
 
123
- # Set the Referer header dynamically based on the resolved model
124
- self.headers['Referer'] = f"{self.url}/chat/{self.model}"
125
-
126
121
 
127
122
  def for_stream():
128
123
  try:
129
- with requests.post(self.api_endpoint, headers=self.headers, json=data, stream=True, timeout=self.timeout) as response:
130
- response.raise_for_status()
131
- streaming_text = ""
124
+ with requests.post(self.url, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout) as response:
125
+ if response.status_code != 200:
126
+ raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}: {response.text}")
132
127
 
128
+ streaming_text = ""
133
129
  for line in response.iter_lines(decode_unicode=True):
134
130
  if line:
135
- decoded_line = line.strip()
136
- if decoded_line.startswith('data:'):
137
- data_str = decoded_line[5:].strip()
138
- if data_str == '[DONE]':
139
- break
140
- try:
141
- data_json = json.loads(data_str)
142
- text_delta = data_json.get('textDelta')
143
- if text_delta:
144
- streaming_text += text_delta
145
- resp = dict(text=text_delta)
146
- yield resp if raw else resp
147
-
148
- except json.JSONDecodeError:
149
- continue
131
+ try:
132
+ data = json.loads(line)
133
+ if 'result' in data and 'response' in data['result']:
134
+ content = data['result']['response']
135
+ streaming_text += content
136
+ resp = dict(text=content) # Yield only the new content
137
+ yield resp if raw else resp
138
+ else:
139
+ pass
140
+ except json.JSONDecodeError:
141
+ pass
150
142
  self.conversation.update_chat_history(prompt, streaming_text)
151
143
  self.last_response.update({"text": streaming_text})
152
144
  except requests.exceptions.RequestException as e:
153
- raise exceptions.FailedToGenerateResponseError(f"Request error: {e}")
154
-
145
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
155
146
 
156
147
  def for_non_stream():
157
148
  for _ in for_stream():
@@ -162,7 +153,6 @@ class ChatHub(Provider):
162
153
 
163
154
 
164
155
 
165
-
166
156
  def chat(
167
157
  self,
168
158
  prompt: str,
@@ -170,7 +160,6 @@ class ChatHub(Provider):
170
160
  optimizer: str = None,
171
161
  conversationally: bool = False,
172
162
  ) -> Union[str, Generator]:
173
- """Generate response `str`"""
174
163
 
175
164
  def for_stream():
176
165
  for response in self.ask(
@@ -181,29 +170,24 @@ class ChatHub(Provider):
181
170
  def for_non_stream():
182
171
  return self.get_message(
183
172
  self.ask(
184
- prompt,
185
- stream=False, # Pass stream=False
186
- optimizer=optimizer,
187
- conversationally=conversationally,
173
+ prompt, stream=False, optimizer=optimizer, conversationally=conversationally
188
174
  )
189
175
  )
190
176
 
191
177
  return for_stream() if stream else for_non_stream()
192
178
 
193
-
194
-
195
179
  def get_message(self, response: dict) -> str:
196
180
  """Retrieves message only from response"""
197
181
  assert isinstance(response, dict), "Response should be of dict data-type only"
198
- return response.get("text", "")
182
+ return response["text"]
199
183
 
200
184
 
201
185
  if __name__ == "__main__":
202
186
  from rich import print
203
- bot = ChatHub()
187
+ bot = AIMathGPT()
204
188
  try:
205
- response = bot.chat("who is Abhay koul in AI", stream=True)
189
+ response = bot.chat("What is the capital of France?", stream=True)
206
190
  for chunk in response:
207
191
  print(chunk, end="", flush=True)
208
192
  except Exception as e:
209
- print(f"An error occurred: {e}")
193
+ print(f"An error occurred: {e}")