webscout 5.9__py3-none-any.whl → 6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic; see the registry's advisory page for more details.

Files changed (57)
  1. webscout/Agents/Onlinesearcher.py +22 -10
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Bard.py +21 -21
  4. webscout/Local/__init__.py +6 -7
  5. webscout/Local/formats.py +404 -194
  6. webscout/Local/model.py +1074 -477
  7. webscout/Local/samplers.py +108 -144
  8. webscout/Local/thread.py +251 -410
  9. webscout/Local/ui.py +401 -0
  10. webscout/Local/utils.py +308 -131
  11. webscout/Provider/Amigo.py +5 -3
  12. webscout/Provider/ChatHub.py +209 -0
  13. webscout/Provider/Chatify.py +3 -3
  14. webscout/Provider/Cloudflare.py +3 -3
  15. webscout/Provider/DARKAI.py +1 -1
  16. webscout/Provider/Deepinfra.py +95 -389
  17. webscout/Provider/Deepseek.py +4 -6
  18. webscout/Provider/DiscordRocks.py +3 -3
  19. webscout/Provider/Free2GPT.py +3 -3
  20. webscout/Provider/NinjaChat.py +200 -0
  21. webscout/Provider/OLLAMA.py +4 -4
  22. webscout/Provider/RUBIKSAI.py +3 -3
  23. webscout/Provider/TTI/Nexra.py +3 -3
  24. webscout/Provider/TTI/__init__.py +2 -1
  25. webscout/Provider/TTI/aiforce.py +2 -2
  26. webscout/Provider/TTI/imgninza.py +136 -0
  27. webscout/Provider/Youchat.py +4 -5
  28. webscout/Provider/__init__.py +13 -6
  29. webscout/Provider/ai4chat.py +3 -2
  30. webscout/Provider/aimathgpt.py +193 -0
  31. webscout/Provider/bagoodex.py +145 -0
  32. webscout/Provider/bixin.py +3 -3
  33. webscout/Provider/cleeai.py +3 -3
  34. webscout/Provider/elmo.py +2 -5
  35. webscout/Provider/felo_search.py +1 -1
  36. webscout/Provider/gaurish.py +168 -0
  37. webscout/Provider/geminiprorealtime.py +160 -0
  38. webscout/Provider/julius.py +10 -40
  39. webscout/Provider/llamatutor.py +2 -2
  40. webscout/Provider/prefind.py +3 -3
  41. webscout/Provider/promptrefine.py +3 -3
  42. webscout/Provider/turboseek.py +1 -1
  43. webscout/Provider/twitterclone.py +25 -41
  44. webscout/Provider/upstage.py +3 -3
  45. webscout/Provider/x0gpt.py +6 -6
  46. webscout/exceptions.py +5 -1
  47. webscout/utils.py +3 -0
  48. webscout/version.py +1 -1
  49. webscout/webscout_search.py +154 -123
  50. {webscout-5.9.dist-info → webscout-6.1.dist-info}/METADATA +132 -157
  51. {webscout-5.9.dist-info → webscout-6.1.dist-info}/RECORD +55 -49
  52. {webscout-5.9.dist-info → webscout-6.1.dist-info}/WHEEL +1 -1
  53. webscout/Local/rawdog.py +0 -946
  54. webscout/Provider/Poe.py +0 -208
  55. {webscout-5.9.dist-info → webscout-6.1.dist-info}/LICENSE.md +0 -0
  56. {webscout-5.9.dist-info → webscout-6.1.dist-info}/entry_points.txt +0 -0
  57. {webscout-5.9.dist-info → webscout-6.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,160 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ import secrets
5
+ from typing import Any, Dict, Optional, Generator, Union
6
+
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
10
+ from webscout.AIbase import Provider, AsyncProvider
11
+ from webscout import exceptions
12
+
13
+
14
+ class GeminiPro(Provider):
15
+ """
16
+ A class to interact with the Minitool AI API.
17
+ """
18
+
19
+ def __init__(
20
+ self,
21
+ is_conversation: bool = True,
22
+ max_tokens: int = 2049,
23
+ timeout: int = 30,
24
+ intro: str = None,
25
+ filepath: str = None,
26
+ update_file: bool = True,
27
+ proxies: dict = {},
28
+ history_offset: int = 10250,
29
+ act: str = None,
30
+ ):
31
+ """Initializes the Minitool AI API client."""
32
+ self.url = "https://minitoolai.com/test_python/"
33
+ self.headers = {
34
+ 'authority': 'minitoolai.com',
35
+ 'method': 'POST',
36
+ 'path': '/test_python/',
37
+ 'scheme': 'https',
38
+ 'accept': '*/*',
39
+ 'content-type': 'application/json',
40
+ 'dnt': '1',
41
+ 'origin': 'https://minitoolai.com',
42
+ 'priority': 'u=1, i',
43
+ 'referer': 'https://minitoolai.com/Gemini-Pro/',
44
+ 'sec-ch-ua': '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
45
+ 'sec-ch-ua-mobile': '?0',
46
+ 'sec-ch-ua-platform': '"Windows"',
47
+ 'sec-fetch-dest': 'empty',
48
+ 'sec-fetch-mode': 'cors',
49
+ 'sec-fetch-site': 'same-origin',
50
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
51
+ 'x-requested-with': 'XMLHttpRequest'
52
+ }
53
+ self.session = requests.Session()
54
+ self.session.headers.update(self.headers)
55
+ self.session.proxies.update(proxies)
56
+ self.timeout = timeout
57
+ self.last_response = {}
58
+
59
+ self.is_conversation = is_conversation
60
+ self.max_tokens_to_sample = max_tokens
61
+ self.__available_optimizers = (
62
+ method
63
+ for method in dir(Optimizers)
64
+ if callable(getattr(Optimizers, method)) and not method.startswith("__")
65
+ )
66
+ Conversation.intro = (
67
+ AwesomePrompts().get_act(
68
+ act, raise_not_found=True, default=None, case_insensitive=True
69
+ )
70
+ if act
71
+ else intro or Conversation.intro
72
+ )
73
+ self.conversation = Conversation(
74
+ is_conversation, self.max_tokens_to_sample, filepath, update_file
75
+ )
76
+ self.conversation.history_offset = history_offset
77
+
78
+
79
+ def ask(
80
+ self,
81
+ prompt: str,
82
+ stream: bool = False,
83
+ raw: bool = False,
84
+ optimizer: str = None,
85
+ conversationally: bool = False,
86
+ ) -> Union[Dict, Generator]:
87
+ """Sends a chat completion request to the Minitool AI API."""
88
+
89
+ conversation_prompt = self.conversation.gen_complete_prompt(prompt)
90
+ if optimizer:
91
+ if optimizer in self.__available_optimizers:
92
+ conversation_prompt = getattr(Optimizers, optimizer)(
93
+ conversation_prompt if conversationally else prompt
94
+ )
95
+ else:
96
+ raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
97
+
98
+
99
+ payload = {"utoken": secrets.token_hex(32), "message": conversation_prompt}
100
+
101
+ def for_stream():
102
+ # MinitoolAI doesn't support streaming; emulate with a single yield
103
+ try:
104
+ response = self.session.post(self.url, json=payload, timeout=self.timeout)
105
+ response.raise_for_status()
106
+ data = response.json()
107
+ text = data.get("response", "") # Get response, default to "" if missing
108
+ self.last_response.update({"text": text})
109
+ yield {"text": text} # Yield the entire response
110
+ except requests.exceptions.RequestException as e:
111
+ raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
112
+ self.conversation.update_chat_history(prompt, text) #Update chat history
113
+
114
+
115
+ def for_non_stream():
116
+ for _ in for_stream(): pass # Update last_response
117
+ return self.last_response
118
+
119
+
120
+ return for_stream() if stream else for_non_stream()
121
+
122
+
123
+ def chat(
124
+ self,
125
+ prompt: str,
126
+ stream: bool = False,
127
+ optimizer: str = None,
128
+ conversationally: bool = False,
129
+ ) -> Union[str, Generator]:
130
+ """Generate response `str`"""
131
+ def for_stream():
132
+ for response in self.ask(
133
+ prompt, stream=True, optimizer=optimizer, conversationally=conversationally
134
+ ):
135
+ yield self.get_message(response)
136
+
137
+ def for_non_stream():
138
+ return self.get_message(
139
+ self.ask(
140
+ prompt, stream=False, optimizer=optimizer, conversationally=conversationally
141
+ )
142
+ )
143
+
144
+ return for_stream() if stream else for_non_stream()
145
+
146
+ def get_message(self, response: dict) -> str:
147
+ """Retrieves message only from response"""
148
+ assert isinstance(response, dict), "Response should be of dict data-type only"
149
+ return response.get("text", "") # Handle missing keys
150
+
151
+
152
+ if __name__ == "__main__":
153
+ from rich import print
154
+ bot = GeminiPro()
155
+ try:
156
+ response = bot.chat("tell me about Gpt canvas", stream=True)
157
+ for chunk in response:
158
+ print(chunk, end="", flush=True)
159
+ except Exception as e:
160
+ print(f"An error occurred: {e}")
@@ -25,6 +25,10 @@ class Julius(Provider):
25
25
  "GPT-4",
26
26
  "GPT-4o mini",
27
27
  "Command R+",
28
+ "o1-mini",
29
+ "o1-preview",
30
+
31
+
28
32
  ]
29
33
  def __init__(
30
34
  self,
@@ -66,22 +70,13 @@ class Julius(Provider):
66
70
  self.last_response = {}
67
71
  self.model = model
68
72
  self.headers = {
69
- "accept": "*/*",
70
- "accept-encoding": "gzip, deflate, br, zstd",
71
- "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
72
73
  "authorization": "Bearer",
73
- "content-length": "206",
74
74
  "content-type": "application/json",
75
75
  "conversation-id": str(uuid.uuid4()),
76
- "dnt": "1",
77
76
  "interactive-charts": "true",
78
77
  "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
79
78
  "is-native": "false",
80
79
  "orient-split": "true",
81
- "origin": "https://julius.ai",
82
- "platform": "undefined",
83
- "priority": "u=1, i",
84
- "referer": "https://julius.ai/",
85
80
  "request-id": str(uuid.uuid4()),
86
81
  "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
87
82
  "visitor-id": str(uuid.uuid4())
@@ -165,7 +160,7 @@ class Julius(Provider):
165
160
  json_line = json.loads(line)
166
161
  content = json_line['content']
167
162
  streaming_response += content
168
- yield content if raw else dict(text=streaming_response)
163
+ yield content if raw else dict(text=content)
169
164
  except:
170
165
  continue
171
166
  self.last_response.update(dict(text=streaming_response))
@@ -174,30 +169,13 @@ class Julius(Provider):
174
169
  )
175
170
 
176
171
  def for_non_stream():
177
- response = self.session.post(
178
- self.chat_endpoint, json=payload, headers=self.headers, timeout=self.timeout
179
- )
180
172
 
181
- if not response.ok:
182
- raise exceptions.FailedToGenerateResponseError(
183
- f"Failed to generate response - ({response.status_code}, {response.reason})"
184
- )
185
- full_content = ""
186
- for line in response.text.splitlines():
187
- try:
188
- data = json.loads(line)
189
- if "content" in data:
190
- full_content += data['content']
191
- except json.JSONDecodeError:
192
- pass
193
- self.last_response.update(dict(text=full_content))
194
- self.conversation.update_chat_history(
195
- prompt, self.get_message(self.last_response)
196
- )
173
+ for _ in for_stream():
174
+ pass
197
175
  return self.last_response
198
176
 
199
- return for_stream() if stream else for_non_stream()
200
177
 
178
+ return for_stream() if stream else for_non_stream()
201
179
  def chat(
202
180
  self,
203
181
  prompt: str,
@@ -234,19 +212,11 @@ class Julius(Provider):
234
212
  return for_stream() if stream else for_non_stream()
235
213
 
236
214
  def get_message(self, response: dict) -> str:
237
- """Retrieves message only from response
238
-
239
- Args:
240
- response (dict): Response generated by `self.ask`
241
-
242
- Returns:
243
- str: Message extracted
244
- """
245
215
  assert isinstance(response, dict), "Response should be of dict data-type only"
246
216
  return response["text"]
247
217
  if __name__ == '__main__':
248
218
  from rich import print
249
- ai = Julius()
250
- response = ai.chat("hi")
219
+ ai = Julius(timeout=5000)
220
+ response = ai.chat("write a poem about AI", stream=True)
251
221
  for chunk in response:
252
222
  print(chunk, end="", flush=True)
@@ -147,7 +147,7 @@ class LlamaTutor(Provider):
147
147
  json_data = json.loads(decoded_line[6:])
148
148
  if "text" in json_data:
149
149
  full_response += json_data["text"]
150
- yield json_data["text"] if raw else dict(text=full_response)
150
+ yield json_data["text"] if raw else dict(text=json_data["text"])
151
151
 
152
152
  self.last_response.update(dict(text=full_response))
153
153
  self.conversation.update_chat_history(
@@ -217,6 +217,6 @@ if __name__ == "__main__":
217
217
  from rich import print
218
218
 
219
219
  ai = LlamaTutor()
220
- response = ai.chat(input(">>> "))
220
+ response = ai.chat("write a poem about AI", stream=True)
221
221
  for chunk in response:
222
222
  print(chunk, end="", flush=True)
@@ -161,8 +161,8 @@ class PrefindAI(Provider):
161
161
  (self.model == "claude" and model == 'OPENROUTER_CLAUDE'):
162
162
  content = data['chunk']['content']
163
163
  if content:
164
- streaming_text += content + ("\n" if stream else "")
165
- resp = dict(text=streaming_text)
164
+ streaming_text += content #+ ("\n" if stream else "")
165
+ resp = dict(text=content)
166
166
  self.last_response.update(resp)
167
167
  yield resp if raw else resp
168
168
  self.conversation.update_chat_history(
@@ -227,6 +227,6 @@ class PrefindAI(Provider):
227
227
  if __name__ == '__main__':
228
228
  from rich import print
229
229
  ai = PrefindAI(model="claude")
230
- response = ai.chat(input(">>> "))
230
+ response = ai.chat("write a poem about AI", stream=True)
231
231
  for chunk in response:
232
232
  print(chunk, end="", flush=True)
@@ -134,7 +134,7 @@ class PromptRefine(Provider):
134
134
  for line in response.iter_lines(decode_unicode=True):
135
135
  if line:
136
136
  full_response += line # No need to decode here
137
- yield full_response if raw else dict(text=full_response)
137
+ yield full_response if raw else dict(text=line)
138
138
  self.last_response.update(dict(text=full_response))
139
139
  self.conversation.update_chat_history(
140
140
  prompt, self.get_message(self.last_response)
@@ -185,7 +185,7 @@ class PromptRefine(Provider):
185
185
 
186
186
  if __name__ == '__main__':
187
187
  from rich import print
188
- ai = PromptRefine()
189
- response = ai.chat(input(">>> "))
188
+ ai = PromptRefine(timeout=5000)
189
+ response = ai.chat("write a poem about AI", stream=True)
190
190
  for chunk in response:
191
191
  print(chunk, end="", flush=True)
@@ -138,7 +138,7 @@ class TurboSeek(Provider):
138
138
  raise exceptions.FailedToGenerateResponseError(
139
139
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
140
140
  )
141
-
141
+ print(response.text)
142
142
  streaming_text = ""
143
143
  for value in response.iter_lines(
144
144
  decode_unicode=True,
@@ -149,45 +149,31 @@ class AIUncensored(Provider):
149
149
 
150
150
 
151
151
  def for_stream():
152
-
153
- try:
154
- with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
155
-
156
- if response.status_code == 200:
157
- full_content = ''
158
- for line in response.iter_lines():
159
- decoded_line = line.decode('utf-8').strip()
160
- if decoded_line:
161
-
162
- if decoded_line == "data: [DONE]":
163
-
164
- break
165
- if decoded_line.startswith("data: "):
166
- data_str = decoded_line[len("data: "):]
167
- try:
168
- data_json = json.loads(data_str)
169
- content = data_json.get("data", "")
170
- if content:
171
- full_content += content
172
-
173
- yield content if raw else {"text": full_content}
174
- except json.JSONDecodeError:
175
- if data_str != "[DONE]":
176
- return None
177
- else:
178
-
179
- raise exceptions.FailedToGenerateResponseError(
180
- f"Request failed with status code: {response.status_code}"
181
- )
182
- self.last_response = {"text": full_content}
183
-
152
+ with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
153
+
154
+ if response.status_code == 200:
155
+ full_content = ''
156
+ for line in response.iter_lines():
157
+ decoded_line = line.decode('utf-8').strip()
158
+ if decoded_line:
159
+
160
+ if decoded_line == "data: [DONE]":
161
+
162
+ break
163
+ if decoded_line.startswith("data: "):
164
+ data_str = decoded_line[len("data: "):]
165
+ try:
166
+ data_json = json.loads(data_str)
167
+ content = data_json.get("data", "")
168
+ if content:
169
+ full_content += content
170
+ yield content if raw else dict(text=content)
171
+ except json.JSONDecodeError:
172
+ raise Exception
173
+ self.last_response.update(dict(text=full_content))
184
174
  self.conversation.update_chat_history(
185
175
  prompt, self.get_message(self.last_response)
186
176
  )
187
- except requests.exceptions.RequestException as e:
188
-
189
- raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
190
-
191
177
  def for_non_stream():
192
178
 
193
179
  for _ in for_stream():
@@ -252,9 +238,7 @@ class AIUncensored(Provider):
252
238
 
253
239
  if __name__ == "__main__":
254
240
  from rich import print
255
- ai = AIUncensored()
256
- user_input = 'hi'
257
- response = ai.chat(user_input)
241
+ ai = AIUncensored(timeout=5000)
242
+ response = ai.chat("write a poem about AI", stream=True)
258
243
  for chunk in response:
259
- print(chunk, end="", flush=True)
260
- print() # For a newline after streaming completes
244
+ print(chunk, end="", flush=True)
@@ -159,7 +159,7 @@ class Upstage(Provider):
159
159
  content = json_data['choices'][0]['delta'].get('content', '')
160
160
  if content:
161
161
  streaming_response += content
162
- yield content if raw else dict(text=streaming_response)
162
+ yield content if raw else dict(text=content)
163
163
  except json.JSONDecodeError:
164
164
  print(f"Error decoding JSON: {data}")
165
165
 
@@ -224,7 +224,7 @@ class Upstage(Provider):
224
224
  return response["text"]
225
225
  if __name__ == '__main__':
226
226
  from rich import print
227
- ai = Upstage()
228
- response = ai.chat("hi")
227
+ ai = Upstage(timeout=5000)
228
+ response = ai.chat("write a poem about AI", stream=True)
229
229
  for chunk in response:
230
230
  print(chunk, end="", flush=True)
@@ -118,13 +118,13 @@ class X0GPT(Provider):
118
118
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
119
119
  )
120
120
  streaming_response = ""
121
- for line in response.iter_lines(decode_unicode=True, chunk_size=64):
121
+ for line in response.iter_lines(decode_unicode=True):
122
122
  if line:
123
123
  match = re.search(r'0:"(.*?)"', line)
124
124
  if match:
125
125
  content = match.group(1)
126
126
  streaming_response += content
127
- yield content if raw else dict(text=streaming_response)
127
+ yield content if raw else dict(text=content)
128
128
  self.last_response.update(dict(text=streaming_response))
129
129
  self.conversation.update_chat_history(
130
130
  prompt, self.get_message(self.last_response)
@@ -152,7 +152,7 @@ class X0GPT(Provider):
152
152
  for response in self.ask(
153
153
  prompt, True, optimizer=optimizer, conversationally=conversationally
154
154
  ):
155
- yield self.get_message(response).replace("\n", "\n\n")
155
+ yield self.get_message(response)
156
156
 
157
157
  def for_non_stream():
158
158
  return self.get_message(
@@ -162,7 +162,7 @@ class X0GPT(Provider):
162
162
  optimizer=optimizer,
163
163
  conversationally=conversationally,
164
164
  )
165
- ).replace("\n", "\n\n")
165
+ )
166
166
 
167
167
  return for_stream() if stream else for_non_stream()
168
168
 
@@ -176,7 +176,7 @@ class X0GPT(Provider):
176
176
 
177
177
  if __name__ == "__main__":
178
178
  from rich import print
179
- ai = X0GPT()
180
- response = ai.chat("hi")
179
+ ai = X0GPT(timeout=5000)
180
+ response = ai.chat("write a poem about AI", stream=True)
181
181
  for chunk in response:
182
182
  print(chunk, end="", flush=True)
webscout/exceptions.py CHANGED
@@ -5,7 +5,9 @@ class WebscoutE(Exception):
5
5
  class RatelimitE(Exception):
6
6
  """Raised for rate limit exceeded errors during API requests."""
7
7
 
8
-
8
+ class ConversationLimitException(Exception):
9
+ """Raised for conversation limit exceeded errors during API requests."""
10
+ pass
9
11
  class TimeoutE(Exception):
10
12
  """Raised for timeout errors during API requests."""
11
13
 
@@ -23,3 +25,5 @@ class FacebookInvalidCredentialsException(Exception):
23
25
  class FacebookRegionBlocked(Exception):
24
26
  pass
25
27
 
28
+ class ModelUnloadedException(Exception):
29
+ pass
webscout/utils.py CHANGED
@@ -16,6 +16,9 @@ except ImportError:
16
16
 
17
17
  REGEX_STRIP_TAGS = re.compile("<.*?>")
18
18
 
19
+ def _expand_proxy_tb_alias(proxy: str | None) -> str | None:
20
+ """Expand "tb" to a full proxy URL if applicable."""
21
+ return "socks5://127.0.0.1:9150" if proxy == "tb" else proxy
19
22
 
20
23
  def json_dumps(obj: Any) -> str:
21
24
  try:
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
1
- __version__ = "5.9"
1
+ __version__ = "6.1"
2
2
  __prog__ = "webscout"