webscout 6.0__py3-none-any.whl → 6.2b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (60) hide show
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/Onlinesearcher.py +22 -10
  3. webscout/Agents/functioncall.py +2 -2
  4. webscout/Bard.py +21 -21
  5. webscout/Extra/autollama.py +37 -20
  6. webscout/Local/__init__.py +6 -7
  7. webscout/Local/formats.py +404 -194
  8. webscout/Local/model.py +1074 -477
  9. webscout/Local/samplers.py +108 -144
  10. webscout/Local/thread.py +251 -410
  11. webscout/Local/ui.py +401 -0
  12. webscout/Local/utils.py +338 -136
  13. webscout/Provider/Amigo.py +51 -38
  14. webscout/Provider/Deepseek.py +7 -6
  15. webscout/Provider/EDITEE.py +2 -2
  16. webscout/Provider/GPTWeb.py +1 -1
  17. webscout/Provider/NinjaChat.py +200 -0
  18. webscout/Provider/OLLAMA.py +1 -1
  19. webscout/Provider/Perplexity.py +1 -1
  20. webscout/Provider/Reka.py +12 -5
  21. webscout/Provider/TTI/AIuncensored.py +103 -0
  22. webscout/Provider/TTI/Nexra.py +3 -3
  23. webscout/Provider/TTI/__init__.py +3 -2
  24. webscout/Provider/TTI/aiforce.py +2 -2
  25. webscout/Provider/TTI/imgninza.py +136 -0
  26. webscout/Provider/TeachAnything.py +0 -3
  27. webscout/Provider/Youchat.py +1 -1
  28. webscout/Provider/__init__.py +12 -11
  29. webscout/Provider/{ChatHub.py → aimathgpt.py} +72 -88
  30. webscout/Provider/cerebras.py +125 -118
  31. webscout/Provider/cleeai.py +1 -1
  32. webscout/Provider/felo_search.py +1 -1
  33. webscout/Provider/gaurish.py +207 -0
  34. webscout/Provider/geminiprorealtime.py +160 -0
  35. webscout/Provider/genspark.py +1 -1
  36. webscout/Provider/julius.py +8 -3
  37. webscout/Provider/learnfastai.py +1 -1
  38. webscout/Provider/promptrefine.py +3 -1
  39. webscout/Provider/turboseek.py +3 -8
  40. webscout/Provider/tutorai.py +1 -1
  41. webscout/__init__.py +2 -43
  42. webscout/exceptions.py +5 -1
  43. webscout/tempid.py +4 -73
  44. webscout/utils.py +3 -0
  45. webscout/version.py +1 -1
  46. webscout/webai.py +1 -1
  47. webscout/webscout_search.py +154 -123
  48. {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/METADATA +156 -236
  49. {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/RECORD +53 -54
  50. webscout/Local/rawdog.py +0 -946
  51. webscout/Provider/BasedGPT.py +0 -214
  52. webscout/Provider/TTI/amigo.py +0 -148
  53. webscout/Provider/aigames.py +0 -213
  54. webscout/Provider/bixin.py +0 -264
  55. webscout/Provider/xdash.py +0 -182
  56. webscout/websx_search.py +0 -19
  57. {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/LICENSE.md +0 -0
  58. {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/WHEEL +0 -0
  59. {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/entry_points.txt +0 -0
  60. {webscout-6.0.dist-info → webscout-6.2b0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,207 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ from typing import Any, Dict, Optional, Generator, List, Union
5
+ import uuid
6
+
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
10
+ from webscout.AIbase import Provider, AsyncProvider
11
+ from webscout import exceptions
12
+
13
+
14
class GaurishCerebras(Provider):
    """
    A client for the Gaurish Cerebras chat-completions proxy API.

    Sends OpenAI-style chat payloads to the proxy endpoint and parses the
    server-sent-events (SSE) stream it returns.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful assistant.",
    ):
        """Initializes the Gaurish Cerebras API client.

        Args:
            is_conversation (bool): Maintain chat history across calls.
            max_tokens (int): Token budget forwarded to the Conversation helper.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional intro text for the conversation.
            filepath (str): Optional path used to persist the chat history.
            update_file (bool): Append new turns to ``filepath`` when True.
            proxies (dict): Proxy mapping for the underlying requests session.
            history_offset (int): Maximum history length kept in prompts.
            act (str): Optional AwesomePrompts persona key.
            system_prompt (str): System message included in every request.
        """
        self.url = "https://proxy.gaurish.xyz/api/cerebras/v1/chat/completions"
        # NOTE(review): several entries below (x-ratelimit-*, x-request-id,
        # x-vercel-id, cache-control, ...) look like captured *response*
        # headers replayed as request headers; kept as-is since the endpoint
        # accepts them -- confirm whether they can be trimmed.
        self.headers = {
            "Content-Type": "application/json",
            "Accept": "text/event-stream",
            "access-control-allow-credentials": "true",
            "access-control-allow-headers": "*",
            "access-control-allow-methods": "*",
            "access-control-allow-origin": "*",
            "cache-control": "public, max-age=0, must-revalidate",
            "referrer-policy": "strict-origin-when-cross-origin",
            "content-type": "text/event-stream; charset=utf-8",
            "strict-transport-security": "max-age=3600; includeSubDomains",
            "x-content-type-options": "nosniff",
            "x-matched-path": "/api/cerebras/[...path]",
            "x-ratelimit-limit-requests-day": "30000",
            "x-ratelimit-limit-tokens-minute": "60000",
            "x-ratelimit-remaining-requests-day": "29984",
            "x-ratelimit-remaining-tokens-minute": "60000",
            "x-ratelimit-reset-requests-day": "24092.23299384117",
            "x-ratelimit-reset-tokens-minute": "32.232993841171265",
            "x-request-id": "0vWYzSEvd9Ytk5Zvl8NGRfT_Ekjm0ErInwwxlihBPyqUBAjJpyXwCg==",
            "x-vercel-id": "bom1::nsbfd-1729703907288-16e74bb1db50",
            "accept": "application/json",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "dnt": "1",
            "origin": "https://chat.gaurish.xyz",
            "priority": "u=1, i",
            "referer": "https://chat.gaurish.xyz/",
            "sec-ch-ua": "\"Chromium\";v=\"130\", \"Microsoft Edge\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0",
            "x-stainless-arch": "unknown",
            "x-stainless-lang": "js",
            "x-stainless-os": "Unknown",
            "x-stainless-package-version": "4.67.3",
            "x-stainless-retry-count": "0",
            "x-stainless-runtime": "browser:chrome",
            "x-stainless-runtime-version": "130.0.0",
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)
        self.timeout = timeout
        self.last_response = {}

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        # FIX: materialize as a tuple. The original generator expression was
        # exhausted after the first membership test, so a second call to
        # ask(optimizer=...) would always fail the `in` check.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or system_prompt or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.system_prompt = system_prompt  # Store the system prompt

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict, Generator]:
        """Send ``prompt`` to the API and return the model's reply.

        Args:
            prompt (str): User message to send.
            stream (bool): Yield chunks as they arrive when True.
            raw (bool): Yield bare text chunks instead of ``{"text": ...}`` dicts.
            optimizer (str): Optional name of an ``Optimizers`` method to apply.
            conversationally (bool): Apply the optimizer to the full
                conversation prompt instead of the bare prompt.

        Returns:
            A generator of chunk dicts (or strings when ``raw``) if ``stream``
            is True, otherwise the final ``{"text": ...}`` response dict.

        Raises:
            exceptions.FailedToGenerateResponseError: On any request failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        payload = {
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt},
            ],
            "model": "llama3.1-70b",
            "temperature": 0.75,
            "stream": stream,
        }

        def for_stream():
            try:
                with self.session.post(
                    self.url,
                    headers=self.headers,
                    data=json.dumps(payload),
                    stream=True,
                    timeout=self.timeout,
                ) as response:
                    response.raise_for_status()
                    streaming_text = ""
                    for line in response.iter_lines(decode_unicode=True):
                        if not line:
                            continue
                        line = line.strip()
                        if line.startswith("data: "):
                            line = line[6:]
                        if line == "[DONE]":
                            break
                        try:
                            data = json.loads(line)
                            if "choices" in data and data["choices"][0]["delta"].get("content"):
                                content = data["choices"][0]["delta"]["content"]
                                streaming_text += content
                                resp = dict(text=content)  # yield only the new delta
                                # FIX: honor `raw`; the original yielded the
                                # dict in both branches, making the flag a no-op.
                                yield content if raw else resp
                        except json.JSONDecodeError:
                            # Ignore keep-alive / malformed SSE fragments.
                            pass
                    self.conversation.update_chat_history(prompt, streaming_text)
                    self.last_response.update({"text": streaming_text})

            except requests.exceptions.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            # Drain the stream so history and last_response get populated.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator]:
        """Generate a response as ``str`` (or a generator of ``str`` chunks)."""

        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the text payload from an ``ask`` response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]
196
+
197
+
198
+
199
if __name__ == "__main__":
    # Smoke test: stream a short completion straight to stdout.
    from rich import print

    client = GaurishCerebras()
    try:
        for piece in client.chat("What is the capital of France?", stream=True):
            print(piece, end="", flush=True)
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -0,0 +1,160 @@
1
+ import requests
2
+ import json
3
+ import os
4
+ import secrets
5
+ from typing import Any, Dict, Optional, Generator, Union
6
+
7
+ from webscout.AIutel import Optimizers
8
+ from webscout.AIutel import Conversation
9
+ from webscout.AIutel import AwesomePrompts, sanitize_stream
10
+ from webscout.AIbase import Provider, AsyncProvider
11
+ from webscout import exceptions
12
+
13
+
14
class GeminiPro(Provider):
    """
    A client for the Minitool AI API ("Gemini-Pro" endpoint).

    The upstream API is non-streaming; streaming mode is emulated by yielding
    the complete response as a single chunk.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
    ):
        """Initializes the Minitool AI API client.

        Args:
            is_conversation (bool): Maintain chat history across calls.
            max_tokens (int): Token budget forwarded to the Conversation helper.
            timeout (int): Per-request timeout in seconds.
            intro (str): Optional intro text for the conversation.
            filepath (str): Optional path used to persist the chat history.
            update_file (bool): Append new turns to ``filepath`` when True.
            proxies (dict): Proxy mapping for the underlying requests session.
            history_offset (int): Maximum history length kept in prompts.
            act (str): Optional AwesomePrompts persona key.
        """
        self.url = "https://minitoolai.com/test_python/"
        self.headers = {
            'authority': 'minitoolai.com',
            'method': 'POST',
            'path': '/test_python/',
            'scheme': 'https',
            'accept': '*/*',
            'content-type': 'application/json',
            'dnt': '1',
            'origin': 'https://minitoolai.com',
            'priority': 'u=1, i',
            'referer': 'https://minitoolai.com/Gemini-Pro/',
            'sec-ch-ua': '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
            'x-requested-with': 'XMLHttpRequest'
        }
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        self.session.proxies.update(proxies)
        self.timeout = timeout
        self.last_response = {}

        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        # FIX: materialize as a tuple. The original generator expression was
        # exhausted after the first membership test, so a second call to
        # ask(optimizer=...) would always fail the `in` check.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict, Generator]:
        """Sends a chat completion request to the Minitool AI API.

        Args:
            prompt (str): User message to send.
            stream (bool): Yield the (single) response chunk when True.
            raw (bool): Accepted for interface parity; the chunk dict is
                yielded either way.
            optimizer (str): Optional name of an ``Optimizers`` method to apply.
            conversationally (bool): Apply the optimizer to the full
                conversation prompt instead of the bare prompt.

        Returns:
            A one-chunk generator if ``stream`` is True, otherwise the final
            ``{"text": ...}`` response dict.

        Raises:
            exceptions.FailedToGenerateResponseError: On any request failure.
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # `utoken` appears to be a throwaway client token; a fresh random hex
        # string per request matches the site's behavior.
        payload = {"utoken": secrets.token_hex(32), "message": conversation_prompt}

        def for_stream():
            # MinitoolAI doesn't support streaming; emulate with a single yield.
            try:
                response = self.session.post(self.url, json=payload, timeout=self.timeout)
                response.raise_for_status()
                data = response.json()
                text = data.get("response", "")  # default to "" if missing
                self.last_response.update({"text": text})
                # FIX: record history before yielding -- the original updated
                # it after the yield, so history was lost if the consumer
                # never resumed the generator.
                self.conversation.update_chat_history(prompt, text)
                yield {"text": text}  # the entire response as one chunk
            except requests.exceptions.RequestException as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")

        def for_non_stream():
            # Drain the stream so history and last_response get populated.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator]:
        """Generate response `str` (or a generator of ``str`` chunks)."""

        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")  # Handle missing keys
150
+
151
+
152
if __name__ == "__main__":
    # Smoke test: stream a short reply straight to stdout.
    from rich import print

    client = GeminiPro()
    try:
        for piece in client.chat("hi", stream=True):
            print(piece, end="", flush=True)
    except Exception as e:
        print(f"An error occurred: {e}")
@@ -220,6 +220,6 @@ class Genspark(Provider):
220
220
  if __name__ == '__main__':
221
221
  from rich import print
222
222
  ai = Genspark()
223
- response = ai.chat(input(">>> "))
223
+ response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
224
224
  for chunk in response:
225
225
  print(chunk, end="", flush=True)
@@ -25,9 +25,14 @@ class Julius(Provider):
25
25
  "GPT-4",
26
26
  "GPT-4o mini",
27
27
  "Command R+",
28
+ "o1-mini",
29
+ "o1-preview",
30
+
31
+
28
32
  ]
29
33
  def __init__(
30
34
  self,
35
+ api_key: str,
31
36
  is_conversation: bool = True,
32
37
  max_tokens: int = 600,
33
38
  timeout: int = 30,
@@ -65,12 +70,12 @@ class Julius(Provider):
65
70
  self.timeout = timeout
66
71
  self.last_response = {}
67
72
  self.model = model
73
+ self.api_key = api_key
68
74
  self.headers = {
69
- "authorization": "Bearer",
75
+ "authorization": f"Bearer {self.api_key}",
70
76
  "content-type": "application/json",
71
77
  "conversation-id": str(uuid.uuid4()),
72
78
  "interactive-charts": "true",
73
- "is-demo": "temp_14aabbb1-95bc-4203-a678-596258d6fdf3",
74
79
  "is-native": "false",
75
80
  "orient-split": "true",
76
81
  "request-id": str(uuid.uuid4()),
@@ -212,7 +217,7 @@ class Julius(Provider):
212
217
  return response["text"]
213
218
  if __name__ == '__main__':
214
219
  from rich import print
215
- ai = Julius(timeout=5000)
220
+ ai = Julius(api_key="",timeout=5000)
216
221
  response = ai.chat("write a poem about AI", stream=True)
217
222
  for chunk in response:
218
223
  print(chunk, end="", flush=True)
@@ -248,6 +248,6 @@ class LearnFast(Provider):
248
248
  if __name__ == "__main__":
249
249
  from rich import print
250
250
  ai = LearnFast()
251
- response = ai.chat(input(">>> "), image_path="photo_2024-07-06_22-19-42.jpg")
251
+ response = ai.chat(input(">>> "), image_path=None)
252
252
  for chunk in response:
253
253
  print(chunk, end="", flush=True)
@@ -12,7 +12,7 @@ class PromptRefine(Provider):
12
12
  """
13
13
  A class to interact with the PromptRefine API.
14
14
  """
15
-
15
+ AVAILABLE_MODELS = ["openai/gpt-4", "openai/gpt-4o", "openai/gpt-4-1106-preview"]
16
16
  def __init__(
17
17
  self,
18
18
  is_conversation: bool = True,
@@ -76,6 +76,8 @@ class PromptRefine(Provider):
76
76
  )
77
77
  self.conversation.history_offset = history_offset
78
78
  self.session.proxies = proxies
79
+ if self.model not in self.AVAILABLE_MODELS:
80
+ raise ValueError(f"Invalid model: {self.model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
79
81
 
80
82
  def ask(
81
83
  self,
@@ -1,6 +1,4 @@
1
1
  import requests
2
-
3
-
4
2
  import json
5
3
 
6
4
  from webscout.AIutel import Optimizers
@@ -138,16 +136,13 @@ class TurboSeek(Provider):
138
136
  raise exceptions.FailedToGenerateResponseError(
139
137
  f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
140
138
  )
141
- print(response.text)
142
139
  streaming_text = ""
143
140
  for value in response.iter_lines(
144
- decode_unicode=True,
145
141
  chunk_size=self.stream_chunk_size,
146
- delimiter="\n",
147
142
  ):
148
143
  try:
149
- if bool(value) and value.startswith("data: "):
150
- data = json.loads(value[6:])
144
+ if value and value.startswith(b"data: "): #Check for bytes and decode
145
+ data = json.loads(value[6:].decode('utf-8')) # Decode manually
151
146
  if "text" in data:
152
147
  streaming_text += data["text"]
153
148
  resp = dict(text=streaming_text)
@@ -217,4 +212,4 @@ if __name__ == '__main__':
217
212
  ai = TurboSeek()
218
213
  response = ai.chat("hi")
219
214
  for chunk in response:
220
- print(chunk, end="", flush=True)
215
+ print(chunk, end="", flush=True)
@@ -349,6 +349,6 @@ if __name__ == "__main__":
349
349
  from rich import print
350
350
 
351
351
  ai = TutorAI()
352
- response = ai.chat(input(">>> "), attachment_path='photo_2024-07-06_22-19-42.jpg')
352
+ response = ai.chat(input(">>> "), attachment_path=None)
353
353
  for chunk in response:
354
354
  print(chunk, end="", flush=True)
webscout/__init__.py CHANGED
@@ -5,60 +5,19 @@ from .DWEBS import *
5
5
  from .transcriber import *
6
6
  from .requestsHTMLfix import *
7
7
  from .tempid import *
8
- from .websx_search import WEBSX
9
8
  from .LLM import VLM, LLM
10
9
  from .YTdownloader import *
11
10
  from .Bing_search import *
12
- import g4f
13
11
  from .YTdownloader import *
14
12
  from .Provider import *
15
13
  from .Provider.TTI import *
16
14
  from .Provider.TTS import *
17
- from .Extra import gguf
18
- from .Extra import autollama
19
- from .Extra import weather_ascii, weather
15
+ from .Extra import *
16
+
20
17
  from .Agents import *
21
18
 
22
19
  __repo__ = "https://github.com/OE-LUCIFER/Webscout"
23
20
 
24
- webai = [
25
- "leo",
26
- "openai",
27
- "opengpt",
28
- "koboldai",
29
- "gemini",
30
- "phind",
31
- "blackboxai",
32
- "g4fauto",
33
- "perplexity",
34
- "groq",
35
- "reka",
36
- "cohere",
37
- "yepchat",
38
- "you",
39
- "xjai",
40
- "thinkany",
41
- "berlin4h",
42
- "chatgptuk",
43
- "auto",
44
- "poe",
45
- "basedgpt",
46
- "deepseek",
47
- "deepinfra",
48
- "vtlchat",
49
- "geminiflash",
50
- "geminipro",
51
- "ollama",
52
- "andi",
53
- "llama3"
54
- ]
55
-
56
- gpt4free_providers = [
57
- provider.__name__ for provider in g4f.Provider.__providers__ # if provider.working
58
- ]
59
-
60
- available_providers = webai + gpt4free_providers
61
-
62
21
 
63
22
  import logging
64
23
  logging.getLogger("webscout").addHandler(logging.NullHandler())
webscout/exceptions.py CHANGED
@@ -5,7 +5,9 @@ class WebscoutE(Exception):
5
5
  class RatelimitE(Exception):
6
6
  """Raised for rate limit exceeded errors during API requests."""
7
7
 
8
-
8
+ class ConversationLimitException(Exception):
9
+ """Raised for conversation limit exceeded errors during API requests."""
10
+ pass
9
11
  class TimeoutE(Exception):
10
12
  """Raised for timeout errors during API requests."""
11
13
 
@@ -23,3 +25,5 @@ class FacebookInvalidCredentialsException(Exception):
23
25
  class FacebookRegionBlocked(Exception):
24
26
  pass
25
27
 
28
+ class ModelUnloadedException(Exception):
29
+ pass
webscout/tempid.py CHANGED
@@ -1,3 +1,4 @@
1
+ import json
1
2
  import aiohttp
2
3
  from dataclasses import dataclass
3
4
  from bs4 import BeautifulSoup
@@ -33,7 +34,7 @@ class MessageResponseModel:
33
34
  email_to: str | None
34
35
 
35
36
 
36
- class Client:
37
+ class TempMail:
37
38
  def __init__(self):
38
39
  self._session = aiohttp.ClientSession(
39
40
  base_url="https://api.internal.temp-mail.io",
@@ -51,7 +52,7 @@ class Client:
51
52
  async def __aenter__(self):
52
53
  return self
53
54
 
54
- async def __aexit__(self) -> None:
55
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
55
56
  await self.close()
56
57
  return None
57
58
 
@@ -80,76 +81,6 @@ class Client:
80
81
  return [MessageResponseModel(message['attachments'], message['body_html'], message['body_text'], message['cc'], message['created_at'], message['from'], message['id'], message['subject'], message['to']) for message in response_json]
81
82
 
82
83
 
83
- class TemporaryPhoneNumber:
84
- def __init__(self):
85
- self.maxpages = {"UK": 59, "US": 3, "France": 73, "Netherlands": 60, "Finland": 47}
86
- self.minpages = {"UK": 20, "US": 1, "France": 20, "Netherlands": 20, "Finland": 20}
87
- self.plist = {"UK": "+44", "US": "+1", "France": "+33", "Netherlands": "+31", "Finland": "+358"}
88
- self.countries = {"44": "UK", "1": "US", "33": "France", "31": "Netherlands", "358": "Finland"}
89
-
90
- def get_number(self, country="UK"):
91
- if country == "Random":
92
- country = random.choice(list(self.countries.values()))
93
- if country not in self.countries.values():
94
- raise ValueError("Unsupported Country")
95
-
96
- session = tls_client.Session(client_identifier="chrome112", random_tls_extension_order=True)
97
- maxpage = self.maxpages[country]
98
- minpage = self.minpages[country]
99
- page = random.randint(minpage, maxpage)
100
-
101
- if page == 1:
102
- res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number")
103
- else:
104
- res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number/page{page}")
105
-
106
- soup = BeautifulSoup(res.content, "lxml")
107
- numbers = []
108
- p = self.plist[country]
109
- for a in soup.find_all("a"):
110
- a = a.get("title", "none")
111
- if f"{country} Phone Number {p}" in a:
112
- a = a.replace(f"{country} Phone Number ", "").replace(" ", "")
113
- numbers.append(a)
114
- return random.choice(numbers)
115
-
116
- def get_messages(self, number: str):
117
- number = number.replace("+", "")
118
- try:
119
- i = int(number)
120
- except:
121
- raise ValueError("Wrong Number")
122
-
123
- country = None
124
- for key, value in self.countries.items():
125
- if number.startswith(key):
126
- country = value
127
-
128
- if country == None:
129
- raise ValueError("Unsupported Country")
130
-
131
- session = tls_client.Session(client_identifier="chrome112", random_tls_extension_order=True)
132
- res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number/{number}")
133
-
134
- if res.status_code == 404:
135
- raise ValueError("Number doesn't exist")
136
-
137
- soup = BeautifulSoup(res.content, "lxml")
138
- messages = []
139
- message = {"content": None, "frm": "", "time": ""}
140
-
141
- for div in soup.find_all("div"):
142
- divclass = div.get("class", "None")[0]
143
- if divclass == "direct-chat-info":
144
- message["frm"] = div.text.split("\n")[1].replace("From ", "")
145
- message["time"] = div.text.split("\n")[2]
146
- if divclass == "direct-chat-text":
147
- message["content"] = div.text
148
- messages.append(sms_message(content=message["content"], frm=message["frm"], time=message["time"]))
149
- message = {"content": None, "frm": "", "time": ""}
150
-
151
- return messages
152
-
153
84
  class VNEngine:
154
85
  def __init__(self) -> NoReturn:
155
86
  self.lang: str = "?lang=en"
@@ -198,4 +129,4 @@ class sms_message:
198
129
  def __init__(self, content, frm, time):
199
130
  self.content = content
200
131
  self.frm = frm
201
- self.time = time
132
+ self.time = time
webscout/utils.py CHANGED
@@ -16,6 +16,9 @@ except ImportError:
16
16
 
17
17
  REGEX_STRIP_TAGS = re.compile("<.*?>")
18
18
 
19
+ def _expand_proxy_tb_alias(proxy: str | None) -> str | None:
20
+ """Expand "tb" to a full proxy URL if applicable."""
21
+ return "socks5://127.0.0.1:9150" if proxy == "tb" else proxy
19
22
 
20
23
  def json_dumps(obj: Any) -> str:
21
24
  try: