webscout 6.1-py3-none-any.whl → 6.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (48)
  1. webscout/AIauto.py +77 -259
  2. webscout/Agents/functioncall.py +2 -2
  3. webscout/Extra/autollama.py +37 -20
  4. webscout/Local/formats.py +4 -2
  5. webscout/Local/utils.py +37 -12
  6. webscout/Provider/Amigo.py +50 -37
  7. webscout/Provider/Deepseek.py +7 -6
  8. webscout/Provider/EDITEE.py +2 -2
  9. webscout/Provider/GPTWeb.py +1 -1
  10. webscout/Provider/Llama3.py +1 -1
  11. webscout/Provider/NinjaChat.py +2 -2
  12. webscout/Provider/OLLAMA.py +1 -1
  13. webscout/Provider/Perplexity.py +1 -1
  14. webscout/Provider/Reka.py +12 -5
  15. webscout/Provider/TTI/AIuncensored.py +103 -0
  16. webscout/Provider/TTI/__init__.py +3 -2
  17. webscout/Provider/TTI/talkai.py +116 -0
  18. webscout/Provider/TeachAnything.py +0 -3
  19. webscout/Provider/__init__.py +8 -11
  20. webscout/Provider/cerebras.py +143 -123
  21. webscout/Provider/cleeai.py +1 -1
  22. webscout/Provider/felo_search.py +1 -1
  23. webscout/Provider/gaurish.py +41 -2
  24. webscout/Provider/geminiprorealtime.py +1 -1
  25. webscout/Provider/genspark.py +1 -1
  26. webscout/Provider/julius.py +4 -3
  27. webscout/Provider/learnfastai.py +1 -1
  28. webscout/Provider/{aigames.py → llmchat.py} +74 -84
  29. webscout/Provider/promptrefine.py +3 -1
  30. webscout/Provider/talkai.py +196 -0
  31. webscout/Provider/turboseek.py +3 -8
  32. webscout/Provider/tutorai.py +1 -1
  33. webscout/__init__.py +2 -43
  34. webscout/tempid.py +4 -73
  35. webscout/version.py +1 -1
  36. webscout/webai.py +1 -1
  37. {webscout-6.1.dist-info → webscout-6.2.dist-info}/METADATA +44 -128
  38. {webscout-6.1.dist-info → webscout-6.2.dist-info}/RECORD +42 -45
  39. webscout/Provider/BasedGPT.py +0 -214
  40. webscout/Provider/ChatHub.py +0 -209
  41. webscout/Provider/TTI/amigo.py +0 -148
  42. webscout/Provider/bixin.py +0 -264
  43. webscout/Provider/xdash.py +0 -182
  44. webscout/websx_search.py +0 -19
  45. {webscout-6.1.dist-info → webscout-6.2.dist-info}/LICENSE.md +0 -0
  46. {webscout-6.1.dist-info → webscout-6.2.dist-info}/WHEEL +0 -0
  47. {webscout-6.1.dist-info → webscout-6.2.dist-info}/entry_points.txt +0 -0
  48. {webscout-6.1.dist-info → webscout-6.2.dist-info}/top_level.txt +0 -0
webscout/Provider/{aigames.py → llmchat.py} RENAMED
@@ -1,21 +1,29 @@
 import requests
-import uuid
 import json
+from typing import Any, Dict, Optional, Generator, List
 
 from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
+from webscout import exceptions
 
-class AIGameIO(Provider):
+class LLMChat(Provider):
     """
-    A class to interact with the AI-Game.io API.
+    A class to interact with the LLMChat API.
     """
 
+    AVAILABLE_MODELS = [
+        "@cf/meta/llama-3.1-70b-instruct",
+        "@cf/meta/llama-3.1-8b-instruct",
+        "@cf/meta/llama-3.2-3b-instruct",
+        "@cf/meta/llama-3.2-1b-instruct"
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
-        max_tokens: int = 600,
+        max_tokens: int = 2048,
         timeout: int = 30,
         intro: str = None,
         filepath: str = None,
@@ -23,49 +31,35 @@ class AIGameIO(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
-        system_prompt: str = "You are a Helpful ai"
+        model: str = "@cf/meta/llama-3.1-70b-instruct",  # Default model
+        system_prompt: str = "You are a helpful assistant.",
     ):
         """
-        Initializes the AI-Game.io API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for AI-Game.io.
-                Defaults to "You are a Helpful ai".
+        Initializes the LLMChat API with given parameters.
         """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = 'https://stream-chat-blmeirpipa-uc.a.run.app/streamChat'
-        self.stream_chunk_size = 64
+        self.api_endpoint = "https://llmchat.in/inference/stream"
         self.timeout = timeout
         self.last_response = {}
+        self.model = model
         self.system_prompt = system_prompt
         self.headers = {
-            'authority': 'stream-chat-blmeirpipa-uc.a.run.app',
-            'method': 'POST',
-            'path': '/streamChat',
-            'accept': 'text/event-stream',
-            'content-type': 'application/json',
-            'origin': 'https://www.ai-game.io',
-            'priority': 'u=1, i',
-            'referer': 'https://www.ai-game.io/',
+            "Content-Type": "application/json",
+            "Accept": "*/*",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0",
+            "Origin": "https://llmchat.in",
+            "Referer": "https://llmchat.in/"
         }
-
         self.__available_optimizers = (
             method
             for method in dir(Optimizers)
             if callable(getattr(Optimizers, method)) and not method.startswith("__")
         )
-        self.session.headers.update(self.headers)
         Conversation.intro = (
             AwesomePrompts().get_act(
                 act, raise_not_found=True, default=None, case_insensitive=True
@@ -86,22 +80,17 @@ class AIGameIO(Provider):
         raw: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
+    ) -> Dict[str, Any]:
+        """Chat with LLMChat
 
         Args:
-            prompt (str): Prompt to be send.
+            prompt (str): Prompt to be sent.
             stream (bool, optional): Flag for streaming response. Defaults to False.
             raw (bool, optional): Stream back raw response as received. Defaults to False.
             optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
             conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
         Returns:
-            dict : {}
-            ```json
-            {
-                "text" : "How may I assist you today?"
-            }
-            ```
+            dict: Response dictionary.
         """
         conversation_prompt = self.conversation.gen_complete_prompt(prompt)
         if optimizer:
@@ -110,49 +99,51 @@ class AIGameIO(Provider):
                 conversation_prompt if conversationally else prompt
             )
         else:
-            raise Exception(
+            raise exceptions.FailedToGenerateResponseError(
                 f"Optimizer is not one of {self.__available_optimizers}"
             )
-
+
+        url = f"{self.api_endpoint}?model={self.model}"
         payload = {
-            "history": [
-                {
-                    "role": "system",
-                    "content": self.system_prompt
-                },
-                {
-                    "role": "user",
-                    "content": conversation_prompt
-                }
-            ]
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt}
+            ],
+            "max_tokens": self.max_tokens_to_sample,
+            "stream": stream
         }
-        def for_stream():
-            response = self.session.post(
-                self.api_endpoint, headers=self.headers, data=json.dumps(payload), stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
 
-            full_response = ''
-            for line in response.iter_lines(decode_unicode=True):
-                if line.startswith("data: "):
-                    try:
-                        event_data = json.loads(line[6:])
-                        if event_data['event'] == 'text-chunk':
-                            full_response += event_data['data']['text']
-                            yield event_data['data']['text'] if raw else dict(text=full_response)
-                    except json.JSONDecodeError:
-                        pass
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
+        def for_stream():
+            try:
+                with requests.post(url, json=payload, headers=self.headers, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+                    full_response = ""
+                    for line in response.iter_lines():
+                        if line:
+                            line = line.decode('utf-8')
+                            if line.startswith('data: '):
+                                try:
+                                    data = json.loads(line[6:])
+                                    if data.get('response'):
+                                        response_text = data['response']
+                                        full_response += response_text
+                                        yield response_text if raw else dict(text=response_text)
+                                except json.JSONDecodeError:
+                                    if line.strip() != 'data: [DONE]':
+                                        print(f"Failed to parse line: {line}")
+                                    continue
+                    self.last_response.update(dict(text=full_response))
+                    self.conversation.update_chat_history(
+                        prompt, self.get_message(self.last_response)
+                    )
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
         def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
+            full_response = ""
+            for line in for_stream():
+                full_response += line['text'] if not raw else line
+            return dict(text=full_response)
 
         return for_stream() if stream else for_non_stream()
 
@@ -162,7 +153,7 @@ class AIGameIO(Provider):
         stream: bool = False,
         optimizer: str = None,
         conversationally: bool = False,
-    ) -> str:
+    ) -> str | Generator[str, None, None]:
         """Generate response `str`
         Args:
             prompt (str): Prompt to be send.
@@ -191,14 +182,14 @@ class AIGameIO(Provider):
 
         return for_stream() if stream else for_non_stream()
 
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Retrieves message only from response.
 
         Args:
             response (dict): Response generated by `self.ask`
 
         Returns:
-            str: Message extracted
+            str: Message extracted.
         """
         assert isinstance(response, dict), "Response should be of dict data-type only"
         return response["text"]
@@ -206,8 +197,7 @@ class AIGameIO(Provider):
 
 if __name__ == "__main__":
     from rich import print
-
-    ai = AIGameIO()
-    response = ai.chat("hi")
+    ai = LLMChat(model='@cf/meta/llama-3.1-70b-instruct')
+    response = ai.chat("What's the meaning of life?", stream=True)
     for chunk in response:
         print(chunk, end="", flush=True)
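
For orientation, the renamed provider is a streaming client for llmchat.in. A minimal usage sketch, mirroring the new `__main__` block (assumes webscout 6.2 is installed and the llmchat.in endpoint is reachable; any entry from `AVAILABLE_MODELS` works):

    from webscout.Provider.llmchat import LLMChat

    ai = LLMChat(model="@cf/meta/llama-3.1-8b-instruct")  # validated against AVAILABLE_MODELS
    for chunk in ai.chat("Say hello in one sentence.", stream=True):
        print(chunk, end="", flush=True)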
webscout/Provider/promptrefine.py CHANGED
@@ -12,7 +12,7 @@ class PromptRefine(Provider):
     """
     A class to interact with the PromptRefine API.
     """
-
+    AVAILABLE_MODELS = ["openai/gpt-4", "openai/gpt-4o", "openai/gpt-4-1106-preview"]
     def __init__(
         self,
         is_conversation: bool = True,
@@ -76,6 +76,8 @@ class PromptRefine(Provider):
         )
         self.conversation.history_offset = history_offset
         self.session.proxies = proxies
+        if self.model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {self.model}. Available models: {', '.join(self.AVAILABLE_MODELS)}")
 
     def ask(
         self,
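
The guard's effect, sketched under the assumption that `PromptRefine.__init__` accepts a `model` keyword (implied by the `self.model` it checks); the invalid model name below is hypothetical:

    from webscout.Provider.promptrefine import PromptRefine

    PromptRefine(model="openai/gpt-4o")   # accepted: listed in AVAILABLE_MODELS
    PromptRefine(model="openai/gpt-3.5")  # hypothetical name -> ValueError listing valid models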
webscout/Provider/talkai.py ADDED
@@ -0,0 +1,196 @@
+import uuid
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+class Talkai(Provider):
+    """
+    A class to interact with the Talkai.info API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 2048,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "gpt-4o-mini",  # Default model
+    ):
+        """
+        Initializes the Talkai.info API with given parameters.
+        """
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.api_endpoint = "https://talkai.info/chat/send/"
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.headers = {
+            'Accept': 'application/json, text/event-stream',
+            'Accept-Language': 'en-US,en;q=0.9,en-IN;q=0.8',
+            'Content-Type': 'application/json',
+            'Origin': 'https://talkai.info',
+            'Referer': 'https://talkai.info/chat/',
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0',
+            'sec-ch-ua': '"Chromium";v="130", "Microsoft Edge";v="130", "Not?A_Brand";v="99"',
+            'sec-ch-ua-platform': '"Windows"'
+        }
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+        self.session.proxies = proxies
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any]:
+        """Chat with Talkai
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict: Response dictionary.
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "type": "chat",
+            "messagesHistory": [
+                {
+                    "id": str(uuid.uuid4()),
+                    "from": "you",
+                    "content": conversation_prompt
+                }
+            ],
+            "settings": {
+                "model": self.model
+            }
+        }
+
+        def for_stream():
+            try:
+                with requests.post(self.api_endpoint, headers=self.headers, json=payload, stream=True, timeout=self.timeout) as response:
+                    response.raise_for_status()
+
+                    full_response = ""
+                    for line in response.iter_lines():
+                        if line:
+                            decoded_line = line.decode('utf-8')
+                            if 'event: trylimit' in decoded_line:
+                                break  # Stop if trylimit event is encountered
+                            if decoded_line.startswith('data: '):
+                                data = decoded_line[6:]  # Remove 'data: ' prefix
+                                full_response += data
+                                yield data if raw else dict(text=data)
+
+                    self.last_response.update(dict(text=full_response))
+                    self.conversation.update_chat_history(
+                        prompt, self.get_message(self.last_response)
+                    )
+
+            except requests.exceptions.RequestException as e:
+                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
+
+        def for_non_stream():
+            full_response = ""
+            for line in for_stream():
+                full_response += line['text'] if not raw else line
+            return dict(text=full_response)
+
+        return for_stream() if stream else for_non_stream()
+
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be send.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: Dict[str, Any]) -> str:
+        """Retrieves message only from response.
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted.
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
+
+if __name__ == "__main__":
+    t = Talkai()
+    resp = t.chat("write me about AI", stream=True)
+    for chunk in resp:
+        print(chunk, end="", flush=True)
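
A hedged usage sketch for the new provider (assumes the default `gpt-4o-mini` model is still accepted by talkai.info; a non-streaming `chat` call returns the full string):

    from webscout.Provider.talkai import Talkai

    t = Talkai(model="gpt-4o-mini")
    print(t.chat("Explain SSE in one paragraph."))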
webscout/Provider/turboseek.py CHANGED
@@ -1,6 +1,4 @@
 import requests
-
-
 import json
 
 from webscout.AIutel import Optimizers
@@ -138,16 +136,13 @@ class TurboSeek(Provider):
                 raise exceptions.FailedToGenerateResponseError(
                     f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                 )
-            print(response.text)
             streaming_text = ""
             for value in response.iter_lines(
-                decode_unicode=True,
                 chunk_size=self.stream_chunk_size,
-                delimiter="\n",
             ):
                 try:
-                    if bool(value) and value.startswith("data: "):
-                        data = json.loads(value[6:])
+                    if value and value.startswith(b"data: "):  # Check for bytes and decode
+                        data = json.loads(value[6:].decode('utf-8'))  # Decode manually
                     if "text" in data:
                         streaming_text += data["text"]
                         resp = dict(text=streaming_text)
@@ -217,4 +212,4 @@ if __name__ == '__main__':
     ai = TurboSeek()
     response = ai.chat("hi")
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
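
The TurboSeek change drops `decode_unicode=True`, so `response.iter_lines()` now yields `bytes`; comparing a `str` prefix against a `bytes` line raises `TypeError`, which is why the prefix check and the JSON parse were rewritten. A self-contained illustration (plain Python, no network; the sample payload is made up):

    import json

    line = b'data: {"text": "hello"}'  # iter_lines() yields bytes without decode_unicode=True

    try:
        line.startswith("data: ")      # str prefix against bytes
    except TypeError as err:
        print(f"TypeError: {err}")     # this is the failure mode the rewrite avoids

    print(line.startswith(b"data: "))            # True: bytes prefix against bytes
    data = json.loads(line[6:].decode("utf-8"))  # decode before parsing, as the new code does
    print(data["text"])                          # hello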
webscout/Provider/tutorai.py CHANGED
@@ -349,6 +349,6 @@ if __name__ == "__main__":
     from rich import print
 
     ai = TutorAI()
-    response = ai.chat(input(">>> "), attachment_path='photo_2024-07-06_22-19-42.jpg')
+    response = ai.chat(input(">>> "), attachment_path=None)
     for chunk in response:
         print(chunk, end="", flush=True)
webscout/__init__.py CHANGED
@@ -5,60 +5,19 @@ from .DWEBS import *
 from .transcriber import *
 from .requestsHTMLfix import *
 from .tempid import *
-from .websx_search import WEBSX
 from .LLM import VLM, LLM
 from .YTdownloader import *
 from .Bing_search import *
-import g4f
 from .YTdownloader import *
 from .Provider import *
 from .Provider.TTI import *
 from .Provider.TTS import *
-from .Extra import gguf
-from .Extra import autollama
-from .Extra import weather_ascii, weather
+from .Extra import *
+
 from .Agents import *
 
 __repo__ = "https://github.com/OE-LUCIFER/Webscout"
 
-webai = [
-    "leo",
-    "openai",
-    "opengpt",
-    "koboldai",
-    "gemini",
-    "phind",
-    "blackboxai",
-    "g4fauto",
-    "perplexity",
-    "groq",
-    "reka",
-    "cohere",
-    "yepchat",
-    "you",
-    "xjai",
-    "thinkany",
-    "berlin4h",
-    "chatgptuk",
-    "auto",
-    "poe",
-    "basedgpt",
-    "deepseek",
-    "deepinfra",
-    "vtlchat",
-    "geminiflash",
-    "geminipro",
-    "ollama",
-    "andi",
-    "llama3"
-]
-
-gpt4free_providers = [
-    provider.__name__ for provider in g4f.Provider.__providers__  # if provider.working
-]
-
-available_providers = webai + gpt4free_providers
-
 
 import logging
 logging.getLogger("webscout").addHandler(logging.NullHandler())
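
Note for downstream users: the module-level `webai`, `gpt4free_providers`, and `available_providers` lists are gone along with the `g4f` import, so 6.1-era code like the line below (a hypothetical usage) now fails:

    from webscout import available_providers  # importable in 6.1; raises ImportError in 6.2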
webscout/tempid.py CHANGED
@@ -1,3 +1,4 @@
+import json
 import aiohttp
 from dataclasses import dataclass
 from bs4 import BeautifulSoup
@@ -33,7 +34,7 @@ class MessageResponseModel:
     email_to: str | None
 
 
-class Client:
+class TempMail:
     def __init__(self):
         self._session = aiohttp.ClientSession(
             base_url="https://api.internal.temp-mail.io",
@@ -51,7 +52,7 @@ class Client:
     async def __aenter__(self):
         return self
 
-    async def __aexit__(self) -> None:
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
         await self.close()
         return None
 
@@ -80,76 +81,6 @@ class Client:
         return [MessageResponseModel(message['attachments'], message['body_html'], message['body_text'], message['cc'], message['created_at'], message['from'], message['id'], message['subject'], message['to']) for message in response_json]
 
 
-class TemporaryPhoneNumber:
-    def __init__(self):
-        self.maxpages = {"UK": 59, "US": 3, "France": 73, "Netherlands": 60, "Finland": 47}
-        self.minpages = {"UK": 20, "US": 1, "France": 20, "Netherlands": 20, "Finland": 20}
-        self.plist = {"UK": "+44", "US": "+1", "France": "+33", "Netherlands": "+31", "Finland": "+358"}
-        self.countries = {"44": "UK", "1": "US", "33": "France", "31": "Netherlands", "358": "Finland"}
-
-    def get_number(self, country="UK"):
-        if country == "Random":
-            country = random.choice(list(self.countries.values()))
-        if country not in self.countries.values():
-            raise ValueError("Unsupported Country")
-
-        session = tls_client.Session(client_identifier="chrome112", random_tls_extension_order=True)
-        maxpage = self.maxpages[country]
-        minpage = self.minpages[country]
-        page = random.randint(minpage, maxpage)
-
-        if page == 1:
-            res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number")
-        else:
-            res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number/page{page}")
-
-        soup = BeautifulSoup(res.content, "lxml")
-        numbers = []
-        p = self.plist[country]
-        for a in soup.find_all("a"):
-            a = a.get("title", "none")
-            if f"{country} Phone Number {p}" in a:
-                a = a.replace(f"{country} Phone Number ", "").replace(" ", "")
-                numbers.append(a)
-        return random.choice(numbers)
-
-    def get_messages(self, number: str):
-        number = number.replace("+", "")
-        try:
-            i = int(number)
-        except:
-            raise ValueError("Wrong Number")
-
-        country = None
-        for key, value in self.countries.items():
-            if number.startswith(key):
-                country = value
-
-        if country == None:
-            raise ValueError("Unsupported Country")
-
-        session = tls_client.Session(client_identifier="chrome112", random_tls_extension_order=True)
-        res = session.get(f"https://temporary-phone-number.com/{country}-Phone-Number/{number}")
-
-        if res.status_code == 404:
-            raise ValueError("Number doesn't exist")
-
-        soup = BeautifulSoup(res.content, "lxml")
-        messages = []
-        message = {"content": None, "frm": "", "time": ""}
-
-        for div in soup.find_all("div"):
-            divclass = div.get("class", "None")[0]
-            if divclass == "direct-chat-info":
-                message["frm"] = div.text.split("\n")[1].replace("From ", "")
-                message["time"] = div.text.split("\n")[2]
-            if divclass == "direct-chat-text":
-                message["content"] = div.text
-                messages.append(sms_message(content=message["content"], frm=message["frm"], time=message["time"]))
-                message = {"content": None, "frm": "", "time": ""}
-
-        return messages
-
 class VNEngine:
     def __init__(self) -> NoReturn:
         self.lang: str = "?lang=en"
@@ -198,4 +129,4 @@ class sms_message:
     def __init__(self, content, frm, time):
         self.content = content
         self.frm = frm
-        self.time = time
+        self.time = time
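
Two of these changes deserve a note. The `Client` → `TempMail` rename is breaking for direct importers, and the `__aexit__` fix is behavioral: Python always invokes `__aexit__(exc_type, exc_val, exc_tb)` when an `async with` block exits, so the old zero-argument signature raised a TypeError on every exit. A minimal sketch of the fixed pattern (class name and base URL from the diff; the other methods are elided):

    import asyncio
    import aiohttp

    class TempMail:
        def __init__(self):
            self._session = aiohttp.ClientSession(
                base_url="https://api.internal.temp-mail.io"
            )

        async def close(self):
            await self._session.close()

        async def __aenter__(self):
            return self

        # Python passes three exception arguments here; the 6.1 signature
        # `async def __aexit__(self) -> None` made every exit raise TypeError.
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            await self.close()
            return None

    async def main():
        async with TempMail():
            pass  # exiting this block failed under the 6.1 signature

    asyncio.run(main())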
webscout/version.py CHANGED
@@ -1,2 +1,2 @@
-__version__ = "6.1"
+__version__ = "6.2"
 __prog__ = "webscout"