webscout-4.3-py3-none-any.whl → webscout-4.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

webscout/AIauto.py CHANGED
@@ -1,8 +1,7 @@
  from webscout.AIbase import Provider, AsyncProvider
  from webscout.Provider.ThinkAnyAI import ThinkAnyAI
  from webscout.Provider.Xjai import Xjai
- from webscout.Provider.Llama import LLAMA2
- from webscout.Provider.Llama import AsyncLLAMA2
+ from webscout.Provider.Llama import LLAMA
  from webscout.Provider.Leo import LEO
  from webscout.Provider.Leo import AsyncLEO
  from webscout.Provider.Koboldai import KOBOLDAI
@@ -44,7 +43,7 @@ provider_map: dict[
      Union[
          ThinkAnyAI,
          Xjai,
-         LLAMA2,
+         LLAMA,
          LEO,
          KOBOLDAI,
          OPENGPT,
@@ -69,7 +68,7 @@ provider_map: dict[
  ] = {
      "ThinkAnyAI": ThinkAnyAI,
      "Xjai": Xjai,
-     "LLAMA2": LLAMA2,
+     "LLAMA2": LLAMA,
      "LEO": LEO,
      "KOBOLDAI": KOBOLDAI,
      "OPENGPT": OPENGPT,
@@ -124,7 +123,7 @@ class AUTO(Provider):
          self.provider: Union[
              ThinkAnyAI,
              Xjai,
-             LLAMA2,
+             LLAMA,
              LEO,
              KOBOLDAI,
              OPENGPT,
@@ -353,13 +352,12 @@ class AsyncAUTO(AsyncProvider):
              AsyncOPENGPT,
              AsyncKOBOLDAI,
              AsyncPhindSearch,
-             AsyncLLAMA2,
              AsyncBLACKBOXAI,
              AsyncGPT4FREE,
              AsyncLEO,
              ThinkAnyAI,
              Xjai,
-             LLAMA2,
+             LLAMA,
              LEO,
              KOBOLDAI,
              OPENGPT,
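
Note: the rename is backward compatible at the call-site level, because the provider_map key "LLAMA2" now points at the new LLAMA class. A minimal sketch of what that means for callers (the map below is trimmed to the one affected entry):

    from webscout.Provider.Llama import LLAMA

    # As in the diff above, the old registry key survives the rename, so
    # code that selects the provider via the string "LLAMA2" keeps working.
    provider_map = {"LLAMA2": LLAMA}
    assert provider_map["LLAMA2"] is LLAMA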
webscout/Extra/autollama.py CHANGED
@@ -1,6 +1,14 @@
  import subprocess
  import argparse
  import os
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.progress import track
+ from yaspin import yaspin
+ from pyfiglet import figlet_format
+ import time
+
+ console = Console()

  def autollama(model_path, gguf_file):
      """Manages models with Ollama using the autollama.sh script.
@@ -9,6 +17,7 @@ def autollama(model_path, gguf_file):
          model_path (str): The path to the Hugging Face model.
          gguf_file (str): The name of the GGUF file.
      """
+     console.print(f"[bold green]{figlet_format('Autollama')}[/]\n", justify="center")

      # Check if autollama.sh exists in the current working directory
      script_path = os.path.join(os.getcwd(), "autollama.sh")
@@ -172,25 +181,27 @@ echo "Use Ollama run $MODEL_NAME"
      # Execute the command
      process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

-     # Print the output and error in real-time
-     for line in process.stdout:
-         print(line, end='')
-
-     for line in process.stderr:
-         print(line, end='')
+     for line in iter(process.stdout.readline, ''):
+         console.print(Panel(line.strip(), title="Autollama Output", expand=False))

+     for line in iter(process.stderr.readline, ''):
+         console.print(Panel(line.strip(), title="Autollama Errors (if any)", expand=False))
+
      process.wait()
+     console.print("[green]Model is ready![/]")

  def main():
      parser = argparse.ArgumentParser(description='Automatically create and run an Ollama model in Ollama')
-     parser.add_argument('-m', '--model_path', required=True, help='Set the hunggingface model id to the Hugging Face model')
+     parser.add_argument('-m', '--model_path', required=True, help='Set the huggingface model id to the Hugging Face model')
      parser.add_argument('-g', '--gguf_file', required=True, help='Set the GGUF file name')
      args = parser.parse_args()

      try:
-         autollama(args.model_path, args.gguf_file)
+         with yaspin(text="Processing...") as spinner:
+             autollama(args.model_path, args.gguf_file)
+             spinner.ok("Done!")
      except Exception as e:
-         print(f"Error: {e}")
+         console.print(f"[red]Error: {e}[/]")
          exit(1)

  if __name__ == "__main__":
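
Note: switching from `for line in process.stdout` to `iter(process.stdout.readline, '')` avoids the file iterator's read-ahead buffering, so each line is shown as soon as the script emits it. A self-contained sketch of the pattern (the echo command is just a stand-in for autollama.sh and assumes a POSIX system):

    import subprocess

    # Stream a child process's stdout line by line; iter() with the
    # sentinel "" stops once readline() returns an empty string at EOF.
    proc = subprocess.Popen(
        ["echo", "Model is ready!"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
    )
    for line in iter(proc.stdout.readline, ""):
        print(line.strip())
    proc.wait()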
webscout/Extra/gguf.py CHANGED
@@ -1,7 +1,10 @@
  # webscout/Extra/gguf.py
  import subprocess
- import argparse
  import os
+ from pyfiglet import figlet_format
+ from rich.console import Console
+
+ console = Console()

  def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5_k_m"):
      """Converts and quantizes a Hugging Face model to GGUF format.
@@ -17,6 +20,7 @@ def convert(model_id, username=None, token=None, quantization_methods="q4_k_m,q5
          ValueError: If an invalid quantization method is provided.
      """

+     console.print(f"[bold green]{figlet_format('GGUF Converter')}[/]\n", justify="center")
      # List of valid quantization methods
      valid_methods = [
          "q2_k", "q3_k_l", "q3_k_m", "q3_k_s",
@@ -220,21 +224,3 @@ echo "Script completed."
      process.wait()


- def main():
-     parser = argparse.ArgumentParser(description='Convert and quantize model using gguf.sh')
-     parser.add_argument('-m', '--model_id', required=True, help='Set the HF model ID (e.g., "google/flan-t5-xl")')
-     parser.add_argument('-u', '--username', help='Set your Hugging Face username (required for uploads)')
-     parser.add_argument('-t', '--token', help='Set your Hugging Face API token (required for uploads)')
-     parser.add_argument('-q', '--quantization_methods', default="q4_k_m,q5_k_m",
-                         help='Comma-separated quantization methods (default: q4_k_m,q5_k_m). Valid methods: q2_k, q3_k_l, q3_k_m, q3_k_s, q4_0, q4_1, q4_k_m, q4_k_s, q5_0, q5_1, q5_k_m, q5_k_s, q6_k, q8_0')
-
-     args = parser.parse_args()
-
-     try:
-         convert(args.model_id, args.username, args.token, args.quantization_methods)
-     except ValueError as e:
-         print(e)
-         exit(1)
-
- if __name__ == "__main__":
-     main()
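
Note: with the argparse entry point deleted, gguf.py is now library-only and `convert` is called directly. A hedged sketch (the model id is purely illustrative, and the call actually drives the bundled gguf.sh script, so it is not a no-op):

    from webscout.Extra import gguf

    # Signature as shown above: convert(model_id, username=None, token=None,
    # quantization_methods="q4_k_m,q5_k_m"); username/token only matter for uploads.
    gguf.convert("someuser/some-model", quantization_methods="q4_k_m")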
webscout/Extra/weather.py CHANGED
@@ -1,4 +1,10 @@
  import requests
+ from rich.console import Console
+ from rich.table import Table
+ from yaspin import yaspin
+ from pyfiglet import figlet_format
+
+ console = Console()

  def get(location):
      """Fetches weather data for the given location.
@@ -12,7 +18,9 @@ def get(location):
      """
      url = f"https://wttr.in/{location}?format=j1"

-     response = requests.get(url)
+     with yaspin(text="Fetching weather data...") as spinner:
+         response = requests.get(url)
+         spinner.ok("✅ ")

      if response.status_code == 200:
          return response.json()
@@ -27,23 +35,33 @@ def print_weather(weather_data):
          or an error message.
      """
      if isinstance(weather_data, str):
-         print(weather_data)
+         console.print(f"[bold red]Error:[/] {weather_data}")
          return

      current = weather_data['current_condition'][0]
      location_name = weather_data['nearest_area'][0]['areaName'][0]['value']

-     print(f"Weather in {location_name}:")
-     print(f"Temperature: {current['temp_C']}°C / {current['temp_F']}°F")
-     print(f"Condition: {current['weatherDesc'][0]['value']}")
-     print(f"Humidity: {current['humidity']}%")
-     print(f"Wind: {current['windspeedKmph']} km/h, {current['winddir16Point']}")
+     console.print(f"[bold blue]\n{figlet_format('Weather Report')}[/]\n", justify="center")
+     console.print(f"[bold green]Weather in {location_name}:[/]\n")

+     table = Table(show_header=False, show_lines=True)
+     table.add_row("Temperature:", f"{current['temp_C']}°C / {current['temp_F']}°F")
+     table.add_row("Condition:", current['weatherDesc'][0]['value'])
+     table.add_row("Humidity:", f"{current['humidity']}%")
+     table.add_row("Wind:", f"{current['windspeedKmph']} km/h, {current['winddir16Point']}")
+     console.print(table)

-     print("\nForecast:")
+     console.print(f"\n[bold green]Forecast:[/]")
+     table = Table(show_header=True, header_style="bold cyan")
+     table.add_column("Date", style="dim", width=12)
+     table.add_column("Temperature Range")
+     table.add_column("Description")
+
      for day in weather_data['weather']:
          date = day['date']
          max_temp = day['maxtempC']
          min_temp = day['mintempC']
          desc = day['hourly'][4]['weatherDesc'][0]['value']
-         print(f"{date}: {min_temp}°C to {max_temp}°C, {desc}")
+         table.add_row(date, f"{min_temp}°C to {max_temp}°C", desc)
+     console.print(table)
+
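
Note: get() still returns the parsed wttr.in JSON (or an error string), while print_weather() now renders it as rich tables instead of plain print() calls. A short usage sketch (the location is illustrative and the call hits wttr.in live):

    from webscout.Extra import weather

    data = weather.get("London")   # dict on HTTP 200, error string otherwise
    weather.print_weather(data)    # figlet banner, then current + forecast tables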
webscout/Extra/weather_ascii.py CHANGED
@@ -1,5 +1,8 @@
  import requests
+ from rich.console import Console
+ from pyfiglet import figlet_format

+ console = Console()
  def get(location):
      """Fetches ASCII art weather data for the given location.
      Args:
@@ -9,6 +12,7 @@ def get(location):
          str: ASCII art weather report if the request is successful,
          otherwise an error message.
      """
+     console.print(f"[bold green]{figlet_format('Weather')}[/]\n", justify="center")
      url = f"https://wttr.in/{location}"
      response = requests.get(url, headers={'User-Agent': 'curl'})

@@ -16,3 +20,4 @@ def get(location):
          return "\n".join(response.text.splitlines()[:-1])
      else:
          return f"Error: Unable to fetch weather data. Status code: {response.status_code}"
+
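
Note: the ASCII variant still returns plain text; only the banner is new. A one-line usage sketch (location illustrative, live request):

    from webscout.Extra import weather_ascii

    print(weather_ascii.get("Paris"))  # wttr.in ASCII art report as a string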
webscout/GoogleS.py ADDED
@@ -0,0 +1,342 @@
+ import os
+ import random
+ import sys
+ import time
+ import ssl
+
+ if sys.version_info[0] > 2:
+     from http.cookiejar import LWPCookieJar
+     from urllib.request import Request, urlopen
+     from urllib.parse import quote_plus, urlparse, parse_qs
+ else:
+     from cookielib import LWPCookieJar
+     from urllib import quote_plus
+     from urllib2 import Request, urlopen
+     from urlparse import urlparse, parse_qs
+
+ try:
+     from bs4 import BeautifulSoup
+     is_bs4 = True
+ except ImportError:
+     from BeautifulSoup import BeautifulSoup  # type: ignore
+     is_bs4 = False
+
+ __all__ = [
+
+     # Main search function.
+     'search',
+
+     # Shortcut for "get lucky" search.
+     'lucky',
+
+     # Miscellaneous utility functions.
+     'get_random_user_agent', 'get_tbs',
+ ]
+
+ # URL templates to make Google searches.
+ url_home = "https://www.google.%(tld)s/"
+ url_search = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+              "btnG=Google+Search&tbs=%(tbs)s&safe=%(safe)s&" \
+              "cr=%(country)s"
+ url_next_page = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+                 "start=%(start)d&tbs=%(tbs)s&safe=%(safe)s&" \
+                 "cr=%(country)s"
+ url_search_num = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+                  "num=%(num)d&btnG=Google+Search&tbs=%(tbs)s&safe=%(safe)s&" \
+                  "cr=%(country)s"
+ url_next_page_num = "https://www.google.%(tld)s/search?hl=%(lang)s&" \
+                     "q=%(query)s&num=%(num)d&start=%(start)d&tbs=%(tbs)s&" \
+                     "safe=%(safe)s&cr=%(country)s"
+ url_parameters = (
+     'hl', 'q', 'num', 'btnG', 'start', 'tbs', 'safe', 'cr')
+
+ # Cookie jar. Stored at the user's home folder.
+ # If the cookie jar is inaccessible, the errors are ignored.
+ home_folder = os.getenv('HOME')
+ if not home_folder:
+     home_folder = os.getenv('USERHOME')
+     if not home_folder:
+         home_folder = '.'  # Use the current folder on error.
+ cookie_jar = LWPCookieJar(os.path.join(home_folder, '.google-cookie'))
+ try:
+     cookie_jar.load()
+ except Exception:
+     pass
+
+ # Default user agent, unless instructed by the user to change it.
+ USER_AGENT = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)'
+
+ try:
+     install_folder = os.path.abspath(os.path.split(__file__)[0])
+     try:
+         user_agents_file = os.path.join(install_folder, 'user_agents.txt.gz')
+         import gzip
+         fp = gzip.open(user_agents_file, 'rb')
+         try:
+             user_agents_list = [_.strip() for _ in fp.readlines()]
+         finally:
+             fp.close()
+             del fp
+     except Exception:
+         user_agents_file = os.path.join(install_folder, 'user_agents.txt')
+         with open(user_agents_file) as fp:
+             user_agents_list = [_.strip() for _ in fp.readlines()]
+ except Exception:
+     user_agents_list = [USER_AGENT]
+
+
+ # Get a random user agent.
+ def get_random_user_agent():
+     """
+     Get a random user agent string.
+
+     :rtype: str
+     :return: Random user agent string.
+     """
+     return random.choice(user_agents_list)
+
+
+ # Helper function to format the tbs parameter.
+ def get_tbs(from_date, to_date):
+     """
+     Helper function to format the tbs parameter.
+
+     :param datetime.date from_date: Python date object.
+     :param datetime.date to_date: Python date object.
+
+     :rtype: str
+     :return: Dates encoded in tbs format.
+     """
+     from_date = from_date.strftime('%m/%d/%Y')
+     to_date = to_date.strftime('%m/%d/%Y')
+     return 'cdr:1,cd_min:%(from_date)s,cd_max:%(to_date)s' % vars()
+
+
+ # Request the given URL and return the response page, using the cookie jar.
+ # If the cookie jar is inaccessible, the errors are ignored.
+ def get_page(url, user_agent=None, verify_ssl=True):
+     """
+     Request the given URL and return the response page, using the cookie jar.
+
+     :param str url: URL to retrieve.
+     :param str user_agent: User agent for the HTTP requests.
+         Use None for the default.
+     :param bool verify_ssl: Verify the SSL certificate to prevent
+         traffic interception attacks. Defaults to True.
+
+     :rtype: str
+     :return: Web page retrieved for the given URL.
+
+     :raises IOError: An exception is raised on error.
+     :raises urllib2.URLError: An exception is raised on error.
+     :raises urllib2.HTTPError: An exception is raised on error.
+     """
+     if user_agent is None:
+         user_agent = USER_AGENT
+     request = Request(url)
+     request.add_header('User-Agent', user_agent)
+     cookie_jar.add_cookie_header(request)
+     if verify_ssl:
+         response = urlopen(request)
+     else:
+         context = ssl._create_unverified_context()
+         response = urlopen(request, context=context)
+     cookie_jar.extract_cookies(response, request)
+     html = response.read()
+     response.close()
+     try:
+         cookie_jar.save()
+     except Exception:
+         pass
+     return html
+
+
+ # Filter links found in the Google result pages HTML code.
+ # Returns None if the link doesn't yield a valid result.
+ def filter_result(link):
+     try:
+
+         # Decode hidden URLs.
+         if link.startswith('/url?'):
+             o = urlparse(link, 'http')
+             link = parse_qs(o.query)['q'][0]
+
+         # Valid results are absolute URLs not pointing to a Google domain,
+         # like images.google.com or googleusercontent.com for example.
+         # TODO this could be improved!
+         o = urlparse(link, 'http')
+         if o.netloc and 'google' not in o.netloc:
+             return link
+
+     # On error, return None.
+     except Exception:
+         pass
+
+
+ # Returns a generator that yields URLs.
+ def search(query, tld='com', lang='en', tbs='0', safe='off', num=10, start=0,
+            stop=None, pause=2.0, country='', extra_params=None,
+            user_agent=None, verify_ssl=True):
+     """
+     Search the given query string using Google.
+
+     :param str query: Query string. Must NOT be url-encoded.
+     :param str tld: Top level domain.
+     :param str lang: Language.
+     :param str tbs: Time limits (i.e "qdr:h" => last hour,
+         "qdr:d" => last 24 hours, "qdr:m" => last month).
+     :param str safe: Safe search.
+     :param int num: Number of results per page.
+     :param int start: First result to retrieve.
+     :param int stop: Last result to retrieve.
+         Use None to keep searching forever.
+     :param float pause: Lapse to wait between HTTP requests.
+         A lapse too long will make the search slow, but a lapse too short may
+         cause Google to block your IP. Your mileage may vary!
+     :param str country: Country or region to focus the search on. Similar to
+         changing the TLD, but does not yield exactly the same results.
+         Only Google knows why...
+     :param dict extra_params: A dictionary of extra HTTP GET
+         parameters, which must be URL encoded. For example if you don't want
+         Google to filter similar results you can set the extra_params to
+         {'filter': '0'} which will append '&filter=0' to every query.
+     :param str user_agent: User agent for the HTTP requests.
+         Use None for the default.
+     :param bool verify_ssl: Verify the SSL certificate to prevent
+         traffic interception attacks. Defaults to True.
+
+     :rtype: generator of str
+     :return: Generator (iterator) that yields found URLs.
+         If the stop parameter is None the iterator will loop forever.
+     """
+     # Set of hashes for the results found.
+     # This is used to avoid repeated results.
+     hashes = set()
+
+     # Count the number of links yielded.
+     count = 0
+
+     # Prepare the search string.
+     query = quote_plus(query)
+
+     # If no extra_params is given, create an empty dictionary.
+     # We should avoid using an empty dictionary as a default value
+     # in a function parameter in Python.
+     if not extra_params:
+         extra_params = {}
+
+     # Check extra_params for overlapping.
+     for builtin_param in url_parameters:
+         if builtin_param in extra_params.keys():
+             raise ValueError(
+                 'GET parameter "%s" is overlapping with \
+                 the built-in GET parameter',
+                 builtin_param
+             )
+
+     # Grab the cookie from the home page.
+     get_page(url_home % vars(), user_agent, verify_ssl)
+
+     # Prepare the URL of the first request.
+     if start:
+         if num == 10:
+             url = url_next_page % vars()
+         else:
+             url = url_next_page_num % vars()
+     else:
+         if num == 10:
+             url = url_search % vars()
+         else:
+             url = url_search_num % vars()
+
+     # Loop until we reach the maximum result, if any (otherwise, loop forever).
+     while not stop or count < stop:
+
+         # Remeber last count to detect the end of results.
+         last_count = count
+
+         # Append extra GET parameters to the URL.
+         # This is done on every iteration because we're
+         # rebuilding the entire URL at the end of this loop.
+         for k, v in extra_params.items():
+             k = quote_plus(k)
+             v = quote_plus(v)
+             url = url + ('&%s=%s' % (k, v))
+
+         # Sleep between requests.
+         # Keeps Google from banning you for making too many requests.
+         time.sleep(pause)
+
+         # Request the Google Search results page.
+         html = get_page(url, user_agent, verify_ssl)
+
+         # Parse the response and get every anchored URL.
+         if is_bs4:
+             soup = BeautifulSoup(html, 'html.parser')
+         else:
+             soup = BeautifulSoup(html)
+         try:
+             anchors = soup.find(id='search').findAll('a')
+             # Sometimes (depending on the User-agent) there is
+             # no id "search" in html response...
+         except AttributeError:
+             # Remove links of the top bar.
+             gbar = soup.find(id='gbar')
+             if gbar:
+                 gbar.clear()
+             anchors = soup.findAll('a')
+
+         # Process every anchored URL.
+         for a in anchors:
+
+             # Get the URL from the anchor tag.
+             try:
+                 link = a['href']
+             except KeyError:
+                 continue
+
+             # Filter invalid links and links pointing to Google itself.
+             link = filter_result(link)
+             if not link:
+                 continue
+
+             # Discard repeated results.
+             h = hash(link)
+             if h in hashes:
+                 continue
+             hashes.add(h)
+
+             # Yield the result.
+             yield link
+
+             # Increase the results counter.
+             # If we reached the limit, stop.
+             count += 1
+             if stop and count >= stop:
+                 return
+
+         # End if there are no more results.
+         # XXX TODO review this logic, not sure if this is still true!
+         if last_count == count:
+             break
+
+         # Prepare the URL for the next request.
+         start += num
+         if num == 10:
+             url = url_next_page % vars()
+         else:
+             url = url_next_page_num % vars()
+
+
+ # Shortcut to single-item search.
+ # Evaluates the iterator to return the single URL as a string.
+ def lucky(*args, **kwargs):
+     """
+     Shortcut to single-item search.
+
+     Same arguments as the main search function, but the return value changes.
+
+     :rtype: str
+     :return: URL found by Google.
+     """
+     return next(search(*args, **kwargs))
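
Note: GoogleS reproduces the classic googlesearch-style API, so results arrive as a lazy generator of URLs. A minimal usage sketch (the query is illustrative, and live results depend on Google accepting the scripted requests):

    from webscout.GoogleS import search, lucky

    # stop=5 caps the number of yielded URLs; pause spaces out requests to
    # reduce the chance of Google blocking the client.
    for url in search("python web scraping", stop=5, pause=2.0):
        print(url)

    # lucky() evaluates the same generator and returns only the first URL:
    # first = lucky("python web scraping")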
webscout/__init__.py CHANGED
@@ -5,16 +5,16 @@ from .DWEBS import *
  from .transcriber import transcriber
  from .voice import play_audio
  from .websx_search import WEBSX
-
  from .LLM import VLM, LLM
  from .YTdownloader import *
- # from .Local import *
+ from .GoogleS import *
  import g4f
  from .YTdownloader import *
  from .Provider import *
  from .Extra import gguf
  from .Extra import autollama
  from .Extra import weather_ascii, weather
+
  __repo__ = "https://github.com/OE-LUCIFER/Webscout"

  webai = [
@@ -58,12 +58,15 @@ __all__ = [
      "WEBS",
      "AsyncWEBS",
      "__version__",
-     "DeepWEBS",
+     "DWEBS",
      "transcriber",
      "play_audio",
      "TempMailClient",
      "TemporaryPhoneNumber",
      "LLM",
+     "YTdownloader",
+     "WEBSX",
+     "VLM",
      # Localai models and utilities
      # "Model",
      # "Thread",
webscout/cli.py CHANGED
@@ -20,8 +20,10 @@ from rich.table import Table
  from rich.style import Style
  from rich.text import Text
  from rich.align import Align
- from rich.progress import track
+ from rich.progress import track, Progress
  from rich.prompt import Prompt, Confirm
+ from rich.columns import Columns
+ from pyfiglet import figlet_format

  logger = logging.getLogger(__name__)

@@ -45,13 +47,12 @@ COLORS = {
  }

  def _print_data(data):
-     """Prints data using rich panels and markdown, asynchronously."""
+     """Prints data using rich panels and markdown."""
      console = Console()
      if data:
          for i, e in enumerate(data, start=1):
-             # Create a table for each result
-             table = Table(title=f"{i}.", show_lines=True)
-             table.add_column("Key", style="cyan", no_wrap=True)
+             table = Table(show_header=False, show_lines=True, expand=True, box=None)  # Removed duplicate title
+             table.add_column("Key", style="cyan", no_wrap=True, width=15)
              table.add_column("Value", style="white")

              for j, (k, v) in enumerate(e.items(), start=1):
@@ -59,21 +60,22 @@ def _print_data(data):
                      width = 300 if k in ("content", "href", "image", "source", "thumbnail", "url") else 78
                      k = "language" if k == "detected_language" else k
                      text = click.wrap_text(
-                         f"{v}", width=width, initial_indent="", subsequent_indent=" " * 12, preserve_paragraphs=True
-                     )
+                         f"{v}", width=width, initial_indent="", subsequent_indent=" " * 18, preserve_paragraphs=True
+                     ).replace("\n", "\n\n")
                  else:
                      text = v
                  table.add_row(k, text)

-             # Wrap the table in a panel with a title
-             console.print(Panel(Align(table, align="left"), title=f"Result {i}", expand=False))
-             console.print("\n")
+             # Only the Panel has the title now
+             console.print(Panel(table, title=f"Result {i}", expand=False, style="green on black"))
+             console.print("\n")
+

  def _sanitize_keywords(keywords):
      """Sanitizes keywords for file names and paths. Removes invalid characters like ':'. """
      keywords = (
          keywords.replace("filetype", "")
-         .replace(":", "")  # Remove colons
+         .replace(":", "")
          .replace('"', "'")
          .replace("site", "")
          .replace(" ", "_")
@@ -86,8 +88,8 @@ def _sanitize_keywords(keywords):
  @click.group(chain=True)
  def cli():
      """webscout CLI tool - Search the web with a rich UI."""
-     pass
-
+     console = Console()
+     console.print(f"[bold blue]{figlet_format('Webscout')}[/]\n", justify="center")

  def safe_entry_point():
      try:
@@ -100,7 +102,7 @@ def safe_entry_point():
  def version():
      """Shows the current version of webscout."""
      console = Console()
-     console.print(Panel(Text(f"webscout v{__version__}", style="cyan"), title="Version"))
+     console.print(Panel(Text(f"webscout v{__version__}", style="cyan"), title="Version", expand=False))


  @cli.command()
@@ -111,16 +113,15 @@ def chat(proxy):
      client = WEBS(proxy=proxy)

      console = Console()
-     console.print(Panel(Text("Available AI Models:", style="cyan"), title="DuckDuckGo AI Chat"))
-     for idx, model in enumerate(models, start=1):
-         console.print(f"{idx}. {model}")
-     chosen_model_idx = Prompt.ask("Choose a model by entering its number [1]", choices=[str(i) for i in range(1, len(models) + 1)], default="1")
+     console.print(Panel(Text("Available AI Models:", style="cyan"), title="DuckDuckGo AI Chat", expand=False))
+     console.print(Columns([Panel(Text(model, justify="center"), expand=True) for model in models]))
+     chosen_model_idx = Prompt.ask("[bold cyan]Choose a model by entering its number[/] [1]", choices=[str(i) for i in range(1, len(models) + 1)], default="1")
      chosen_model_idx = int(chosen_model_idx) - 1
      model = models[chosen_model_idx]
-     console.print(f"Using model: {model}")
+     console.print(f"[bold green]Using model:[/] {model}")

      while True:
-         user_input = input(f"{'-'*78}\nYou: ")
+         user_input = Prompt.ask(f"{'-'*78}\n[bold blue]You:[/]")
          if not user_input.strip():
              break

@@ -129,7 +130,7 @@ def chat(proxy):
          console.print(Panel(Text(f"AI: {text}", style="green"), title="AI Response"))

          if "exit" in user_input.lower() or "quit" in user_input.lower():
-             console.print(Panel(Text("Exiting chat session.", style="cyan"), title="Goodbye"))
+             console.print(Panel(Text("Exiting chat session.", style="cyan"), title="Goodbye", expand=False))
              break

webscout/utils.py CHANGED
@@ -4,23 +4,33 @@ from html import unescape
  from math import atan2, cos, radians, sin, sqrt
  from typing import Any, Dict, List, Union
  from urllib.parse import unquote
- import orjson

  from .exceptions import WebscoutE

+ try:
+     HAS_ORJSON = True
+     import orjson
+ except ImportError:
+     HAS_ORJSON = False
+     import json
+
  REGEX_STRIP_TAGS = re.compile("<.*?>")


  def json_dumps(obj: Any) -> str:
      try:
-         return orjson.dumps(obj).decode("utf-8")
+         return (
+             orjson.dumps(obj, option=orjson.OPT_INDENT_2).decode()
+             if HAS_ORJSON
+             else json.dumps(obj, ensure_ascii=False, indent=2)
+         )
      except Exception as ex:
          raise WebscoutE(f"{type(ex).__name__}: {ex}") from ex


  def json_loads(obj: Union[str, bytes]) -> Any:
      try:
-         return orjson.loads(obj)
+         return orjson.loads(obj) if HAS_ORJSON else json.loads(obj)
      except Exception as ex:
          raise WebscoutE(f"{type(ex).__name__}: {ex}") from ex

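
Note: with the fallback in place, json_dumps/json_loads behave the same whether or not orjson is installed; both paths emit 2-space-indented JSON. A quick round-trip check:

    from webscout.utils import json_dumps, json_loads

    payload = {"q": "python", "safe": True}
    text = json_dumps(payload)          # orjson.OPT_INDENT_2 or stdlib indent=2
    assert json_loads(text) == payload  # round-trips under either backend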
webscout/webai.py CHANGED
@@ -37,11 +37,13 @@ from dotenv import load_dotenv
  import g4f
  import webscout
  import webscout.AIutel
+ from pyfiglet import figlet_format

  init_colorama(autoreset=True)

  load_dotenv()  # loads .env variables

+ console = Console()
  logging.basicConfig(
      format="%(asctime)s - %(levelname)s : %(message)s ",
      datefmt="%H:%M:%S",
@@ -2606,6 +2608,7 @@ def make_commands():
  # @this.handle_exception
  def main(*args):
      """Fireup console programmically"""
+     console.print(f"[bold green]{figlet_format('WebAI')}[/]\n", justify="center")
      sys.argv += list(args)
      args = sys.argv
      if len(args) == 1:
webscout/webscout_search.py CHANGED
@@ -5,11 +5,14 @@ from datetime import datetime, timezone
  from decimal import Decimal
  from functools import cached_property
  from itertools import cycle, islice
+ from random import choice
  from threading import Event
  from types import TracebackType
  from typing import Dict, List, Optional, Tuple, Type, Union, cast

- import pyreqwest_impersonate as pri  # type: ignore
+ import pyreqwest_impersonate as pri
+
+ from .utils import _calculate_distance, _extract_vqd, _normalize, _normalize_url, _text_extract_json, json_loads  # type: ignore

  try:
      from lxml.etree import _Element
@@ -20,15 +23,8 @@ try:
  except ImportError:
      LXML_AVAILABLE = False

- from .exceptions import WebscoutE, RatelimitE, TimeoutE
- from .utils import (
-     _calculate_distance,
-     _extract_vqd,
-     _normalize,
-     _normalize_url,
-     _text_extract_json,
-     json_loads,
- )
+ from .exceptions import *
+

  logger = logging.getLogger("webscout.WEBS")

@@ -37,6 +33,15 @@ class WEBS:
      """webscout class to get search results from duckduckgo.com."""

      _executor: ThreadPoolExecutor = ThreadPoolExecutor()
+     _impersonates = (
+         "chrome_99", "chrome_100", "chrome_101", "chrome_104", "chrome_105", "chrome_106", "chrome_108",
+         "chrome_107", "chrome_109", "chrome_114", "chrome_116", "chrome_117", "chrome_118", "chrome_119",
+         "chrome_120", #"chrome_123", "chrome_124", "chrome_126",
+         "safari_ios_16.5", "safari_ios_17.2", "safari_ios_17.4.1", "safari_15.3", "safari_15.5",
+         "safari_15.6.1", "safari_16", "safari_16.5", "safari_17.2.1", "safari_17.4.1", "safari_17.5",
+         #"okhttp_3.9", "okhttp_3.11", "okhttp_3.13", "okhttp_3.14", "okhttp_4.9", "okhttp_4.10", "okhttp_5",
+         "edge_99", "edge_101", "edge_122",
+     )  # fmt: skip

      def __init__(
          self,
@@ -66,7 +71,7 @@ class WEBS:
              timeout=timeout,
              cookie_store=True,
              referer=True,
-             impersonate="chrome_124",
+             impersonate=choice(self._impersonates),
              follow_redirects=False,
              verify=False,
          )
@@ -120,13 +125,14 @@ class WEBS:
          resp_content = self._get_url("POST", "https://duckduckgo.com", data={"q": keywords})
          return _extract_vqd(resp_content, keywords)

-     def chat(self, keywords: str, model: str = "gpt-3.5") -> str:
+     def chat(self, keywords: str, model: str = "gpt-3.5", timeout: int = 20) -> str:
          """Initiates a chat session with DuckDuckGo AI.

          Args:
              keywords (str): The initial message or question to send to the AI.
              model (str): The model to use: "gpt-3.5", "claude-3-haiku", "llama-3-70b", "mixtral-8x7b".
                  Defaults to "gpt-3.5".
+             timeout (int): Timeout value for the HTTP client. Defaults to 20.

          Returns:
              str: The response from the AI.
@@ -149,18 +155,16 @@ class WEBS:
              "messages": self._chat_messages,
          }
          resp = self.client.post(
-             "https://duckduckgo.com/duckchat/v1/chat", headers={"x-vqd-4": self._chat_vqd}, json=json_data
+             "https://duckduckgo.com/duckchat/v1/chat",
+             headers={"x-vqd-4": self._chat_vqd},
+             json=json_data,
+             timeout=timeout,
          )
          self._chat_vqd = resp.headers.get("x-vqd-4", "")

-         messages = []
-         for line in resp.text.replace("data: ", "").replace("[DONE]", "").split("\n\n"):
-             x = line.strip()
-             if x:
-                 j = json_loads(x)
-                 message = j.get("message", "")
-                 messages.append(message)
-         result = "".join(messages)
+         data = ",".join(x for line in resp.text.rstrip("[DONE]\n").split("data:") if (x := line.strip()))
+         result = "".join(x.get("message", "") for x in json_loads("[" + data + "]"))
+
          self._chat_messages.append({"role": "assistant", "content": result})
          return result

@@ -347,7 +351,7 @@ class WEBS:
          for e in elements:
              if isinstance(e, _Element):
                  hrefxpath = e.xpath("./a/@href")
-                 href = str(hrefxpath[0]) if isinstance(hrefxpath, List) else None
+                 href = str(hrefxpath[0]) if hrefxpath and isinstance(hrefxpath, List) else None
                  if (
                      href
                      and href not in cache
@@ -357,9 +361,9 @@ class WEBS:
                  ):
                      cache.add(href)
                      titlexpath = e.xpath("./h2/a/text()")
-                     title = str(titlexpath[0]) if isinstance(titlexpath, List) else ""
+                     title = str(titlexpath[0]) if titlexpath and isinstance(titlexpath, List) else ""
                      bodyxpath = e.xpath("./a//text()")
-                     body = "".join(str(x) for x in bodyxpath) if isinstance(bodyxpath, List) else ""
+                     body = "".join(str(x) for x in bodyxpath) if bodyxpath and isinstance(bodyxpath, List) else ""
                      result = {
                          "title": _normalize(title),
                          "href": _normalize_url(href),
@@ -449,10 +453,14 @@ class WEBS:
                  else:
                      cache.add(href)
                      titlexpath = e.xpath(".//a//text()")
-                     title = str(titlexpath[0]) if isinstance(titlexpath, List) else ""
+                     title = str(titlexpath[0]) if titlexpath and isinstance(titlexpath, List) else ""
              elif i == 2:
                  bodyxpath = e.xpath(".//td[@class='result-snippet']//text()")
-                 body = "".join(str(x) for x in bodyxpath) if isinstance(bodyxpath, List) else ""
+                 body = (
+                     "".join(str(x) for x in bodyxpath).strip()
+                     if bodyxpath and isinstance(bodyxpath, List)
+                     else ""
+                 )
              if href:
                  result = {
                      "title": _normalize(title),
webscout-4.3.dist-info/METADATA → webscout-4.4.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: webscout
- Version: 4.3
+ Version: 4.4
  Summary: Search for anything using Google, DuckDuckGo, brave, qwant, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
  Author: OEvortex
  Author-email: helpingai5@gmail.com
@@ -34,6 +34,7 @@ Requires-Dist: tqdm
  Requires-Dist: webdriver-manager
  Requires-Dist: halo >=0.0.31
  Requires-Dist: g4f >=0.2.2.3
+ Requires-Dist: g4f[webdriver]
  Requires-Dist: rich
  Requires-Dist: python-dotenv
  Requires-Dist: beautifulsoup4
@@ -55,6 +56,11 @@ Requires-Dist: playsound
  Requires-Dist: poe-api-wrapper
  Requires-Dist: pyreqwest-impersonate
  Requires-Dist: ballyregan
+ Requires-Dist: nodriver
+ Requires-Dist: PyExecJS
+ Requires-Dist: ollama
+ Requires-Dist: pyfiglet
+ Requires-Dist: yaspin
  Provides-Extra: dev
  Requires-Dist: ruff >=0.1.6 ; extra == 'dev'
  Requires-Dist: pytest >=7.4.2 ; extra == 'dev'
webscout-4.3.dist-info/RECORD → webscout-4.4.dist-info/RECORD
@@ -1,30 +1,31 @@
- webscout/AIauto.py,sha256=5ZMoS39Tyy1AZS6s_bgVnng-x9CmvHhWWNB4QMB5v9U,20003
+ webscout/AIauto.py,sha256=gC01wLPpnqONf9DwKqkmbC_gIWo5Lh5V8YPu4OmYnhE,19923
  webscout/AIbase.py,sha256=GoHbN8r0gq2saYRZv6LA-Fr9Jlcjv80STKFXUq2ZeGU,4710
  webscout/AIutel.py,sha256=1NQAchS2e6c1SrIq0efsVtX3ANZ5XI1hjKVHGpJG7OU,34076
  webscout/DWEBS.py,sha256=QLuT1IKu0lnwdl7W6c-ctBAO7Jj0Zk3PYm6-13BC7rU,25740
+ webscout/GoogleS.py,sha256=dW_iArNTyFT5MWBEI1HQvqf-Noj3uJeJA_Eods8D4ms,11587
  webscout/LLM.py,sha256=LbGCZdJf8A5dwfoGS4tyy39tAh5BDdhMZP0ScKaaQfU,4184
  webscout/YTdownloader.py,sha256=uWpUWnw9pxeEGw9KJ_3XDyQ5gd38gH1dJpr-HJo4vzU,39144
- webscout/__init__.py,sha256=DX52bX0RKkXgKAWohQRyBKNdiamZmp2aQuTpsD5ohbY,2216
+ webscout/__init__.py,sha256=teSwl1Gx50AfNu7OibwZrltsErbRDcUuD7W5oAjIc7M,2257
  webscout/__main__.py,sha256=ZtTRgsRjUi2JOvYFLF1ZCh55Sdoz94I-BS-TlJC7WDU,126
  webscout/async_providers.py,sha256=MRj0klEhBYVQXnzZGG_15d0e-TPA0nOc2nn735H-wR4,622
- webscout/cli.py,sha256=EDxqTmcIshvhg9P0n2ZPaApj2-MEFY3uawS92zbBV_s,14705
+ webscout/cli.py,sha256=RlBKeS9CSIsiBMqlzxevWtKjbY9htkZvA7J0bM_hHE8,14999
  webscout/exceptions.py,sha256=YtIs-vXBwcjbt9TZ_wB7yI0dO7ANYIZAmEEeLmoQ2fI,487
  webscout/g4f.py,sha256=NNcnlOtIWV9R93UsBN4jBGBEJ9sJ-Np1WbgjkGVDcYc,24487
  webscout/models.py,sha256=5iQIdtedT18YuTZ3npoG7kLMwcrKwhQ7928dl_7qZW0,692
  webscout/tempid.py,sha256=5oc3UbXhPGKxrMRTfRABT-V-dNzH_hOKWtLYM6iCWd4,5896
  webscout/transcriber.py,sha256=EddvTSq7dPJ42V3pQVnGuEiYQ7WjJ9uyeR9kMSxN7uY,20622
- webscout/utils.py,sha256=CxeXvp0rWIulUrEaPZMaNfg_tSuQLRSV8uuHA2chyKE,2603
+ webscout/utils.py,sha256=2O8_lftBKsv5OEvVaXCN-h0sipup0m3jxzhFdWQrdY8,2873
  webscout/version.py,sha256=Pp5thQN3CvwDpubKz9MHn-UvDhuocamnBfB2VckwBGI,44
  webscout/voice.py,sha256=0QjXTHAQmCK07IDZXRc7JXem47cnPJH7u3X0sVP1-UQ,967
- webscout/webai.py,sha256=LPn9XKvc5SLxJ68slMsPUXxzkzfa4b0kzsiJyWs-yq0,88897
- webscout/webscout_search.py,sha256=lFAot1-Qil_YfXieeLakDVDEX8Ckcima4ueXdOYwiMc,42804
+ webscout/webai.py,sha256=32mRZqAGCOSCeyfAMzqac-94r2RD5_SWUxfh4QjZ95M,89037
+ webscout/webscout_search.py,sha256=lYrsPVB4QdSGJl4zehvSmGxK61xBAZ2dZGCmuN3ar2w,43686
  webscout/webscout_search_async.py,sha256=dooKGwLm0cwTml55Vy6NHPPY-nymEqX2h8laX94Zg5A,14537
  webscout/websx_search.py,sha256=n-qVwiHozJEF-GFRPcAfh4k1d_tscTmDe1dNL-1ngcU,12094
  webscout/Extra/__init__.py,sha256=GG1qUwS-HspT4TeeAIT4qFpM8PaO1ZdQhpelctaM7Rs,99
- webscout/Extra/autollama.py,sha256=DDdnb1tKEZWJaADVn9GXTZkMSwLKCcUGIjMKNlOBtK8,5419
- webscout/Extra/gguf.py,sha256=5zTNE5HxM_VQ5ONoocL8GG5fRXrgyLdEEjNzndG0oUw,7811
- webscout/Extra/weather.py,sha256=ocGwJYp5B9FwVWvIZ9wtoJTQsPFt64Vt8TitxJcdvAU,1687
- webscout/Extra/weather_ascii.py,sha256=sy6EEh2kN1CO1hKda8chD-mVCxH4p0NHyP7Uxr0-rgo,630
+ webscout/Extra/autollama.py,sha256=8lyodIWAgJABzlMMHytlolPCgvUKh8ynkZD6MMEltXs,5970
+ webscout/Extra/gguf.py,sha256=3QzQIClcVoHyAeb60xxv4msJudC2Maf41StdbzAq1bk,7009
+ webscout/Extra/weather.py,sha256=wdSrQxZRpbNfyaux0BeLdaDWyde5KwxZjSUM13820X0,2460
+ webscout/Extra/weather_ascii.py,sha256=Aed-_EUzvTEjBXbOpNRxkJBLa6fXsclknXP06HnQD18,808
  webscout/Local/__init__.py,sha256=RN6klpbabPGNX2YzPm_hdeUcQvieUwvJt22uAO2RKSM,238
  webscout/Local/_version.py,sha256=yH-h9AKl_KbJwMWeq0PDDOVI2FQ9NutjLDqcCGuAQ6I,83
  webscout/Local/formats.py,sha256=BiZZSoN3e8S6-S-ykBL9ogSUs0vK11GaZ3ghc9U8GRk,18994
@@ -61,9 +62,9 @@ webscout/Provider/Xjai.py,sha256=BIlk2ouz9Kh_0Gg9hPvTqhI7XtcmWdg5vHSX_4uGrIs,903
  webscout/Provider/Yepchat.py,sha256=2Eit-A7w1ph1GQKNQuur_yaDzI64r0yBGxCIjDefJxQ,19875
  webscout/Provider/Youchat.py,sha256=fhMpt94pIPE_XDbC4z9xyfgA7NbkNE2wlRFJabsjv90,8069
  webscout/Provider/__init__.py,sha256=j6lZqjLYext2a-KTnvGEvVm-D3jezHIlnanlj2H37FI,1962
- webscout-4.3.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
- webscout-4.3.dist-info/METADATA,sha256=Wh2IMCZhNgKcxsOqGNPriPzrEYoQ4uWfLakOnteemsc,57597
- webscout-4.3.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
- webscout-4.3.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
- webscout-4.3.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
- webscout-4.3.dist-info/RECORD,,
+ webscout-4.4.dist-info/LICENSE.md,sha256=9P0imsudI7MEvZe2pOcg8rKBn6E5FGHQ-riYozZI-Bk,2942
+ webscout-4.4.dist-info/METADATA,sha256=bgSEbiMKbSplv_CNKFCFWvY9Mp44VyxuuWpBJSxIKgQ,57749
+ webscout-4.4.dist-info/WHEEL,sha256=cpQTJ5IWu9CdaPViMhC9YzF8gZuS5-vlfoFihTBC86A,91
+ webscout-4.4.dist-info/entry_points.txt,sha256=Hh4YIIjvkqB9SVxZ2ri4DZUkgEu_WF_5_r_nZDIvfG8,73
+ webscout-4.4.dist-info/top_level.txt,sha256=nYIw7OKBQDr_Z33IzZUKidRD3zQEo8jOJYkMVMeN334,9
+ webscout-4.4.dist-info/RECORD,,
All other files are unchanged.