abstract-webtools 0.1.6.144__py3-none-any.whl → 0.1.6.145__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
abstract_webtools/managers/__init__.py
@@ -10,6 +10,7 @@ from .sslManager import *
 from .tlsAdapter import *
 from .urlManager import *
 from .userAgentManager import *
-from .seleniumManager import *
+from .seleneumManager import *
 from .videoDownloader import *
 from .middleManager import *
+seleniumManager = seleneumManager
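
The rename makes `seleneumManager` the canonical module name, while the final added line keeps the old `seleniumManager` attribute pointing at it. A minimal sketch of the compatibility this appears to preserve (the assertion is illustrative, not part of the package):

    # Hypothetical check; both names come from the __init__.py hunk above.
    from abstract_webtools import managers

    # After the alias line, the old attribute should resolve to the renamed module.
    assert managers.seleniumManager is managers.seleneumManager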
abstract_webtools/managers/requestManager/requestManager.py
@@ -328,10 +328,11 @@ class requestManager:
 
     def make_request(self):
         """
-        Make a request and handle potential errors.
+        Make a request and handle potential errors, with retries.
         """
         if self.url_mgr.url is None:
             return None
+
         self.wait_between_requests()
         for _ in range(self.max_retries):
             try:
@@ -345,41 +346,52 @@ class requestManager:
                 elif self._response.status_code == 429:
                     logging.warning(f"Rate limited by {self.url_mgr.url}. Retrying...")
                     time.sleep(5)
+                else:
+                    # String/bytes from Selenium path
+                    self.status_code = 200
+                    return self._response
             except requests.Timeout as e:
                 logging.error(f"Request to {self.url_mgr.url} timed out: {e}")
             except requests.ConnectionError:
                 logging.error(f"Connection error for URL {self.url_mgr.url}.")
             except requests.RequestException as e:
                 logging.error(f"Request exception for URL {self.url_mgr.url}: {e}")
-        try:
-            response = get_selenium_source(self.url_mgr.url)
-            if response:
-                self._response = response
-                self.status_code = 200 # Assume success
-                return self._response
-        except Exception as e:
-            logging.error(f"Failed to retrieve content from {self.url_mgr.url} after {self.max_retries} retries: {e}")
-            return None
+
+        logging.error(f"Failed to retrieve content from {self.url_mgr.url} after {self.max_retries} retries")
+        return None
 
     def try_request(self) -> requests.Response | str | bytes | None:
         """
-        Tries to make an HTTP request to the given URL using the provided session.
+        Tries Selenium first, then falls back to requests if Selenium fails.
         """
         if self.url_mgr.url is None:
             return None
+
+        # 1. Try Selenium
         try:
-            return get_selenium_source(self.url_mgr.url) # or self.session.get(self.url_mgr.url, timeout=self.timeout, stream=self.stream)
+            return get_selenium_source(self.url_mgr.url)
+        except Exception as e:
+            logging.warning(f"Selenium failed for {self.url_mgr.url}, falling back to requests: {e}")
+
+        # 2. Fallback: requests
+        try:
+            resp = self.session.get(
+                self.url_mgr.url,
+                timeout=self.timeout or 10,
+                stream=self.stream
+            )
+            return resp
         except requests.RequestException as e:
-            logging.error(f"Request failed: {e}")
+            logging.error(f"Requests fallback also failed for {self.url_mgr.url}: {e}")
             return None
 
-    @property
-    def url(self):
-        return self.url_mgr.url
+    @property
+    def url(self):
+        return self.url_mgr.url
 
-    @url.setter
-    def url(self, new_url):
-        self._url = new_url
+    @url.setter
+    def url(self, new_url):
+        self._url = new_url
 class SafeRequestSingleton:
     _instance = None
     @staticmethod
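
The net effect of these two hunks: `make_request` no longer embeds its own Selenium rescue block (non-Response payloads are now accepted inline via the new `else:` branch), and `try_request` inverts the old order, trying Selenium first and plain `requests` second. A self-contained sketch of that fallback shape, with a hypothetical stub standing in for `get_selenium_source`:

    import logging
    import requests

    def primary_fetch(url: str) -> str:
        # Stand-in for get_selenium_source(url); hypothetical stub so the sketch runs.
        raise RuntimeError("selenium unavailable in this sketch")

    def fetch_with_fallback(url: str, timeout: float = 10.0):
        # Mirror the new try_request order: Selenium-style fetch first, requests second.
        try:
            return primary_fetch(url)
        except Exception as e:
            logging.warning(f"Primary fetch failed for {url}, falling back to requests: {e}")
        try:
            return requests.get(url, timeout=timeout)
        except requests.RequestException as e:
            logging.error(f"Requests fallback also failed for {url}: {e}")
            return None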
abstract_webtools/managers/seleneumManager.py
@@ -1,34 +1,114 @@
-import os
-#from ..abstract_webtools import urlManager
-from .urlManager import *
-from urllib.parse import urlparse
-from abstract_utilities import *
+import os, time, re, json, logging, urllib3, requests,tempfile, shutil, socket, atexit, errno
+from urllib.parse import urlparse, urljoin
+from bs4 import BeautifulSoup # if you prefer, keep using your parser
 from selenium import webdriver
 from selenium.webdriver.chrome.options import Options
-import logging
-import urllib3
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
 from abstract_security import get_env_value
-# Suppress urllib3 warnings and debug logs
+from abstract_utilities import *
+from .urlManager import * # your urlManager
+
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 logging.getLogger("urllib3").setLevel(logging.WARNING)
-
-# Suppress Selenium logs
 logging.getLogger("selenium").setLevel(logging.WARNING)
 
-
-# Setup Chrome options
+# ---- Chrome options (keep yours; add safe fallbacks) ----
 chrome_options = Options()
-chrome_options.binary_location = get_env_value('CHROME_BINARY')
-chrome_options.add_argument("--headless") # Run in headless mode
+_bin = get_env_value('CHROME_BINARY')
+if _bin:
+    chrome_options.binary_location = _bin
+chrome_options.add_argument("--headless=new")
 chrome_options.add_argument("--no-sandbox")
 chrome_options.add_argument("--disable-dev-shm-usage")
 chrome_options.add_argument("--disable-gpu")
 chrome_options.add_argument("--disable-software-rasterizer")
 chrome_options.add_argument("--disable-extensions")
 chrome_options.add_argument("--remote-debugging-port=9222")
+chrome_prefs = {"profile.managed_default_content_settings.images": 2}
+chrome_options.experimental_options["prefs"] = chrome_prefs
+
+MIN_HTML_BYTES = 2048 # tune: consider <2KB suspicious for real pages
+# --- NEW helpers: unique temp profile + free port + options builder ---
+
+def _free_port() -> int:
+    s = socket.socket()
+    s.bind(("127.0.0.1", 0))
+    port = s.getsockname()[1]
+    s.close()
+    return port
+
+def _make_profile_dir(base="/var/tmp/selenium-profiles") -> str:
+    os.makedirs(base, exist_ok=True)
+    return tempfile.mkdtemp(prefix="cw-", dir=base)
+
+def _make_chrome_options(binary_path: str | None = None,
+                         user_data_dir: str | None = None) -> tuple[Options, str]:
+    opts = Options()
+    if binary_path:
+        opts.binary_location = binary_path
+    opts.add_argument("--headless=new")
+    opts.add_argument("--no-sandbox")
+    opts.add_argument("--disable-dev-shm-usage")
+    opts.add_argument("--disable-gpu")
+    opts.add_argument("--disable-software-rasterizer")
+    opts.add_argument("--disable-extensions")
+
+    prof = user_data_dir or _make_profile_dir()
+    opts.add_argument(f"--user-data-dir={prof}")
+    opts.add_argument(f"--remote-debugging-port={_free_port()}")
+
+    prefs = {"profile.managed_default_content_settings.images": 2}
+    opts.add_experimental_option("prefs", prefs)
+    return opts, prof
+
 
+def _looks_like_html(text_or_bytes: bytes | str) -> bool:
+    if not text_or_bytes:
+        return False
+    s = text_or_bytes if isinstance(text_or_bytes, str) else text_or_bytes.decode("utf-8", "ignore")
+    if len(s) < MIN_HTML_BYTES:
+        return False
+    lowered = s.lower()
+    return ("<html" in lowered and "</html>" in lowered) or "<body" in lowered
 
+def _requests_fallback(url: str, headers: dict | None = None, timeout: float = 15.0):
+    """Plain requests fallback. Returns `requests.Response | None`."""
+    try:
+        sess = requests.Session()
+        sess.headers.update(headers or {"User-Agent": "Mozilla/5.0"})
+        # honor simple redirects and cert issues as needed
+        resp = sess.get(url, timeout=timeout, allow_redirects=True, verify=False)
+        return resp
+    except Exception as e:
+        logging.warning(f"requests fallback failed for {url}: {e}")
+        return None
 
+def _wait_until_ready(driver, timeout: float = 10.0):
+    """Waits for DOM readiness and presence of <body>."""
+    try:
+        WebDriverWait(driver, timeout).until(
+            lambda d: d.execute_script("return document.readyState") in ("interactive", "complete")
+        )
+    except Exception:
+        pass
+    try:
+        WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.TAG_NAME, "body")))
+    except Exception:
+        pass
+    # small settle delay for late JS injections
+    time.sleep(0.3)
+def normalize_url(url, base_url=None):
+    manager = seleniumManager(url)
+    base_url = manager.base_url
+    if url.startswith(base_url):
+        url = url[len(base_url):]
+    normalized_url = urljoin(base_url, url.split('#')[0])
+    if not normalized_url.startswith(base_url):
+        return None
+    return normalized_url
+# ---- Singleton driver manager (your class; small fixes) ----
 class SingletonMeta(type):
     _instances = {}
     def __call__(cls, *args, **kwargs):
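
The new module-level helpers exist to replace the single shared `chrome_options` (with its fixed `--remote-debugging-port=9222`) for per-session use: each `_make_chrome_options` call gets a fresh user-data-dir under /var/tmp/selenium-profiles and a free DevTools port, so concurrent drivers cannot collide on one profile. A short illustrative sketch of how they compose, assuming the helpers above are in scope; the URL is a placeholder:

    import shutil
    from selenium import webdriver

    # Illustrative composition of the helpers above (mirrors the new get_driver()).
    opts, profile_dir = _make_chrome_options(binary_path=None)  # unique profile + free port
    driver = webdriver.Chrome(options=opts)
    try:
        driver.get("https://example.com")  # placeholder URL
        print(len(driver.page_source))
    finally:
        driver.quit()
        shutil.rmtree(profile_dir, ignore_errors=True)  # release the profile, as close_driver() does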
@@ -39,78 +119,123 @@ class SingletonMeta(type):
 
 class seleniumManager(metaclass=SingletonMeta):
     def __init__(self, url):
-        if not hasattr(self, 'initialized'): # Prevent reinitialization
-            self.initialized = True
-            parsed_url = urlparse(url)
-            self.domain = parsed_url.netloc
-            self.scheme = parsed_url.scheme
-            self.base_url= f"{self.scheme}{self.domain}"
-            self.site_dir = os.path.join(os.getcwd(), self.domain)
-            os.makedirs(self.site_dir, exist_ok=True)
-            self.drivers = {}
-            self.page_type = []
-
+        if getattr(self, "initialized", False):
+            return
+        self.initialized = True
+
+        p = urlparse(url)
+        self.domain = p.netloc
+        self.scheme = p.scheme or "https"
+        self.base_url = f"{self.scheme}://{self.domain}"
+
+        self.site_dir = os.path.join("/var/tmp", "cw-sites", self.domain)
+        os.makedirs(self.site_dir, exist_ok=True)
+
+        self._sessions: dict[str, dict] = {} # key -> {"driver": ..., "profile": ...}
+        atexit.register(lambda sm=self: sm.close_all())
+
     def get_url_to_path(self, url):
         url = eatAll(str(url), ['',' ','\n','\t','\\','/'])
-        parsed_url = urlparse(url)
-        if parsed_url.netloc == self.domain:
-            paths = parsed_url.path.split('/')
-            dir_path = self.site_dir
-            for path in paths[:-1]:
-                dir_path = os.path.join(dir_path, path)
-                os.makedirs(dir_path, exist_ok=True)
-            self.page_type.append(os.path.splitext(paths[-1])[-1] or 'html' if len(self.page_type) == 0 else self.page_type[-1])
-
-            dir_path = os.path.join(dir_path, paths[-1])
-            return dir_path
-
-    def saved_url_check(self, url):
-        path = self.get_url_to_path(url)
-        return path
+        p = urlparse(url)
+        if p.netloc == self.domain:
+            parts = [x for x in p.path.split('/') if x]
+            d = self.site_dir
+            for seg in parts[:-1]:
+                d = os.path.join(d, seg)
+                os.makedirs(d, exist_ok=True)
+            last = parts[-1] if parts else "index.html"
+            ext = os.path.splitext(last)[-1] or ".html"
+            if not hasattr(self, "page_type"):
+                self.page_type = []
+            self.page_type.append(ext if not self.page_type else self.page_type[-1])
+            return os.path.join(d, last)
 
     def get_with_netloc(self, url):
-        parsed_url = urlparse(url)
-        if parsed_url.netloc == '':
-            url = f"{self.scheme}://{self.domain}/{url.strip()}"
+        p = urlparse(url)
+        if p.netloc == '':
+            url = f"{self.scheme}://{self.domain}/{url.strip().lstrip('/')}"
         return url
 
-    def get_driver(self, url):
-        if url and url not in self.drivers:
-            # chrome_options = Options()
-            # chrome_options.add_argument("--headless")
-            driver = webdriver.Chrome(options=chrome_options)
-            self.drivers[url] = driver
-            driver.get(url)
-        return self.drivers[url]
-    def normalize_url(url, base_url=None):
-        """
-        Normalize and resolve relative URLs, ensuring proper domain and format.
-        """
-        # If URL starts with the base URL repeated, remove the extra part
-        manager = seleniumManager(url)
-        base_url = manager.base_url
-        if url.startswith(base_url):
-            url = url[len(base_url):]
+    def get_driver(self, url) -> tuple[str, webdriver.Chrome]:
+        bin_path = get_env_value('CHROME_BINARY')
+        opts, prof = _make_chrome_options(binary_path=bin_path, user_data_dir=None)
+        driver = webdriver.Chrome(options=opts)
+        key = f"{url}#{time.time()}"
+        self._sessions[key] = {"driver": driver, "profile": prof}
+        return key, driver
 
-        # Resolve the URL against the base URL
-        normalized_url = urljoin(base_url, url.split('#')[0])
+    def close_driver(self, key: str):
+        sess = self._sessions.pop(key, None)
+        if not sess: return
+        try:
+            try: sess["driver"].quit()
+            except Exception: pass
+        finally:
+            shutil.rmtree(sess.get("profile") or "", ignore_errors=True)
 
-        # Ensure only URLs belonging to the base domain are kept
-        if not normalized_url.startswith(base_url):
-            return None
+    def close_all(self):
+        for key in list(self._sessions.keys()):
+            self.close_driver(key)
 
-        return normalized_url
-# Function to get Selenium page source
-def get_selenium_source(url):
+
+
+# ---- Hardened page-source retrieval with fallback ----
+def get_selenium_source(url, max_retries: int = 2, request_fallback: bool = True, timeout: float = 12.0):
     url_mgr = urlManager(url)
-    if url_mgr.url:
-        url = str(url_mgr.url)
-        manager = seleniumManager(url)
-        driver = manager.get_driver(url)
-        try:
-            # Get page source
-            page_source = driver.page_source
-            return page_source
-        finally:
-            # Don't quit the driver unless you're done with all interactions
-            pass
+    if not url_mgr.url:
+        return None
+    url = str(url_mgr.url)
+
+    manager = seleniumManager(url)
+    key, driver = manager.get_driver(url)
+
+    last_exc = None
+    try:
+        for attempt in range(1, max_retries + 1):
+            try:
+                driver.get(url)
+                _wait_until_ready(driver, timeout=timeout)
+                html = driver.page_source or ""
+                if not _looks_like_html(html):
+                    html = driver.execute_script(
+                        "return document.documentElement ? document.documentElement.outerHTML : '';"
+                    ) or html
+                if _looks_like_html(html):
+                    return html
+                logging.warning(f"Selenium returned suspicious HTML (len={len(html)}) for {url} "
+                                f"[attempt {attempt}/{max_retries}]")
+            except Exception as e:
+                last_exc = e
+                logging.warning(f"Selenium attempt {attempt}/{max_retries} failed for {url}: {e}")
+            time.sleep(0.5 * attempt)
+
+        if request_fallback:
+            resp = _requests_fallback(url, headers={"User-Agent": "Mozilla/5.0"})
+            if resp is not None:
+                ctype = (resp.headers.get("content-type") or "").lower()
+                body = resp.text if hasattr(resp, "text") else (
+                    resp.content.decode("utf-8", "ignore") if hasattr(resp, "content") else ""
+                )
+                if "application/json" in ctype:
+                    try:
+                        return json.dumps(resp.json())
+                    except Exception:
+                        return body
+                return body if _looks_like_html(body) or body else None
+    finally:
+        # critical: release the user-data-dir to avoid “already in use”
+        manager.close_driver(key)
+
+    if last_exc:
+        logging.error(f"Unable to retrieve page for {url}: {last_exc}")
+    return None
+
+def get_driver(self, url):
+    # always new
+    bin_path = get_env_value('CHROME_BINARY')
+    opts, prof = _make_chrome_options(binary_path=bin_path, user_data_dir=None)
+    driver = webdriver.Chrome(options=opts)
+    # store so close_all() can clean up
+    key = f"{url}#{time.time()}"
+    self._sessions[key] = {"driver": driver, "profile": prof}
+    return driver
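
With the hardened retrieval in place, `get_selenium_source` returns rendered HTML, a JSON string from the requests fallback, or None, and the finally block releases the driver and its profile directory on every path. A minimal usage sketch (the URL is a placeholder):

    from abstract_webtools.managers.seleneumManager import get_selenium_source

    html = get_selenium_source("https://example.com", max_retries=2, request_fallback=True)
    if html:
        print(html[:200])  # leading bytes of the rendered page
    else:
        print("No usable content; the logged warnings show which path failed.")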
abstract_webtools-0.1.6.145.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstract_webtools
-Version: 0.1.6.144
+Version: 0.1.6.145
 Summary: Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.
 Home-page: https://github.com/AbstractEndeavors/abstract_essentials/tree/main/abstract_webtools
 Author: putkoff
abstract_webtools-0.1.6.145.dist-info/RECORD
@@ -10,7 +10,7 @@ abstract_webtools/main.py,sha256=_I7pPXPkoLZOoYGLQDrSLGhGuQt6-PVyXEHZSmglk2g,132
 abstract_webtools/soup_gui.py,sha256=n95YAps1R6DpMwR4UbthSqQby0C5WHUa9tsW-f2qpLg,5184
 abstract_webtools/url_grabber.py,sha256=pnCCev7ZIuM-6cAGTLmK5HfzZg_AX-fLcRpB6ZE70B8,10441
 abstract_webtools/url_grabber_new.py,sha256=xb23qo4anOY0Ax3CAfaHJ8s5VEz61Sinh-XpEDFW7Is,3621
-abstract_webtools/managers/__init__.py,sha256=d7Q6_McRuKOHmKuna19s0l1wMgtM1JgUX8rHaSqJIcE,436
+abstract_webtools/managers/__init__.py,sha256=RXQAK5z9nYlocM91P2OC4jR352-MiqT5bAi4xZl7_FU,470
 abstract_webtools/managers/allss\.py,sha256=IBhlyRQHfK-BtwUnSEbIPqlI1MtZ8-XsdaHv0b91HQ0,269
 abstract_webtools/managers/cipherManager.py,sha256=NHQGdR11eNSm-1H-GezD5dyQgsPTJwY5kczt8Sher2s,1621
 abstract_webtools/managers/crawlManager.py,sha256=62Ej6AQC6-qXX_EWOmcJ2szNvEjmebFGugMz65HF1qI,12983
@@ -21,7 +21,7 @@ abstract_webtools/managers/dynamicRateLimiter.py,sha256=ycn5VQEPnmxjNMew4IVh-t5t
 abstract_webtools/managers/get_test.py,sha256=nISrhUGdyvRv18wTGoifGhizBFoHeK0N3FymMASloFw,825
 abstract_webtools/managers/mySocketClient.py,sha256=-j1Q8Ds9RCSbjZdx3ZF9mVpgwxaO0BBssanUcpYVQoY,2045
 abstract_webtools/managers/networkManager.py,sha256=Op2QDXrP-gmm0tCToe-Ryt9xuOtMppcN2KLKP1WZiu0,952
-abstract_webtools/managers/seleneumManager.py,sha256=1toMSoIPZmKwU88FMDTJl0DL398Zg_7uH-O1QqJpZC4,4184
+abstract_webtools/managers/seleneumManager.py,sha256=wyo4SpocgRz3W50b33GW3po32_uxYwmdE1TFZ_0k07s,9539
 abstract_webtools/managers/seleniumManager.py,sha256=RRpA1_oOnZuzzQ4S6VX7tDFcI31E_mOou2CZOOZH6yI,4274
 abstract_webtools/managers/sslManager.py,sha256=I9YUqJo8_KwLOwfBTAoSfzKSfR4Vtjw1HQXsXRnCV-g,641
 abstract_webtools/managers/tlsAdapter.py,sha256=XZSMZz9EUOhv-h3_Waf6mjV1dA3oN_M_oWuoo4VZ_HE,1454
@@ -39,14 +39,14 @@ abstract_webtools/managers/middleManager/src/UnifiedWebManager.py,sha256=qYCvfjU
 abstract_webtools/managers/middleManager/src/__init__.py,sha256=YaSAh7AG1EvFWFZBIe4pGvzmfr60rpR9ZDWoQKqAMd0,61
 abstract_webtools/managers/middleManager/src/legacy_tools.py,sha256=2cCnRaq8UO7HdtffNtAOsZFJm_mpZbpvBuX0pIIWGaM,125
 abstract_webtools/managers/requestManager/__init__.py,sha256=z2qGtweEoO_OKr959LGxVXEMu1hu7PIkmh89BEh5TI8,30
-abstract_webtools/managers/requestManager/requestManager.py,sha256=26BdfGrkWq2ouDaf0P8HTVK46PtPZJHUO46lIZgd8D8,19768
+abstract_webtools/managers/requestManager/requestManager.py,sha256=0d1Z5dFIjOg8KyJakzOilJiiq6SR3iKUr5vfnssWDu8,20048
 abstract_webtools/managers/soupManager/__init__.py,sha256=mqfXfqM9sWlYpOkoXUqtBoVvk2KQx1862NnmRVJwGtY,27
 abstract_webtools/managers/soupManager/asoueces.py,sha256=OaXqolZl0dI7b09NYwJ3Wnhuxf89ahZ1GjsOqy0GXfk,3506
 abstract_webtools/managers/soupManager/soupManager.py,sha256=75gwqVXIRwgVqzATBC-DiJF2AT_AdE6FSBWy3DbW5ZA,17393
 abstract_webtools/managers/urlManager/__init__.py,sha256=gaJCHeK91Z-eYsBnxgdhbIUten1-gbx-zqx70R6ag-Y,26
 abstract_webtools/managers/urlManager/urlManager (Copy).py,sha256=vCFuLADmv3h7icaaoAsImGqb_49VizPY_ZvMl-C7PYk,7756
 abstract_webtools/managers/urlManager/urlManager.py,sha256=vY4KQXtcrlC2YtlultxQpVe581l5kAuT5VGA0WrI16g,8945
-abstract_webtools-0.1.6.144.dist-info/METADATA,sha256=3pP4vVIzPwj649lj6QTC2wxt7yxsx4YRwl-iRkixR9M,7289
-abstract_webtools-0.1.6.144.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-abstract_webtools-0.1.6.144.dist-info/top_level.txt,sha256=2DMJ7RmjTcjCsa-uwAV0K6eXXlIIkFDEjBLg_uyCmCI,18
-abstract_webtools-0.1.6.144.dist-info/RECORD,,
+abstract_webtools-0.1.6.145.dist-info/METADATA,sha256=7eU_thbiawnNyvNUcQOBHclY44_tH3DikGbdnllhtXE,7289
+abstract_webtools-0.1.6.145.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+abstract_webtools-0.1.6.145.dist-info/top_level.txt,sha256=2DMJ7RmjTcjCsa-uwAV0K6eXXlIIkFDEjBLg_uyCmCI,18
+abstract_webtools-0.1.6.145.dist-info/RECORD,,