abstract-webtools 0.1.6.144__tar.gz → 0.1.6.146__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/PKG-INFO +1 -1
  2. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/setup.py +1 -1
  3. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/__init__.py +2 -1
  4. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/cipherManager.py +12 -13
  5. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/crawlManager.py +35 -26
  6. abstract_webtools-0.1.6.146/src/abstract_webtools/managers/curlMgr.py +26 -0
  7. abstract_webtools-0.1.6.146/src/abstract_webtools/managers/meta_dump.py +27 -0
  8. abstract_webtools-0.1.6.146/src/abstract_webtools/managers/networkManager.py +50 -0
  9. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/requestManager/requestManager.py +31 -19
  10. abstract_webtools-0.1.6.146/src/abstract_webtools/managers/seleneumManager.py +241 -0
  11. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/soupManager/soupManager.py +46 -19
  12. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/sslManager.py +11 -2
  13. abstract_webtools-0.1.6.146/src/abstract_webtools/managers/userAgentManager.py +51 -0
  14. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools.egg-info/PKG-INFO +1 -1
  15. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools.egg-info/SOURCES.txt +1 -0
  16. abstract_webtools-0.1.6.144/src/abstract_webtools/managers/curlMgr.py +0 -48
  17. abstract_webtools-0.1.6.144/src/abstract_webtools/managers/networkManager.py +0 -15
  18. abstract_webtools-0.1.6.144/src/abstract_webtools/managers/seleneumManager.py +0 -116
  19. abstract_webtools-0.1.6.144/src/abstract_webtools/managers/userAgentManager.py +0 -60
  20. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/README.md +0 -0
  21. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/pyproject.toml +0 -0
  22. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/setup.cfg +0 -0
  23. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/__init__.py +0 -0
  24. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/abstract_usurpit.py +0 -0
  25. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/abstract_webtools.py +0 -0
  26. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/big_user_agent_list.py +0 -0
  27. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/domain_identifier.py +0 -0
  28. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/extention_list.py +0 -0
  29. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/find_dirs.py +0 -0
  30. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/k2s_downloader.py +0 -0
  31. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/main.py +0 -0
  32. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/allss//.py" +0 -0
  33. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/clownworld/__init__.py +0 -0
  34. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/clownworld/get_bolshevid_video.py +0 -0
  35. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/crawlmgr2.py +0 -0
  36. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/domainManager.py +0 -0
  37. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/dynamicRateLimiter.py +0 -0
  38. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/get_test.py +0 -0
  39. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/linkManager/__init__.py +0 -0
  40. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/linkManager/linkManager.py +0 -0
  41. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/middleManager/__init__.py +0 -0
  42. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/middleManager/imports.py +0 -0
  43. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/middleManager/src/UnifiedWebManage3r.py +0 -0
  44. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/middleManager/src/UnifiedWebManager.py +0 -0
  45. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/middleManager/src/__init__.py +0 -0
  46. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/middleManager/src/legacy_tools.py +0 -0
  47. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/mySocketClient.py +0 -0
  48. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/requestManager/__init__.py +0 -0
  49. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/soupManager/__init__.py +0 -0
  50. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/soupManager/asoueces.py +0 -0
  51. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/tlsAdapter.py +0 -0
  52. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/urlManager/__init__.py +0 -0
  53. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/urlManager/urlManager (Copy).py +0 -0
  54. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/urlManager/urlManager.py +0 -0
  55. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/videoDownloader.py +0 -0
  56. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/managers/videoDownloader2.py +0 -0
  57. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/soup_gui.py +0 -0
  58. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/url_grabber.py +0 -0
  59. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools/url_grabber_new.py +0 -0
  60. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools.egg-info/dependency_links.txt +0 -0
  61. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools.egg-info/requires.txt +0 -0
  62. {abstract_webtools-0.1.6.144 → abstract_webtools-0.1.6.146}/src/abstract_webtools.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: abstract_webtools
- Version: 0.1.6.144
+ Version: 0.1.6.146
  Summary: Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.
  Home-page: https://github.com/AbstractEndeavors/abstract_essentials/tree/main/abstract_webtools
  Author: putkoff
@@ -4,7 +4,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
      long_description = fh.read()
  setuptools.setup(
      name='abstract_webtools',
-     version='0.1.6.144',
+     version='0.1.6.146',
      author='putkoff',
      author_email='partners@abstractendeavors.com',
      description='Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.',
@@ -10,6 +10,7 @@ from .sslManager import *
  from .tlsAdapter import *
  from .urlManager import *
  from .userAgentManager import *
- from .seleniumManager import *
+ from .seleneumManager import *
  from .videoDownloader import *
  from .middleManager import *
+ seleniumManager = seleneumManager
@@ -1,21 +1,20 @@
  class CipherManager:
      @staticmethod
-     def get_default_ciphers()-> list:
+     def get_default_ciphers() -> list:
          return [
-             "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384",
-             "ECDHE-RSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA384",
-             "ECDHE-RSA-AES256-SHA", "ECDHE-ECDSA-AES256-SHA",
-             "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-SHA256",
-             "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES128-SHA256",
-             "AES256-SHA", "AES128-SHA"
+             "ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-AES256-GCM-SHA384",
+             "ECDHE-RSA-AES256-SHA384","ECDHE-ECDSA-AES256-SHA384",
+             "ECDHE-RSA-AES256-SHA","ECDHE-ECDSA-AES256-SHA",
+             "ECDHE-RSA-AES128-GCM-SHA256","ECDHE-RSA-AES128-SHA256",
+             "ECDHE-ECDSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-SHA256",
+             "AES256-SHA","AES128-SHA"
          ]
+     def __init__(self, cipher_list=None):
+         self.cipher_list = cipher_list or self.get_default_ciphers()
+         if isinstance(self.cipher_list, str):
+             self.cipher_list = [c.strip() for c in self.cipher_list.split(',') if c.strip()]
+         self.ciphers_string = ','.join(self.cipher_list) if self.cipher_list else ''

-     def __init__(self,cipher_list=None):
-         if cipher_list == None:
-             cipher_list=self.get_default_ciphers()
-         self.cipher_list = cipher_list
-         self.create_list()
-         self.ciphers_string = self.add_string_list()
      def add_string_list(self):
          if len(self.cipher_list)==0:
              return ''
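The reworked constructor above now normalizes its own input. A minimal usage sketch (the import path is taken from the file list above; the cipher string argument is made up):

from abstract_webtools.managers.cipherManager import CipherManager

print(CipherManager().ciphers_string)                       # default suite, comma-joined
print(CipherManager("AES256-SHA, AES128-SHA").cipher_list)  # a comma-separated string is split and stripped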
@@ -119,14 +119,42 @@ if __name__ == "__main__":
      generator.run()

  class crawlManager:
-     def __init__(self, url=None, req_mgr=None, url_mgr=None, source_code=None, parse_type="html.parser"):
+     def __init__(self, url, req_mgr, url_mgr, source_code=None, parse_type="html.parser"):
+         self.url_mgr = url_mgr
+         self.req_mgr = req_mgr
          self.url = url
-         self.source_code = source_code
          self.parse_type = parse_type
-         self.url_mgr = url_mgr or urlManager(url=self.url)
-         self.req_mgr = req_mgr or requestManager(url_mgr=self.url_mgr)
-         self.get_new_source_and_url(url)
-         self.sime_map_mgr = SitemapGenerator(self.url_mgr.domain)
+         self.source_code = source_code or req_mgr.source_code
+         self.soup = BeautifulSoup(self.source_code or "", parse_type)
+         self.base_netloc = urlparse(self.url).netloc
+
+     def is_internal(self, link):
+         u = urlparse(link)
+         return (not u.netloc) or (u.netloc == self.base_netloc)
+
+     def links_on_page(self):
+         out = set()
+         for a in self.soup.find_all("a", href=True):
+             out.add(urljoin(self.url, a["href"]))
+         return out
+
+     def crawl(self, start=None, max_depth=2, _depth=0, visited=None, session=None):
+         start = start or self.url
+         visited = visited or set()
+         if _depth > max_depth or start in visited:
+             return visited
+         visited.add(start)
+
+         # fetch
+         r = self.req_mgr.session.get(start, timeout=30)
+         r.raise_for_status()
+         soup = BeautifulSoup(r.text, self.parse_type)
+
+         for a in soup.find_all("a", href=True):
+             link = urljoin(start, a["href"])
+             if self.is_internal(link) and link not in visited:
+                 self.crawl(link, max_depth=max_depth, _depth=_depth+1, visited=visited)
+         return visited
      def get_new_source_and_url(self, url=None):
          """Fetches new source code and response for a given URL."""
          url = url
@@ -194,26 +222,7 @@ class crawlManager:
              return ('yearly', '0.3')
          return ('weekly', '1.0')

-     def crawl(self, url, max_depth=3, depth=1, visited=None):
-         """Recursively crawls the site up to max_depth and returns valid internal links."""
-         visited = visited or set()
-         if depth > max_depth or url in visited:
-             return []
-
-         visited.add(url)
-         try:
-             soup = get_soup(url)
-             links = []
-             for tag in soup.find_all('a', href=True):
-                 link = urljoin(url, tag['href'])
-                 if urlparse(link).netloc == urlparse(url).netloc and link not in visited:
-                     links.append(link)
-                     self.crawl(link, max_depth, depth + 1, visited)
-             return links
-         except Exception as e:
-             print(f"Error crawling {url}: {e}")
-             return []
-
+
      def get_meta_info(self, url=None):
          """Fetches metadata, including title and meta tags, from the page."""
          url = url or self.url
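The new crawlManager above takes its collaborators explicitly and returns the visited set from crawl(). A minimal sketch, assuming urlManager and requestManager are built the way the removed __init__ used to build them; the URL is hypothetical:

url_mgr = urlManager(url="https://example.com")
req_mgr = requestManager(url_mgr=url_mgr)
crawler = crawlManager("https://example.com", req_mgr, url_mgr)
print(sorted(crawler.crawl(max_depth=1)))  # set of internal URLs reached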
@@ -0,0 +1,26 @@
+ import os, subprocess, requests
+
+ def curl_download(website, destination_path, user_agent=None):
+     os.makedirs(os.path.dirname(destination_path), exist_ok=True)
+     ua = user_agent or ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
+                         "AppleWebKit/537.36 (KHTML, like Gecko) "
+                         "Chrome/91.0.4472.124 Safari/537.36")
+     subprocess.run([
+         "curl","-L","--output", destination_path,
+         "-H", f"User-Agent: {ua}",
+         "-H", "Accept: */*",
+         website
+     ], check=True)
+
+ def requests_download(website, destination_path, headers=None):
+     os.makedirs(os.path.dirname(destination_path), exist_ok=True)
+     hdr = {"User-Agent": ("Mozilla/5.0 ... Chrome/91.0 Safari/537.36"),
+            "Accept": "*/*"}
+     if headers: hdr.update(headers)
+     r = requests.get(website, headers=hdr, allow_redirects=True, timeout=30)
+     r.raise_for_status()
+     with open(destination_path, "wb") as f:
+         f.write(r.content)
+
+ if __name__ == "__main__":
+     pass  # no side effects
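A minimal sketch of the two helpers in the new curlMgr.py; the destination path is hypothetical:

from abstract_webtools.managers.curlMgr import curl_download, requests_download

requests_download("https://example.com", "/tmp/example/index.html")
# curl_download("https://example.com", "/tmp/example/index.html")  # same download via the curl CLI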
@@ -0,0 +1,27 @@
+ # meta_dump.py
+ from abstract_webtools.managers.networkManager import NetworkManager
+ from abstract_webtools.managers.userAgentManager import UserAgentManager
+ from abstract_webtools.managers.soupManager.soupManager import soupManager
+ import json, sys
+
+ def dump_all_meta(url: str):
+     ua = UserAgentManager(browser="Chrome", operating_system="Windows")
+     net = NetworkManager(user_agent_manager=ua)
+
+     r = net.session.get(url, timeout=30)
+     r.raise_for_status()
+
+     sm = soupManager(url=url, source_code=r.text, req_mgr=net)
+     out = {
+         "url": url,
+         "title": sm.soup.title.string.strip() if sm.soup.title and sm.soup.title.string else None,
+         "meta": sm.all_meta(),
+         "citation": sm.citation_dict(),
+         "links": sm.all_links(),
+         "json_ld": sm.all_jsonld(),
+     }
+     print(json.dumps(out, indent=2, ensure_ascii=False))
+
+ if __name__ == "__main__":
+     url = sys.argv[1]
+     dump_all_meta(url)
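meta_dump.py is itself a small usage example; calling it from Python looks roughly like this (the URL is hypothetical):

from abstract_webtools.managers.meta_dump import dump_all_meta

dump_all_meta("https://example.com")  # prints JSON with title, meta, citation, links and JSON-LD blocks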
@@ -0,0 +1,50 @@
+ from typing import Optional, List
+ import requests
+ from ..abstract_webtools import *
+ from .sslManager import SSLManager
+ from .cipherManager import CipherManager
+
+ class TLSAdapter(HTTPAdapter):
+     def __init__(self, ssl_manager: SSLManager=None):
+         ssl_manager = ssl_manager or SSLManager()
+         self.ssl_context = ssl_manager.ssl_context
+         super().__init__()
+     def init_poolmanager(self, *args, **kwargs):
+         kwargs['ssl_context'] = self.ssl_context
+         return super().init_poolmanager(*args, **kwargs)
+
+ class NetworkManager:
+     def __init__(self, user_agent_manager=None, ssl_manager=None, proxies=None, cookies=None,
+                  ciphers=None, certification: Optional[str]=None, ssl_options: Optional[List[str]]=None):
+         self.ua_mgr = user_agent_manager or UserAgentManager()
+         self.ssl_mgr = ssl_manager or SSLManager(
+             ciphers=ciphers or CipherManager().ciphers_string,
+             ssl_options=ssl_options,
+             certification=certification
+         )
+
+         self.session = requests.Session()
+         self.session.headers.update({
+             "User-Agent": self.ua_mgr.user_agent,
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+             "Accept-Language": "en-US,en;q=0.9",
+             "Connection": "keep-alive"
+         })
+         adapter = TLSAdapter(self.ssl_mgr)
+         self.session.mount("https://", adapter)
+         self.session.mount("http://", HTTPAdapter())
+
+         if proxies:
+             self.session.proxies = proxies
+         if cookies:
+             if isinstance(cookies, requests.cookies.RequestsCookieJar):
+                 self.session.cookies = cookies
+             elif isinstance(cookies, dict):
+                 jar = requests.cookies.RequestsCookieJar()
+                 for k,v in cookies.items(): jar.set(k,v)
+                 self.session.cookies = jar
+             # if string: up to you—parse or ignore
+
+         # retries (optional)
+         from requests.adapters import Retry
+         self.session.adapters['https://'].max_retries = Retry(total=5, backoff_factor=0.5, status_forcelist=[429,500,502,503,504])
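A minimal sketch of the new NetworkManager: it exposes a ready-made requests.Session with the TLS adapter, browser-like headers and the retry policy already wired in (the host is hypothetical):

from abstract_webtools.managers.networkManager import NetworkManager

net = NetworkManager()
resp = net.session.get("https://example.com", timeout=30)
print(resp.status_code, resp.headers.get("content-type"))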
@@ -328,10 +328,11 @@ class requestManager:

      def make_request(self):
          """
-         Make a request and handle potential errors.
+         Make a request and handle potential errors, with retries.
          """
          if self.url_mgr.url is None:
              return None
+
          self.wait_between_requests()
          for _ in range(self.max_retries):
              try:
@@ -345,41 +346,52 @@ class requestManager:
                  elif self._response.status_code == 429:
                      logging.warning(f"Rate limited by {self.url_mgr.url}. Retrying...")
                      time.sleep(5)
+                 else:
+                     # String/bytes from Selenium path
+                     self.status_code = 200
+                     return self._response
              except requests.Timeout as e:
                  logging.error(f"Request to {self.url_mgr.url} timed out: {e}")
              except requests.ConnectionError:
                  logging.error(f"Connection error for URL {self.url_mgr.url}.")
              except requests.RequestException as e:
                  logging.error(f"Request exception for URL {self.url_mgr.url}: {e}")
-         try:
-             response = get_selenium_source(self.url_mgr.url)
-             if response:
-                 self._response = response
-                 self.status_code = 200 # Assume success
-                 return self._response
-         except Exception as e:
-             logging.error(f"Failed to retrieve content from {self.url_mgr.url} after {self.max_retries} retries: {e}")
-         return None
+
+         logging.error(f"Failed to retrieve content from {self.url_mgr.url} after {self.max_retries} retries")
+         return None

      def try_request(self) -> requests.Response | str | bytes | None:
          """
-         Tries to make an HTTP request to the given URL using the provided session.
+         Tries Selenium first, then falls back to requests if Selenium fails.
          """
          if self.url_mgr.url is None:
              return None
+
+         # 1. Try Selenium
          try:
-             return get_selenium_source(self.url_mgr.url) # or self.session.get(self.url_mgr.url, timeout=self.timeout, stream=self.stream)
+             return get_selenium_source(self.url_mgr.url)
+         except Exception as e:
+             logging.warning(f"Selenium failed for {self.url_mgr.url}, falling back to requests: {e}")
+
+         # 2. Fallback: requests
+         try:
+             resp = self.session.get(
+                 self.url_mgr.url,
+                 timeout=self.timeout or 10,
+                 stream=self.stream
+             )
+             return resp
          except requests.RequestException as e:
-             logging.error(f"Request failed: {e}")
+             logging.error(f"Requests fallback also failed for {self.url_mgr.url}: {e}")
              return None

-     @property
-     def url(self):
-         return self.url_mgr.url
+     @property
+     def url(self):
+         return self.url_mgr.url

-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
+     @url.setter
+     def url(self, new_url):
+         self._url = new_url
  class SafeRequestSingleton:
      _instance = None
      @staticmethod
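With the change above, try_request() attempts Selenium first and only then the plain session. A rough sketch, assuming requestManager is constructed with a urlManager as in the old crawlManager code; the URL is hypothetical:

req_mgr = requestManager(url_mgr=urlManager(url="https://example.com"))
content = req_mgr.try_request()  # page source from Selenium, a requests.Response on fallback, or None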
@@ -0,0 +1,241 @@
+ import os, time, re, json, logging, urllib3, requests,tempfile, shutil, socket, atexit, errno
+ from urllib.parse import urlparse, urljoin
+ from bs4 import BeautifulSoup  # if you prefer, keep using your parser
+ from selenium import webdriver
+ from selenium.webdriver.chrome.options import Options
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+ from abstract_security import get_env_value
+ from abstract_utilities import *
+ from .urlManager import *  # your urlManager
+
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+ logging.getLogger("urllib3").setLevel(logging.WARNING)
+ logging.getLogger("selenium").setLevel(logging.WARNING)
+
+ # ---- Chrome options (keep yours; add safe fallbacks) ----
+ chrome_options = Options()
+ _bin = get_env_value('CHROME_BINARY')
+ if _bin:
+     chrome_options.binary_location = _bin
+ chrome_options.add_argument("--headless=new")
+ chrome_options.add_argument("--no-sandbox")
+ chrome_options.add_argument("--disable-dev-shm-usage")
+ chrome_options.add_argument("--disable-gpu")
+ chrome_options.add_argument("--disable-software-rasterizer")
+ chrome_options.add_argument("--disable-extensions")
+ chrome_options.add_argument("--remote-debugging-port=9222")
+ chrome_prefs = {"profile.managed_default_content_settings.images": 2}
+ chrome_options.experimental_options["prefs"] = chrome_prefs
+
+ MIN_HTML_BYTES = 2048  # tune: consider <2KB suspicious for real pages
+ # --- NEW helpers: unique temp profile + free port + options builder ---
+
+ def _free_port() -> int:
+     s = socket.socket()
+     s.bind(("127.0.0.1", 0))
+     port = s.getsockname()[1]
+     s.close()
+     return port
+
+ def _make_profile_dir(base="/var/tmp/selenium-profiles") -> str:
+     os.makedirs(base, exist_ok=True)
+     return tempfile.mkdtemp(prefix="cw-", dir=base)
+
+ def _make_chrome_options(binary_path: str | None = None,
+                          user_data_dir: str | None = None) -> tuple[Options, str]:
+     opts = Options()
+     if binary_path:
+         opts.binary_location = binary_path
+     opts.add_argument("--headless=new")
+     opts.add_argument("--no-sandbox")
+     opts.add_argument("--disable-dev-shm-usage")
+     opts.add_argument("--disable-gpu")
+     opts.add_argument("--disable-software-rasterizer")
+     opts.add_argument("--disable-extensions")
+
+     prof = user_data_dir or _make_profile_dir()
+     opts.add_argument(f"--user-data-dir={prof}")
+     opts.add_argument(f"--remote-debugging-port={_free_port()}")
+
+     prefs = {"profile.managed_default_content_settings.images": 2}
+     opts.add_experimental_option("prefs", prefs)
+     return opts, prof
+
+
+ def _looks_like_html(text_or_bytes: bytes | str) -> bool:
+     if not text_or_bytes:
+         return False
+     s = text_or_bytes if isinstance(text_or_bytes, str) else text_or_bytes.decode("utf-8", "ignore")
+     if len(s) < MIN_HTML_BYTES:
+         return False
+     lowered = s.lower()
+     return ("<html" in lowered and "</html>" in lowered) or "<body" in lowered
+
+ def _requests_fallback(url: str, headers: dict | None = None, timeout: float = 15.0):
+     """Plain requests fallback. Returns `requests.Response | None`."""
+     try:
+         sess = requests.Session()
+         sess.headers.update(headers or {"User-Agent": "Mozilla/5.0"})
+         # honor simple redirects and cert issues as needed
+         resp = sess.get(url, timeout=timeout, allow_redirects=True, verify=False)
+         return resp
+     except Exception as e:
+         logging.warning(f"requests fallback failed for {url}: {e}")
+         return None
+
+ def _wait_until_ready(driver, timeout: float = 10.0):
+     """Waits for DOM readiness and presence of <body>."""
+     try:
+         WebDriverWait(driver, timeout).until(
+             lambda d: d.execute_script("return document.readyState") in ("interactive", "complete")
+         )
+     except Exception:
+         pass
+     try:
+         WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.TAG_NAME, "body")))
+     except Exception:
+         pass
+     # small settle delay for late JS injections
+     time.sleep(0.3)
+ def normalize_url(url, base_url=None):
+     manager = seleniumManager(url)
+     base_url = manager.base_url
+     if url.startswith(base_url):
+         url = url[len(base_url):]
+     normalized_url = urljoin(base_url, url.split('#')[0])
+     if not normalized_url.startswith(base_url):
+         return None
+     return normalized_url
+ # ---- Singleton driver manager (your class; small fixes) ----
+ class SingletonMeta(type):
+     _instances = {}
+     def __call__(cls, *args, **kwargs):
+         if cls not in cls._instances:
+             instance = super().__call__(*args, **kwargs)
+             cls._instances[cls] = instance
+         return cls._instances[cls]
+
+ class seleniumManager(metaclass=SingletonMeta):
+     def __init__(self, url):
+         if getattr(self, "initialized", False):
+             return
+         self.initialized = True
+
+         p = urlparse(url)
+         self.domain = p.netloc
+         self.scheme = p.scheme or "https"
+         self.base_url = f"{self.scheme}://{self.domain}"
+
+         self.site_dir = os.path.join("/var/tmp", "cw-sites", self.domain)
+         os.makedirs(self.site_dir, exist_ok=True)
+
+         self._sessions: dict[str, dict] = {}  # key -> {"driver": ..., "profile": ...}
+         atexit.register(lambda sm=self: sm.close_all())
+
+     def get_url_to_path(self, url):
+         url = eatAll(str(url), ['',' ','\n','\t','\\','/'])
+         p = urlparse(url)
+         if p.netloc == self.domain:
+             parts = [x for x in p.path.split('/') if x]
+             d = self.site_dir
+             for seg in parts[:-1]:
+                 d = os.path.join(d, seg)
+                 os.makedirs(d, exist_ok=True)
+             last = parts[-1] if parts else "index.html"
+             ext = os.path.splitext(last)[-1] or ".html"
+             if not hasattr(self, "page_type"):
+                 self.page_type = []
+             self.page_type.append(ext if not self.page_type else self.page_type[-1])
+             return os.path.join(d, last)
+
+     def get_with_netloc(self, url):
+         p = urlparse(url)
+         if p.netloc == '':
+             url = f"{self.scheme}://{self.domain}/{url.strip().lstrip('/')}"
+         return url
+
+     def get_driver(self, url) -> tuple[str, webdriver.Chrome]:
+         bin_path = get_env_value('CHROME_BINARY')
+         opts, prof = _make_chrome_options(binary_path=bin_path, user_data_dir=None)
+         driver = webdriver.Chrome(options=opts)
+         key = f"{url}#{time.time()}"
+         self._sessions[key] = {"driver": driver, "profile": prof}
+         return key, driver
+
+     def close_driver(self, key: str):
+         sess = self._sessions.pop(key, None)
+         if not sess: return
+         try:
+             try: sess["driver"].quit()
+             except Exception: pass
+         finally:
+             shutil.rmtree(sess.get("profile") or "", ignore_errors=True)
+
+     def close_all(self):
+         for key in list(self._sessions.keys()):
+             self.close_driver(key)
+
+
+
+ # ---- Hardened page-source retrieval with fallback ----
+ def get_selenium_source(url, max_retries: int = 2, request_fallback: bool = True, timeout: float = 12.0):
+     url_mgr = urlManager(url)
+     if not url_mgr.url:
+         return None
+     url = str(url_mgr.url)
+
+     manager = seleniumManager(url)
+     key, driver = manager.get_driver(url)
+
+     last_exc = None
+     try:
+         for attempt in range(1, max_retries + 1):
+             try:
+                 driver.get(url)
+                 _wait_until_ready(driver, timeout=timeout)
+                 html = driver.page_source or ""
+                 if not _looks_like_html(html):
+                     html = driver.execute_script(
+                         "return document.documentElement ? document.documentElement.outerHTML : '';"
+                     ) or html
+                 if _looks_like_html(html):
+                     return html
+                 logging.warning(f"Selenium returned suspicious HTML (len={len(html)}) for {url} "
+                                 f"[attempt {attempt}/{max_retries}]")
+             except Exception as e:
+                 last_exc = e
+                 logging.warning(f"Selenium attempt {attempt}/{max_retries} failed for {url}: {e}")
+             time.sleep(0.5 * attempt)
+
+         if request_fallback:
+             resp = _requests_fallback(url, headers={"User-Agent": "Mozilla/5.0"})
+             if resp is not None:
+                 ctype = (resp.headers.get("content-type") or "").lower()
+                 body = resp.text if hasattr(resp, "text") else (
+                     resp.content.decode("utf-8", "ignore") if hasattr(resp, "content") else ""
+                 )
+                 if "application/json" in ctype:
+                     try:
+                         return json.dumps(resp.json())
+                     except Exception:
+                         return body
+                 return body if _looks_like_html(body) or body else None
+     finally:
+         # critical: release the user-data-dir to avoid “already in use”
+         manager.close_driver(key)
+
+     if last_exc:
+         logging.error(f"Unable to retrieve page for {url}: {last_exc}")
+     return None
+
+ def get_driver(self, url):
+     # always new
+     bin_path = get_env_value('CHROME_BINARY')
+     opts, prof = _make_chrome_options(binary_path=bin_path, user_data_dir=None)
+     driver = webdriver.Chrome(options=opts)
+     # store so close_all() can clean up
+     key = f"{url}#{time.time()}"
+     self._sessions[key] = {"driver": driver, "profile": prof}
+     return driver
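A minimal sketch of the hardened entry point in the new seleneumManager.py (the URL is hypothetical): each call builds a throwaway Chrome profile, retries, falls back to requests, and cleans the profile up afterwards:

from abstract_webtools.managers.seleneumManager import get_selenium_source

html = get_selenium_source("https://example.com")
print(len(html or ""))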
@@ -1,6 +1,9 @@
  from ...abstract_webtools import *
  from ..urlManager import *
  from ..requestManager import *
+ from bs4 import BeautifulSoup
+ import re, json
+
  class soupManager:
      """
      SoupManager is a class for managing and parsing HTML source code using BeautifulSoup.
@@ -39,25 +42,49 @@ class soupManager:
      - The SoupManager class is designed for parsing HTML source code using BeautifulSoup.
      - It provides various methods to extract data and discover elements within the source code.
      """
-     def __init__(self,url=None,source_code=None,url_mgr=None,req_mgr=None,soup=None, parse_type="html.parser"):
-         self.soup=[]
-         url = get_url(url=url,url_mgr=url_mgr)
-         self.url_mgr = get_url_mgr(url=url,url_mgr=url_mgr)
-         self.url=self.url_mgr.url
-         self.req_mgr = get_req_mgr(req_mgr=req_mgr,url=self.url,url_mgr=self.url_mgr,source_code=source_code)
-         self.parse_type = parse_type
-         source_code = source_code or self.req_mgr.source_code or self.req_mgr.source_code_bytes
-         if source_code:
-             source_code = str(source_code)
-         self.source_code = source_code or ''
-         self.soup= soup or BeautifulSoup(self.source_code, self.parse_type)
-         self.all_tags_and_attribute_names = self.get_all_tags_and_attribute_names()
-         self.all_tags = self.all_tags_and_attribute_names.get('tags')
-         self.all_attribute_names = self.all_tags_and_attribute_names.get('attributes')
-         self.all_tags_and_attributes = self.all_tags + self.all_attribute_names
-
-         self._all_links_data = None
-         self._meta_tags_data = None
+
+     def __init__(self, url=None, source_code=None, req_mgr=None, parse_type="html.parser"):
+         self.url = url
+         self.req_mgr = req_mgr
+         self.source_code = (source_code or (req_mgr.source_code if req_mgr else "")) or ""
+         self.soup = BeautifulSoup(self.source_code, parse_type)
+
+     def all_meta(self):
+         out = []
+         for m in self.soup.find_all("meta"):
+             row = {}
+             for k in ("name","property","http-equiv","itemprop","charset","content"):
+                 v = m.get(k)
+                 if v: row[k] = v
+             if row: out.append(row)
+         return out
+
+     def citation_dict(self):
+         out = {}
+         for m in self.soup.find_all("meta"):
+             k = (m.get("name") or m.get("property") or "").lower()
+             if k.startswith("citation_") and m.get("content"):
+                 out.setdefault(k, []).append(m["content"])
+         return out
+
+     def all_links(self):
+         res = []
+         for l in self.soup.find_all("link"):
+             rel = l.get("rel")
+             if isinstance(rel, list): rel = " ".join(rel)
+             res.append({
+                 "rel": rel, "href": l.get("href"),
+                 "type": l.get("type"), "title": l.get("title"), "hreflang": l.get("hreflang")
+             })
+         return res
+
+     def all_jsonld(self):
+         blocks = []
+         for s in self.soup.find_all("script", type=re.compile("application/ld\\+json", re.I)):
+             txt = s.get_text(strip=True)
+             try: blocks.append(json.loads(txt))
+             except Exception: blocks.append({"raw": txt})
+         return blocks
      def re_initialize(self):
          self.soup= BeautifulSoup(self.source_code, self.parse_type)
          self._all_links_data = None
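A minimal sketch of the new extraction helpers, assuming the source code is already in hand (the markup below is made up):

from abstract_webtools.managers.soupManager.soupManager import soupManager

sm = soupManager(url="https://example.com",
                 source_code="<html><head><meta name='a' content='b'></head></html>")
print(sm.all_meta())       # [{'name': 'a', 'content': 'b'}]
print(sm.citation_dict())  # {} unless citation_* meta tags are present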
@@ -1,12 +1,21 @@
  from ..abstract_webtools import *
+ # sslManager.py
+ from ..abstract_webtools import *  # must expose ssl, ssl_
+ from .cipherManager import CipherManager  # be explicit, safer
+
  class SSLManager:
      def __init__(self, ciphers=None, ssl_options=None, certification=None):
          self.ciphers = ciphers or CipherManager().ciphers_string
          self.ssl_options = ssl_options or self.get_default_ssl_settings()
          self.certification = certification or ssl.CERT_REQUIRED
          self.ssl_context = self.get_context()
+
      def get_default_ssl_settings(self):
          return ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
-     def get_context(self):
-         return ssl_.create_urllib3_context(ciphers=self.ciphers, cert_reqs=self.certification, options=self.ssl_options)

+     def get_context(self):
+         return ssl_.create_urllib3_context(
+             ciphers=self.ciphers,
+             cert_reqs=self.certification,
+             options=self.ssl_options
+         )
@@ -0,0 +1,51 @@
+ # userAgentManager.py
+ from ..abstract_webtools import *
+ import random
+
+ operating_systems = ['Macintosh','Windows','Linux']
+ browsers = ['Firefox','Chrome','IceDragon','Waterfox','Gecko','Safari','MetaSr']
+
+ def _pick(val, options):
+     if not val: return options[0]
+     if val in options: return val
+     l = val.lower()
+     for o in options:
+         if l in o.lower():
+             return o
+     return options[0]
+
+ class UserAgentManager:
+     def __init__(self, operating_system=None, browser=None, version=None, user_agent=None):
+         self.operating_system = _pick(operating_system, operating_systems)
+         self.browser = _pick(browser, browsers)
+         self.version = version or '42.0'
+         self.user_agent = user_agent or self.get_user_agent()
+         self.header = {"user-agent": self.user_agent}
+
+     @staticmethod
+     def user_agent_db():
+         from ..big_user_agent_list import big_user_agent_dict
+         return big_user_agent_dict
+
+     def get_user_agent(self):
+         ua_db = self.user_agent_db()
+         os_db = ua_db.get(self.operating_system) or random.choice(list(ua_db.values()))
+         br_db = os_db.get(self.browser) or random.choice(list(os_db.values()))
+         if self.version in br_db:
+             return br_db[self.version]
+         return random.choice(list(br_db.values()))
+
+ class UserAgentManagerSingleton:
+     _instance = None
+
+     @staticmethod
+     def get_instance(**kwargs):
+         ua = kwargs.get("user_agent")
+         if UserAgentManagerSingleton._instance is None:
+             UserAgentManagerSingleton._instance = UserAgentManager(**kwargs)
+         else:
+             # rebuild if user_agent explicitly changed
+             inst = UserAgentManagerSingleton._instance
+             if ua and ua != inst.user_agent:
+                 UserAgentManagerSingleton._instance = UserAgentManager(**kwargs)
+         return UserAgentManagerSingleton._instance
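A minimal sketch of the rewritten UserAgentManager; when the requested version is not in big_user_agent_dict it falls back to a random entry:

from abstract_webtools.managers.userAgentManager import UserAgentManager

ua = UserAgentManager(browser="Chrome", operating_system="Windows")
print(ua.user_agent)  # concrete UA string drawn from big_user_agent_dict
print(ua.header)      # {"user-agent": ...}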
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: abstract_webtools
- Version: 0.1.6.144
+ Version: 0.1.6.146
  Summary: Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.
  Home-page: https://github.com/AbstractEndeavors/abstract_essentials/tree/main/abstract_webtools
  Author: putkoff
@@ -28,6 +28,7 @@ src/abstract_webtools/managers/curlMgr.py
  src/abstract_webtools/managers/domainManager.py
  src/abstract_webtools/managers/dynamicRateLimiter.py
  src/abstract_webtools/managers/get_test.py
+ src/abstract_webtools/managers/meta_dump.py
  src/abstract_webtools/managers/mySocketClient.py
  src/abstract_webtools/managers/networkManager.py
  src/abstract_webtools/managers/seleneumManager.py
@@ -1,48 +0,0 @@
- import os
- import requests
- import os
- import subprocess
- import stat
-
- def get_site(website, destination_dir, filename):
-     # Ensure the directory exists
-     os.makedirs(destination_dir, exist_ok=True)
-
-     # Adjust directory permissions if needed (e.g. rwxr-xr-x -> 0o755)
-     os.chmod(destination_dir, 0o755)
-
-     # Construct the complete file path
-     destination_path = os.path.join(destination_dir, filename)
-
-     # Use curl to download the site
-     # The example user-agent is arbitrary; you can change it to your needs
-     os.system(
-         f'curl -L --output "{destination_path}" '
-         f'-H "User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
-         f'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 '
-         f'Safari/537.36" -H "Accept: */*" "{website}"'
-     )
-
- def download_site(website, destination_dir, filename):
-     os.makedirs(destination_dir, exist_ok=True)
-     os.chmod(destination_dir, 0o755)  # set directory permissions if needed
-
-     destination_path = os.path.join(destination_dir, filename)
-
-     # GET the resource
-     response = requests.get(website, headers={
-         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
-                       "AppleWebKit/537.36 (KHTML, like Gecko) "
-                       "Chrome/91.0.4472.124 Safari/537.36",
-         "Accept": "*/*"
-     }, allow_redirects=True)
-
-     # Raise an exception if the download fails
-     response.raise_for_status()
-
-     # Write content to file
-     with open(destination_path, "wb") as f:
-         f.write(response.content)
- website = 'https://www.pornhub.com'
- destination = '/home/computron/Documents/doge'
- get_site(website,destination,'doge')
@@ -1,15 +0,0 @@
- from ..abstract_webtools import *
- from ..big_user_agent_list import *
- class NetworkManager:
-     def __init__(self, user_agent_manager=None,ssl_manager=None, tls_adapter=None,user_agent=None,proxies=None,cookies=None,ciphers=None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None):
-         if ssl_manager == None:
-             ssl_manager = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         self.ssl_manager=ssl_manager
-         if tls_adapter == None:
-             tls_adapter=TLSAdapter(ssl_manager=ssl_manager,ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         self.tls_adapter=tls_adapter
-         self.ciphers=tls_adapter.ciphers
-         self.certification=tls_adapter.certification
-         self.ssl_options=tls_adapter.ssl_options
-         self.proxies=None or {}
-         self.cookies=cookies or "cb4c883efc59d0e990caf7508902591f4569e7bf-1617321078-0-150"
@@ -1,116 +0,0 @@
- import os
- #from ..abstract_webtools import urlManager
- from .urlManager import *
- from urllib.parse import urlparse
- from abstract_utilities import *
- from selenium import webdriver
- from selenium.webdriver.chrome.options import Options
- import logging
- import urllib3
- from abstract_security import get_env_value
- # Suppress urllib3 warnings and debug logs
- urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
- logging.getLogger("urllib3").setLevel(logging.WARNING)
-
- # Suppress Selenium logs
- logging.getLogger("selenium").setLevel(logging.WARNING)
-
-
- # Setup Chrome options
- chrome_options = Options()
- chrome_options.binary_location = get_env_value('CHROME_BINARY')
- chrome_options.add_argument("--headless")  # Run in headless mode
- chrome_options.add_argument("--no-sandbox")
- chrome_options.add_argument("--disable-dev-shm-usage")
- chrome_options.add_argument("--disable-gpu")
- chrome_options.add_argument("--disable-software-rasterizer")
- chrome_options.add_argument("--disable-extensions")
- chrome_options.add_argument("--remote-debugging-port=9222")
-
-
-
- class SingletonMeta(type):
-     _instances = {}
-     def __call__(cls, *args, **kwargs):
-         if cls not in cls._instances:
-             instance = super().__call__(*args, **kwargs)
-             cls._instances[cls] = instance
-         return cls._instances[cls]
-
- class seleniumManager(metaclass=SingletonMeta):
-     def __init__(self, url):
-         if not hasattr(self, 'initialized'):  # Prevent reinitialization
-             self.initialized = True
-             parsed_url = urlparse(url)
-             self.domain = parsed_url.netloc
-             self.scheme = parsed_url.scheme
-             self.base_url= f"{self.scheme}{self.domain}"
-             self.site_dir = os.path.join(os.getcwd(), self.domain)
-             os.makedirs(self.site_dir, exist_ok=True)
-             self.drivers = {}
-             self.page_type = []
-
-     def get_url_to_path(self, url):
-         url = eatAll(str(url), ['',' ','\n','\t','\\','/'])
-         parsed_url = urlparse(url)
-         if parsed_url.netloc == self.domain:
-             paths = parsed_url.path.split('/')
-             dir_path = self.site_dir
-             for path in paths[:-1]:
-                 dir_path = os.path.join(dir_path, path)
-                 os.makedirs(dir_path, exist_ok=True)
-             self.page_type.append(os.path.splitext(paths[-1])[-1] or 'html' if len(self.page_type) == 0 else self.page_type[-1])
-
-             dir_path = os.path.join(dir_path, paths[-1])
-             return dir_path
-
-     def saved_url_check(self, url):
-         path = self.get_url_to_path(url)
-         return path
-
-     def get_with_netloc(self, url):
-         parsed_url = urlparse(url)
-         if parsed_url.netloc == '':
-             url = f"{self.scheme}://{self.domain}/{url.strip()}"
-         return url
-
-     def get_driver(self, url):
-         if url and url not in self.drivers:
-             # chrome_options = Options()
-             # chrome_options.add_argument("--headless")
-             driver = webdriver.Chrome(options=chrome_options)
-             self.drivers[url] = driver
-             driver.get(url)
-         return self.drivers[url]
- def normalize_url(url, base_url=None):
-     """
-     Normalize and resolve relative URLs, ensuring proper domain and format.
-     """
-     # If URL starts with the base URL repeated, remove the extra part
-     manager = seleniumManager(url)
-     base_url = manager.base_url
-     if url.startswith(base_url):
-         url = url[len(base_url):]
-
-     # Resolve the URL against the base URL
-     normalized_url = urljoin(base_url, url.split('#')[0])
-
-     # Ensure only URLs belonging to the base domain are kept
-     if not normalized_url.startswith(base_url):
-         return None
-
-     return normalized_url
- # Function to get Selenium page source
- def get_selenium_source(url):
-     url_mgr = urlManager(url)
-     if url_mgr.url:
-         url = str(url_mgr.url)
-         manager = seleniumManager(url)
-         driver = manager.get_driver(url)
-         try:
-             # Get page source
-             page_source = driver.page_source
-             return page_source
-         finally:
-             # Don't quit the driver unless you're done with all interactions
-             pass
@@ -1,60 +0,0 @@
- from ..abstract_webtools import *
- import random
- operating_systems = ['Macintosh','Windows','Linux']
- browsers = ['Firefox','Chrome','IceDragon','Waterfox','Gecko','Safari','MetaSr']
- def get_itter(iter_input,itter_list):
-     if not iter_input:
-         return itter_list[0]
-     if iter_input in itter_list:
-         return iter_input
-     iter_input_lower = iter_input.lower()
-     for itter in itter_list:
-         itter_lower = itter.lower()
-         if iter_input_lower in itter_lower:
-             return itter
-     return itter_list[0]
- def get_browser(browser=None):
-     return get_itter(browser,browsers)
- def get_operating_system(operating_system=None):
-     return get_itter(operating_system,operating_systems)
- class UserAgentManager:
-     def __init__(self, operating_system=None, browser=None, version=None,user_agent=None):
-         self.operating_system = get_operating_system(operating_system=operating_system)
-         self.browser = get_browser(browser=browser)
-         self.version = version or '42.0'
-         self.user_agent = user_agent or self.get_user_agent()
-         self.header = self.user_agent_header()
-     @staticmethod
-     def user_agent_db():
-         from ..big_user_agent_list import big_user_agent_dict
-         return big_user_agent_dict
-
-     def get_user_agent(self):
-         ua_db = self.user_agent_db()
-
-         if self.operating_system and self.operating_system in ua_db:
-             operating_system_db = ua_db[self.operating_system]
-         else:
-             operating_system_db = random.choice(list(ua_db.values()))
-
-         if self.browser and self.browser in operating_system_db:
-             browser_db = operating_system_db[self.browser]
-         else:
-             browser_db = random.choice(list(operating_system_db.values()))
-
-         if self.version and self.version in browser_db:
-             return browser_db[self.version]
-         else:
-             return random.choice(list(browser_db.values()))
-
-     def user_agent_header(self):
-         return {"user-agent": self.user_agent}
- class UserAgentManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(user_agent=UserAgentManager().get_user_agent()[0]):
-         if UserAgentManagerSingleton._instance is None:
-             UserAgentManagerSingleton._instance = UserAgentManager(user_agent=user_agent)
-         elif UserAgentManagerSingleton._instance.user_agent != user_agent:
-             UserAgentManagerSingleton._instance = UserAgentManager(user_agent=user_agent)
-         return UserAgentManagerSingleton._instance