abstract-webtools 0.1.6.2__tar.gz → 0.1.6.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {abstract_webtools-0.1.6.2/src/abstract_webtools.egg-info → abstract_webtools-0.1.6.4}/PKG-INFO +1 -1
  2. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/setup.py +1 -1
  3. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/crawlManager.py +3 -18
  4. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/seleniumManager.py +18 -0
  5. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4/src/abstract_webtools.egg-info}/PKG-INFO +1 -1
  6. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/LICENSE +0 -0
  7. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/README.md +0 -0
  8. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/pyproject.toml +0 -0
  9. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/setup.cfg +0 -0
  10. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/__init__.py +0 -0
  11. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/abstract_webtools.py +0 -0
  12. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/big_user_agent_list.py +0 -0
  13. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/main.py +0 -0
  14. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/__init__.py +0 -0
  15. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/cipherManager.py +0 -0
  16. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/domainManager.py +0 -0
  17. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/dynamicRateLimiter.py +0 -0
  18. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/linkManager.py +0 -0
  19. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/mySocketClient.py +0 -0
  20. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/networkManager.py +0 -0
  21. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/requestManager.py +0 -0
  22. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/soupManager.py +0 -0
  23. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/sslManager.py +0 -0
  24. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/tlsAdapter.py +0 -0
  25. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/urlManager.py +0 -0
  26. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/userAgentManager.py +0 -0
  27. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/managers/videoDownloader.py +0 -0
  28. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/soup_gui.py +0 -0
  29. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/url_grabber.py +0 -0
  30. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools/url_grabber_new.py +0 -0
  31. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools.egg-info/SOURCES.txt +0 -0
  32. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools.egg-info/dependency_links.txt +0 -0
  33. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools.egg-info/requires.txt +0 -0
  34. {abstract_webtools-0.1.6.2 → abstract_webtools-0.1.6.4}/src/abstract_webtools.egg-info/top_level.txt +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: abstract_webtools
-Version: 0.1.6.2
+Version: 0.1.6.4
 Summary: Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.
 Home-page: https://github.com/AbstractEndeavors/abstract_essentials/tree/main/abstract_webtools
 Author: putkoff
setup.py
@@ -4,7 +4,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
     long_description = fh.read()
 setuptools.setup(
     name='abstract_webtools',
-    version='0.1.6.02',
+    version='0.1.6.04',
     author='putkoff',
     author_email='partners@abstractendeavors.com',
     description='Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.',
src/abstract_webtools/managers/crawlManager.py
@@ -1,21 +1,6 @@
 from .soupManager import *
 
-def normalize_url(url, base_url):
-    """
-    Normalize and resolve relative URLs, ensuring proper domain and format.
-    """
-    # If URL starts with the base URL repeated, remove the extra part
-    if url.startswith(base_url):
-        url = url[len(base_url):]
 
-    # Resolve the URL against the base URL
-    normalized_url = urljoin(base_url, url.split('#')[0])
-
-    # Ensure only URLs belonging to the base domain are kept
-    if not normalized_url.startswith(base_url):
-        return None
-
-    return normalized_url
 class crawlManager():
     def __init__(self,url=None,req_mgr=None,url_mgr=None,source_code=None,parse_type="html.parser"):
         self.url=url
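
Note: the normalize_url helper removed here reappears, slightly modified, in seleniumManager.py later in this diff. Its normalization relies on the standard library's urllib.parse.urljoin; a short sketch of the resolution behavior it depends on:

    from urllib.parse import urljoin

    # A relative path resolves against the base's directory:
    urljoin("https://example.com/docs/", "page.html")       # "https://example.com/docs/page.html"
    # A root-relative path replaces the base's path:
    urljoin("https://example.com/docs/", "/about")           # "https://example.com/about"
    # An absolute URL wins outright, which is why the helper re-checks the domain afterwards:
    urljoin("https://example.com/", "https://other.org/x")   # "https://other.org/x"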
@@ -73,7 +58,7 @@ class crawlManager():
         """
         all_urls=[self.url_mgr.url]
         domain = self.url_mgr.domain
-        all_attribs = get_attribs(self.url_mgr.url)
+        all_attribs = get_all_attribute_values(self.url_mgr.url)
         for href in all_attribs.get('href',[]):
             if href == "" or href is None:
                 # href empty tag
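
Note: get_all_attribute_values comes from abstract_webtools' soupManager module and its exact return shape is not visible in this diff; the loop above treats it as a mapping of attribute names to value lists. A minimal, hypothetical stand-in built directly on BeautifulSoup, assuming that shape (names and signature are illustrative, not the package's own):

    from collections import defaultdict

    import requests
    from bs4 import BeautifulSoup

    def collect_attribute_values(url):
        """Map each attribute name to the list of values found on the page."""
        soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
        values = defaultdict(list)
        for tag in soup.find_all(True):          # every element on the page
            for attr, val in tag.attrs.items():  # e.g. href, src, class
                if isinstance(val, list):        # multi-valued attributes (class) come back as lists
                    values[attr].extend(val)
                else:
                    values[attr].append(val)
        return dict(values)

    # Usage mirroring the crawl loop above:
    # all_attribs = collect_attribute_values("https://example.com")
    # for href in all_attribs.get('href', []):
    #     if not href:
    #         continue  # skip empty href values, as the crawler does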
@@ -163,7 +148,7 @@ class crawlManager():
         # Fetch the title if available
         meta_tags = soup_mgr.find_all("meta")
         url = eatAll(str(url),['',' ','\n','\t','\\','/'])
-        attribs = get_attribs(url)
+        attribs = get_all_attribute_values(url)
         soup = get_soup(url)
 
         for meta_tag in meta_tags:
@@ -194,7 +179,7 @@ class crawlManager():
             string += f' <url>\n <loc>{url}</loc>\n'
             preprocess=[]
             self.get_new_source_and_url(url=url)
-            links = get_attribs(url)
+            links = get_all_attribute_values(url)
             images = [link for link in links if link.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.svg', '.webp'))]
 
             for img in images:
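
Note: this hunk iterates the return value of get_all_attribute_values directly and filters for image extensions. If the helper returns an attribute-name-to-values mapping (as assumed in the sketch above), iterating it yields attribute names rather than URLs; a sketch of filtering image URLs from the href/src values instead (purely illustrative, not the package's code):

    IMAGE_EXTS = ('.png', '.jpg', '.jpeg', '.gif', '.svg', '.webp')

    def image_links(all_attribs):
        """Pull image-looking URLs out of an attribute-name -> values mapping."""
        candidates = all_attribs.get('href', []) + all_attribs.get('src', [])
        return [link for link in candidates if link.lower().endswith(IMAGE_EXTS)]

    # images = image_links(collect_attribute_values(url))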
src/abstract_webtools/managers/seleniumManager.py
@@ -46,6 +46,7 @@ class seleniumManager(metaclass=SingletonMeta):
         parsed_url = urlparse(url)
         self.domain = parsed_url.netloc
         self.scheme = parsed_url.scheme
+        self.base_url= f"{self.scheme}{self.domain}"
         self.site_dir = os.path.join(os.getcwd(), self.domain)
         os.makedirs(self.site_dir, exist_ok=True)
         self.drivers = {}
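
Note: the added line concatenates scheme and netloc with no separator, so for https://example.com the stored base_url is "httpsexample.com". A minimal sketch of building the base with urllib.parse (a suggested alternative, not what the released package does):

    from urllib.parse import urlparse, urlunparse

    def build_base_url(url):
        """Reconstruct scheme://netloc from a full URL."""
        parsed = urlparse(url)
        return urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))

    # build_base_url("https://example.com/path?q=1")  -> "https://example.com"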
@@ -83,7 +84,24 @@ class seleniumManager(metaclass=SingletonMeta):
             self.drivers[url] = driver
             driver.get(url)
         return self.drivers[url]
+def normalize_url(url, base_url=None):
+    """
+    Normalize and resolve relative URLs, ensuring proper domain and format.
+    """
+    # If URL starts with the base URL repeated, remove the extra part
+    manager = seleniumManager(url)
+    base_url = manager.base_url
+    if url.startswith(base_url):
+        url = url[len(base_url):]
 
+    # Resolve the URL against the base URL
+    normalized_url = urljoin(base_url, url.split('#')[0])
+
+    # Ensure only URLs belonging to the base domain are kept
+    if not normalized_url.startswith(base_url):
+        return None
+
+    return normalized_url
 # Function to get Selenium page source
 def get_selenium_source(url):
     url_mgr = urlManager(url)
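
Note: because seleniumManager uses SingletonMeta, the seleniumManager(url) call inside normalize_url returns the process-wide instance, so base_url may reflect whichever URL the singleton was first created with (and it inherits the missing "://" noted above). A standalone sketch of the same normalization that derives the base from the URL being normalized (an illustrative alternative, not the package's implementation):

    from urllib.parse import urljoin, urlparse, urlunparse

    def normalize_same_domain(url, base_url=None):
        """Resolve url against base_url, drop fragments, and return None if it leaves the base domain."""
        if base_url is None:
            parsed = urlparse(url)
            base_url = urlunparse((parsed.scheme, parsed.netloc, '', '', '', ''))
        if url.startswith(base_url):
            url = url[len(base_url):]
        normalized = urljoin(base_url, url.split('#')[0])
        return normalized if normalized.startswith(base_url) else None

    # normalize_same_domain("/about#team", "https://example.com")  -> "https://example.com/about"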
src/abstract_webtools.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: abstract_webtools
-Version: 0.1.6.2
+Version: 0.1.6.4
 Summary: Abstract Web Tools is a Python package that provides various utility functions for web scraping tasks. It is built on top of popular libraries such as `requests`, `BeautifulSoup`, and `urllib3` to simplify the process of fetching and parsing web content.
 Home-page: https://github.com/AbstractEndeavors/abstract_essentials/tree/main/abstract_webtools
 Author: putkoff