abstract-webtools 0.1.5.81__py3-none-any.whl → 0.1.5.83__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -84,9 +84,12 @@ from requests.adapters import HTTPAdapter
  from urllib.parse import urlparse, urljoin
  from requests.packages.urllib3.util import ssl_
  from requests.packages.urllib3.poolmanager import PoolManager
+ from urllib.parse import urlparse, parse_qs
+ import time
+ import requests
+ from .managers import *
  from abstract_utilities import get_time_stamp,get_sleep,sleep_count_down,eatInner,eatAll,eatOuter,ThreadManager
  logging.basicConfig(level=logging.INFO)
-
  def try_request(request):
      try:
          respnse = requests.get(url)
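The substance of this release is a refactor: the manager classes shown as removed below now arrive through the new wildcard import from .managers. Assuming the managers subpackage re-exports the same class names that appear in the removed code (this diff does not show its contents), a hypothetical consumer keeps working unchanged:

    # Sketch only; import paths assumed rather than confirmed by this diff.
    from abstract_webtools import UrlManager, SafeRequest, SoupManager
    req = SafeRequest(url='https://example.com')   # now resolved via abstract_webtools.managers

Note that try_request above still references an undefined name url instead of its request parameter; the diff leaves that function untouched.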
@@ -95,1758 +98,3 @@ def try_request(request):
          response = None
      return response
 
- class CipherManager:
-     @staticmethod
-     def get_default_ciphers()-> list:
-         return [
-             "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384",
-             "ECDHE-RSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA384",
-             "ECDHE-RSA-AES256-SHA", "ECDHE-ECDSA-AES256-SHA",
-             "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-SHA256",
-             "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES128-SHA256",
-             "AES256-SHA", "AES128-SHA"
-         ]
-
-     def __init__(self,cipher_list=None):
-         if cipher_list == None:
-             cipher_list=self.get_default_ciphers()
-         self.cipher_list = cipher_list
-         self.create_list()
-         self.ciphers_string = self.add_string_list()
-     def add_string_list(self):
-         if len(self.cipher_list)==0:
-             return ''
-         return','.join(self.cipher_list)
-     def create_list(self):
-         if self.cipher_list == None:
-             self.cipher_list= []
-         elif isinstance(self.cipher_list, str):
-             self.cipher_list=self.cipher_list.split(',')
-         if isinstance(self.cipher_list, str):
-             self.cipher_list=[self.cipher_list]
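For orientation, a minimal sketch of CipherManager's contract, based only on the removed code above: it normalizes a list, or a comma-separated string, of cipher names into the ciphers_string that SSLManager consumes below.

    cm = CipherManager()                 # defaults to get_default_ciphers()
    cm.ciphers_string                    # 'ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-GCM-SHA384,...'
    CipherManager(cipher_list='AES256-SHA,AES128-SHA').cipher_list
    # -> ['AES256-SHA', 'AES128-SHA']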
- class CipherManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(cipher_list=None):
-         if CipherManagerSingleton._instance is None:
-             CipherManagerSingleton._instance = CipherManager(cipher_list=cipher_list)
-         elif CipherManagerSingleton._instance.cipher_list != cipher_list:
-             CipherManagerSingleton._instance = CipherManager(cipher_list=cipher_list)
-         return CipherManagerSingleton._instance
- class SSLManager:
-     def __init__(self, ciphers=None, ssl_options=None, certification=None):
-         self.ciphers = ciphers or CipherManager().ciphers_string
-         self.ssl_options = ssl_options or self.get_default_ssl_settings()
-         self.certification = certification or ssl.CERT_REQUIRED
-         self.ssl_context = self.get_context()
-     def get_default_ssl_settings(self):
-         return ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
-     def get_context(self):
-         return ssl_.create_urllib3_context(ciphers=self.ciphers, cert_reqs=self.certification, options=self.ssl_options)
-
- class SSLManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(ciphers=None, ssl_options_list=None, certification=None):
-         if SSLManagerSingleton._instance is None:
-             SSLManagerSingleton._instance = SSLManager(ciphers=ciphers, ssl_options_list=ssl_options_list, certification=certification)
-         elif SSLManagerSingleton._instance.cipher_manager.ciphers_string != ciphers or SSLManagerSingleton._instance.ssl_options_list !=ssl_options_list or SSLManagerSingleton._instance.certification !=certification:
-             SSLManagerSingleton._instance = SSLManager(ciphers=ciphers, ssl_options_list=ssl_options_list, certification=certification)
-         return SSLManagerSingleton._instance
- class TLSAdapter(HTTPAdapter):
-     def __init__(self, ssl_manager=None,ciphers=None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None):
-         if ssl_manager == None:
-             ssl_manager = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         self.ssl_manager = ssl_manager
-         self.ciphers = ssl_manager.ciphers
-         self.certification = ssl_manager.certification
-         self.ssl_options = ssl_manager.ssl_options
-         self.ssl_context = self.ssl_manager.ssl_context
-         super().__init__()
-
-     def init_poolmanager(self, *args, **kwargs):
-         kwargs['ssl_context'] = self.ssl_context
-         return super().init_poolmanager(*args, **kwargs)
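TLSAdapter follows the standard requests pattern for customizing TLS: subclass HTTPAdapter, build an ssl_context (here via SSLManager and urllib3's create_urllib3_context), and inject it in init_poolmanager. A sketch of the intended use:

    import requests

    session = requests.Session()
    session.mount('https://', TLSAdapter())    # https:// traffic now uses the custom ciphers and TLS options
    resp = session.get('https://example.com')

Note that SSLManagerSingleton above passes ssl_options_list= to an SSLManager constructor that only accepts ssl_options, so that singleton would raise a TypeError if exercised.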
- class TLSAdapterSingleton:
-     _instance: Optional[TLSAdapter] = None
-
-     @staticmethod
-     def get_instance(ciphers: Optional[List[str]] = None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None) -> TLSAdapter:
-         if (not TLSAdapterSingleton._instance) or (
-             TLSAdapterSingleton._instance.ciphers != ciphers or
-             TLSAdapterSingleton._instance.certification != certification or
-             TLSAdapterSingleton._instance.ssl_options != ssl_options
-         ):
-             TLSAdapterSingleton._instance = TLSAdapter(ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         return TLSAdapterSingleton._instance
- class UserAgentManager:
-     def __init__(self, os=None, browser=None, version=None,user_agent=None):
-         self.os = os or 'Windows'
-         self.browser = browser or "Firefox"
-         self.version = version or '42.0'
-         self.user_agent = user_agent or self.get_user_agent()
-         self.header = self.user_agent_header()
-     @staticmethod
-     def user_agent_db():
-         from .big_user_agent_list import big_user_agent_dict
-         return big_user_agent_dict
-
-     def get_user_agent(self):
-         ua_db = self.user_agent_db()
-
-         if self.os and self.os in ua_db:
-             os_db = ua_db[self.os]
-         else:
-             os_db = random.choice(list(ua_db.values()))
-
-         if self.browser and self.browser in os_db:
-             browser_db = os_db[self.browser]
-         else:
-             browser_db = random.choice(list(os_db.values()))
-
-         if self.version and self.version in browser_db:
-             return browser_db[self.version]
-         else:
-             return random.choice(list(browser_db.values()))
-
-     def user_agent_header(self):
-         return {"user-agent": self.user_agent}
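UserAgentManager resolves a user-agent string by walking the nested big_user_agent_dict (OS -> browser -> version) and falling back to random.choice at any level where the requested key is missing. A sketch:

    ua = UserAgentManager(os='Windows', browser='Firefox', version='42.0')
    ua.user_agent    # a matching string from the bundled database
    ua.header        # {'user-agent': '...'}, ready to merge into session headers

Note that UserAgentManagerSingleton's default argument below, get_user_agent()[0], indexes into the returned string, yielding its first character rather than a full user agent.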
- class UserAgentManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(user_agent=UserAgentManager().get_user_agent()[0]):
-         if UserAgentManagerSingleton._instance is None:
-             UserAgentManagerSingleton._instance = UserAgentManager(user_agent=user_agent)
-         elif UserAgentManagerSingleton._instance.user_agent != user_agent:
-             UserAgentManagerSingleton._instance = UserAgentManager(user_agent=user_agent)
-         return UserAgentManagerSingleton._instance
- class NetworkManager:
-     def __init__(self, user_agent_manager=None,ssl_manager=None, tls_adapter=None,user_agent=None,proxies=None,cookies=None,ciphers=None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None):
-         if ssl_manager == None:
-             ssl_manager = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         self.ssl_manager=ssl_manager
-         if tls_adapter == None:
-             tls_adapter=TLSAdapter(ssl_manager=ssl_manager,ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         self.tls_adapter=tls_adapter
-         self.ciphers=tls_adapter.ciphers
-         self.certification=tls_adapter.certification
-         self.ssl_options=tls_adapter.ssl_options
-         self.proxies=None or {}
-         self.cookies=cookies or "cb4c883efc59d0e990caf7508902591f4569e7bf-1617321078-0-150"
- class MySocketClient:
-     def __init__(self, ip_address=None, port=None,domain=None):
-         self.sock
-         self.ip_address= ip_address or None
-         self.port = port or None
-
-         self.domain = domain or None
-     def receive_data(self):
-         chunks = []
-         while True:
-             chunk = self.sock.recv(4096)
-             if chunk:
-                 chunks.append(chunk)
-             else:
-                 break
-         return b''.join(chunks).decode('utf-8')
-     def _parse_socket_response_as_json(self, data, *args, **kwargs):
-         return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)
-     def process_data(self):
-         data = self.receive_data()
-         return self._parse_socket_response_as_json(data)
-     def _parse_json(self,json_string):
-         return json.loads(json_string)
-     def get_ip(self,domain=None):
-         try:
-             return self.sock.gethostbyname(domain if domain != None else self.domain)
-         except self.sock.gaierror:
-             return None
-     def grt_host_name(self,ip_address=None):
-         return self.sock.gethostbyaddr(ip_address if ip_address != None else self.ip_address)
-     def toggle_sock(self):
-         if self.sock != None:
-             self.sock.close()
-         else:
-             self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-             if host and socket:
-                 self.sock.connect((host, port))
- class MySocketClient():
-     _instance = None
-     @staticmethod
-     def get_instance(ip_address='local_host',port=22,domain="example.com"):
-         if MySocketClientSingleton._instance is None:
-             MySocketClientSingleton._instance = MySocketClient(ip_address=ip_address,port=port,domain=domain)
-         elif MySocketClientSingleton._instance.ip_address != ip_address or MySocketClientSingleton._instance.port != port or UrlManagerSingleton._instance.domain != domain:
-             MySocketClientSingleton._instance = MySocketClient(ip_address=ip_address,port=port,domain=domain)
-         return MySocketClient
-
- class UrlManager:
-     """
-     UrlManager is a class for managing URLs, including cleaning, validating, and finding the correct version.
-
-     Args:
-         url (str or None): The URL to manage (default is None).
-         session (requests.Session): A custom requests session (default is the requests module's session).
-
-     Attributes:
-         session (requests.Session): The requests session used for making HTTP requests.
-         clean_urls (list): List of cleaned URL variations.
-         url (str): The current URL.
-         protocol (str): The protocol part of the URL (e.g., "https").
-         domain (str): The domain part of the URL (e.g., "example.com").
-         path (str): The path part of the URL (e.g., "/path/to/resource").
-         query (str): The query part of the URL (e.g., "?param=value").
-         all_urls (list): List of all URLs (not used in the provided code).
-
-     Methods:
-         url_to_pieces(url): Split a URL into its protocol, domain, path, and query components.
-         clean_url(url): Return a list of potential URL versions with and without 'www' and 'http(s)'.
-         get_correct_url(url): Get the correct version of the URL from possible variations.
-         update_url(url): Update the URL and related attributes.
-         get_domain(url): Get the domain name from a URL.
-         url_join(url, path): Join a base URL with a path.
-         is_valid_url(url): Check if a URL is valid.
-         make_valid(href, url): Make a URL valid by joining it with a base URL.
-         get_relative_href(url, href): Get the relative href URL by joining it with a base URL.
-
-     Note:
-         - The UrlManager class provides methods for managing URLs, including cleaning and validating them.
-         - It also includes methods for joining and validating relative URLs.
-     """
-
-     def __init__(self, url=None, session=None):
-         """
-         Initialize a UrlManager instance.
-
-         Args:
-             url (str or None): The URL to manage (default is None).
-             session (requests.Session): A custom requests session (default is the requests module's session).
-         """
-         self._url=url or 'www.example.com'
-         self.url = url or 'www.example.com'
-         self.session= session or requests
-         self.clean_urls = self.clean_url(url=url)
-         self.url = self.get_correct_url(clean_urls=self.clean_urls)
-         url_pieces = self.url_to_pieces(url=self.url)
-         self.protocol,self.domain,self.path,self.query=url_pieces
-         self.all_urls = []
-     def url_to_pieces(self, url):
-
-         try:
-             match = re.match(r'^(https?)?://?([^/]+)(/[^?]+)?(\?.+)?', url)
-             if match:
-                 protocol = match.group(1) if match.group(1) else None
-                 domain = match.group(2) if match.group(1) else None
-                 path = match.group(3) if match.group(3) else "" # Handle None
-                 query = match.group(4) if match.group(4) else "" # Handle None
-         except:
-             print(f'the url {url} was not reachable')
-             protocol,domain,path,query=None,None,"",""
-         return protocol, domain, path, query
-
-     def clean_url(self,url=None) -> list:
-         """
-         Given a URL, return a list with potential URL versions including with and without 'www.',
-         and with 'http://' and 'https://'.
-         """
-         if url == None:
-             url=self.url
-         urls=[]
-         if url:
-             # Remove http:// or https:// prefix
-             cleaned = url.replace("http://", "").replace("https://", "")
-             no_subdomain = cleaned.replace("www.", "", 1)
-
-             urls = [
-                 f"https://{cleaned}",
-                 f"http://{cleaned}",
-             ]
-
-             # Add variants without 'www' if it was present
-             if cleaned != no_subdomain:
-                 urls.extend([
-                     f"https://{no_subdomain}",
-                     f"http://{no_subdomain}",
-                 ])
-
-             # Add variants with 'www' if it wasn't present
-             else:
-                 urls.extend([
-                     f"https://www.{cleaned}",
-                     f"http://www.{cleaned}",
-                 ])
-
-         return urls
-
-     def get_correct_url(self,url=None,clean_urls=None) -> (str or None):
-         """
-         Gets the correct URL from the possible variations by trying each one with an HTTP request.
-
-         Args:
-             url (str): The URL to find the correct version of.
-             session (type(requests.Session), optional): The requests session to use for making HTTP requests.
-                 Defaults to requests.
-
-         Returns:
-             str: The correct version of the URL if found, or None if none of the variations are valid.
-         """
-         self.url = url
-         if url==None and clean_urls != None:
-             if self.url:
-                 url=self.url or clean_urls[0]
-         if url!=None and clean_urls==None:
-             clean_urls=self.clean_url(url)
-         elif url==None and clean_urls==None:
-             url=self.url
-             clean_urls=self.clean_urls
-         # Get the correct URL from the possible variations
-         for url in clean_urls:
-             try:
-                 source = self.session.get(url)
-                 return url
-             except requests.exceptions.RequestException as e:
-                 print(e)
-         return None
-     def update_url(self,url):
-         # These methods seem essential for setting up the UrlManager object.
-         self.url = url
-         self.clean_urls = self.clean_url()
-         self.correct_url = self.get_correct_url()
-         self.url =self.correct_url
-         self.protocol,self.domain,self.path,self.query=self.url_to_pieces(url=self.url)
-         self.all_urls = []
-     def get_domain(self,url):
-         return urlparse(url).netloc
-     def url_join(self,url,path):
-         url = eatOuter(url,['/'])
-         path = eatInner(path,['/'])
-         slash=''
-         if path[0] not in ['?','&']:
-             slash = '/'
-         url = url+slash+path
-         return url
-     @property
-     def url(self):
-         return self._url
-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
-     @staticmethod
-     def is_valid_url(url):
-         """
-         Check if the given URL is valid.
-         """
-         parsed = urlparse(url)
-         return bool(parsed.netloc) and bool(parsed.scheme)
-     @staticmethod
-     def make_valid(href,url):
-         def is_valid_url(url):
-             """
-             Check if the given URL is valid.
-             """
-             parsed = urlparse(url)
-             return bool(parsed.netloc) and bool(parsed.scheme)
-         if is_valid_url(href):
-             return href
-         new_link=urljoin(url,href)
-         if is_valid_url(new_link):
-             return new_link
-         return False
-     @staticmethod
-     def get_relative_href(url,href):
-         # join the URL if it's relative (not an absolute link)
-         href = urljoin(url, href)
-         parsed_href = urlparse(href)
-         # remove URL GET parameters, URL fragments, etc.
-         href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
-         return href
-     def url_basename(url):
-         path = urllib.parse.urlparse(url).path
-         return path.strip('/').split('/')[-1]
-
-
-     def base_url(url):
-         return re.match(r'https?://[^?#]+/', url).group()
-
-
-     def urljoin(base, path):
-         if isinstance(path, bytes):
-             path = path.decode()
-         if not isinstance(path, str) or not path:
-             return None
-         if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
-             return path
-         if isinstance(base, bytes):
-             base = base.decode()
-         if not isinstance(base, str) or not re.match(
-                 r'^(?:https?:)?//', base):
-             return None
-         return urllib.parse.urljoin(base, path)
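The core UrlManager flow, per the docstring above: clean_url fans a bare domain out into https/http and www/no-www variants, and get_correct_url probes each variant with a GET until one succeeds. A sketch:

    um = UrlManager(url='example.com')
    um.clean_urls    # ['https://example.com', 'http://example.com',
                     #  'https://www.example.com', 'http://www.example.com']
    um.url           # the first variant that answered, e.g. 'https://example.com'
    um.domain        # 'example.com'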
- class UrlManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(url=None,session=requests):
-         if UrlManagerSingleton._instance is None:
-             UrlManagerSingleton._instance = UrlManager(url,session=session)
-         elif UrlManagerSingleton._instance.session != session or UrlManagerSingleton._instance.url != url:
-             UrlManagerSingleton._instance = UrlManager(url,session=session)
-         return UrlManagerSingleton._instance
- class SafeRequest:
-     """
-     SafeRequest is a class for making HTTP requests with error handling and retries.
-
-     Args:
-         url (str or None): The URL to make requests to (default is None).
-         url_manager (UrlManager or None): An instance of UrlManager (default is None).
-         network_manager (NetworkManager or None): An instance of NetworkManager (default is None).
-         user_agent_manager (UserAgentManager or None): An instance of UserAgentManager (default is None).
-         ssl_manager (SSlManager or None): An instance of SSLManager (default is None).
-         tls_adapter (TLSAdapter or None): An instance of TLSAdapter (default is None).
-         user_agent (str or None): The user agent string to use for requests (default is None).
-         proxies (dict or None): Proxy settings for requests (default is None).
-         headers (dict or None): Additional headers for requests (default is None).
-         cookies (dict or None): Cookie settings for requests (default is None).
-         session (requests.Session or None): A custom requests session (default is None).
-         adapter (str or None): A custom adapter for requests (default is None).
-         protocol (str or None): The protocol to use for requests (default is 'https://').
-         ciphers (str or None): Cipher settings for requests (default is None).
-         auth (tuple or None): Authentication credentials (default is None).
-         login_url (str or None): The URL for authentication (default is None).
-         email (str or None): Email for authentication (default is None).
-         password (str or None): Password for authentication (default is None).
-         certification (str or None): Certification settings for requests (default is None).
-         ssl_options (str or None): SSL options for requests (default is None).
-         stream (bool): Whether to stream the response content (default is False).
-         timeout (float or None): Timeout for requests (default is None).
-         last_request_time (float or None): Timestamp of the last request (default is None).
-         max_retries (int or None): Maximum number of retries for requests (default is None).
-         request_wait_limit (float or None): Wait time between requests (default is None).
-
-     Methods:
-         update_url_manager(url_manager): Update the URL manager and reinitialize the SafeRequest.
-         update_url(url): Update the URL and reinitialize the SafeRequest.
-         re_initialize(): Reinitialize the SafeRequest with the current settings.
-         authenticate(s, login_url=None, email=None, password=None, checkbox=None, dropdown=None): Authenticate and make a request.
-         fetch_response(): Fetch the response from the server.
-         initialize_session(): Initialize the requests session with custom settings.
-         process_response_data(): Process the fetched response data.
-         get_react_source_code(): Extract JavaScript and JSX source code from <script> tags.
-         get_status(url=None): Get the HTTP status code of a URL.
-         wait_between_requests(): Wait between requests based on the request_wait_limit.
-         make_request(): Make a request and handle potential errors.
-         try_request(): Try to make an HTTP request using the provided session.
-
-     Note:
-         - The SafeRequest class is designed for making HTTP requests with error handling and retries.
-         - It provides methods for authentication, response handling, and error management.
-     """
-     def __init__(self,
-                  url=None,
-                  source_code=None,
-                  url_manager=None,
-                  network_manager=None,
-                  user_agent_manager=None,
-                  ssl_manager=None,
-                  ssl_options=None,
-                  tls_adapter=None,
-                  user_agent=None,
-                  proxies=None,
-                  headers=None,
-                  cookies=None,
-                  session=None,
-                  adapter=None,
-                  protocol=None,
-                  ciphers=None,
-                  spec_login=False,
-                  login_referer=None,
-                  login_user_agent=None,
-                  auth=None,
-                  login_url=None,
-                  email = None,
-                  password=None,
-                  checkbox=None,
-                  dropdown=None,
-                  certification=None,
-                  stream=False,
-                  timeout = None,
-                  last_request_time=None,
-                  max_retries=None,
-                  request_wait_limit=None):
-         self.url_manager=url_manager or UrlManager(url=url)
-         self._url=url
-         self.url=url
-
-
-         self.url_manager=url_manager
-         self._url_manager = self.url_manager
-         self.user_agent = user_agent
-         self.user_agent_manager = user_agent_manager or UserAgentManager(user_agent=self.user_agent)
-         self.headers= headers or self.user_agent_manager.header or {'Accept': '*/*'}
-         self.user_agent= self.user_agent_manager.user_agent
-         self.ciphers=ciphers or CipherManager().ciphers_string
-         self.certification=certification
-         self.ssl_options=ssl_options
-         self.ssl_manager = ssl_manager or SSLManager(ciphers=self.ciphers, ssl_options=self.ssl_options, certification=self.certification)
-         self.tls_adapter=tls_adapter or TLSAdapter(ssl_manager=self.ssl_manager,certification=self.certification,ssl_options=self.ssl_manager.ssl_options)
-         self.network_manager= network_manager or NetworkManager(user_agent_manager=self.user_agent_manager,ssl_manager=self.ssl_manager, tls_adapter=self.tls_adapter,user_agent=user_agent,proxies=proxies,cookies=cookies,ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         self.stream=stream
-         self.tls_adapter=self.network_manager.tls_adapter
-         self.ciphers=self.network_manager.ciphers
-         self.certification=self.network_manager.certification
-         self.ssl_options=self.network_manager.ssl_options
-         self.proxies=self.network_manager.proxies
-         self.timeout=timeout
-         self.cookies=self.network_manager.cookies
-         self.session = session or requests.session()
-         self.auth = auth
-         self.spec_login=spec_login
-         self.password=password
-         self.email = email
-         self.checkbox=checkbox
-         self.dropdown=dropdown
-         self.login_url=login_url
-         self.login_user_agent=login_user_agent
-         self.login_referer=login_referer
-         self.protocol=protocol or 'https://'
-
-         self.stream=stream if isinstance(stream,bool) else False
-         self.initialize_session()
-         self.last_request_time=last_request_time
-         self.max_retries = max_retries or 3
-         self.request_wait_limit = request_wait_limit or 1.5
-         self._response=None
-         self.make_request()
-         self.source_code = None
-         self.source_code_bytes=None
-         self.source_code_json = {}
-         self.react_source_code=[]
-         self._response_data = None
-         self.process_response_data()
-     def update_url_manager(self,url_manager):
-         self.url_manager=url_manager
-         self.re_initialize()
-     def update_url(self,url):
-         self.url_manager.update_url(url=url)
-         self.re_initialize()
-     def re_initialize(self):
-         self._response=None
-         self.make_request()
-         self.source_code = None
-         self.source_code_bytes=None
-         self.source_code_json = {}
-         self.react_source_code=[]
-         self._response_data = None
-         self.process_response_data()
-     @property
-     def response(self):
-         """Lazy-loading of response."""
-         if self._response is None:
-             self._response = self.fetch_response()
-
-         return self._response
-     def authenticate(self,session, login_url=None, email=None, password=None,checkbox=None,dropdown=None):
-         login_urls = login_url or [self.url_manager.url,self.url_manager.domain,self.url_manager.url_join(url=self.url_manager.domain,path='login'),self.url_manager.url_join(url=self.url_manager.domain,path='auth')]
-         s = session
-         if not isinstance(login_urls,list):
-             login_urls=[login_urls]
-         for login_url in login_urls:
-             login_url_manager = UrlManager(login_url)
-             login_url = login_url_manager.url
-
-             r = s.get(login_url)
-             soup = BeautifulSoup(r.content, "html.parser")
-             # Find the token or any CSRF protection token
-             token = soup.find('input', {'name': 'token'}).get('value') if soup.find('input', {'name': 'token'}) else None
-             if token != None:
-                 break
-         login_data = {}
-         if email != None:
-             login_data['email']=email
-         if password != None:
-             login_data['password'] = password
-         if checkbox != None:
-             login_data['checkbox'] = checkbox
-         if dropdown != None:
-             login_data['dropdown']=dropdown
-         if token != None:
-             login_data['token'] = token
-         s.post(login_url, data=login_data)
-         return s
-
-     def fetch_response(self) -> Union[requests.Response, None]:
-         """Actually fetches the response from the server."""
-         # You can further adapt this method to use retries or other logic you had
-         # in your original code, but the main goal here is to fetch and return the response
-         return self.try_request()
-     def spec_auth(self, session=None, email=None, password=None, login_url=None, login_referer=None, login_user_agent=None):
-         s = session or requests.session()
-
-         domain = self.url_manager.url_join(self.url_manager.get_correct_url(self.url_manager.domain),'login') if login_url is None else login_url
-         login_url = self.url_manager.get_correct_url(url=domain)
-
-         login_referer = login_referer or self.url_manager.url_join(url=login_url, path='?role=fast&to=&s=1&m=1&email=YOUR_EMAIL')
-         login_user_agent = login_user_agent or 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0'
-
-         headers = {"Referer": login_referer, 'User-Agent': login_user_agent}
-         payload = {'email': email, 'pass': password}
-
-         page = s.get(login_url)
-         soup = BeautifulSoup(page.content, 'lxml')
-         action_url = soup.find('form')['action']
-         s.post(action_url, data=payload, headers=headers)
-         return s
-     def initialize_session(self):
-         s = self.session
-         if self.auth:
-             s= self.auth
-         elif self.spec_login:
-             s=self.spec_auth(session=s,email=self.email, password=self.password, login_url=self.login_url, login_referer=self.login_referer, login_user_agent=self.login_user_agent)
-         elif any([self.password, self.email, self.login_url, self.checkbox, self.dropdown]):
-             s=self.authenticate(session=s, login_url=self.login_url, email=self.email, password=self.password, checkbox=self.checkbox, dropdown=self.dropdown)
-         s.proxies = self.proxies
-         s.cookies["cf_clearance"] = self.network_manager.cookies
-         s.headers.update(self.headers)
-         s.mount(self.protocol, self.network_manager.tls_adapter)
-         return s
-     def process_response_data(self):
-         """Processes the fetched response data."""
-         if not self.response:
-             return # No data to process
-
-         self.source_code = self.response.text
-         self.source_code_bytes = self.response.content
-
-         if self.response.headers.get('content-type') == 'application/json':
-             data = convert_to_json(self.source_code)
-             if data:
-                 self.source_code_json = data.get("response", data)
-
-         self.get_react_source_code()
-     def get_react_source_code(self) -> list:
-         """
-         Fetches the source code of the specified URL and extracts JavaScript and JSX source code (React components).
-
-         Args:
-             url (str): The URL to fetch the source code from.
-
-         Returns:
-             list: A list of strings containing JavaScript and JSX source code found in <script> tags.
-         """
-         if self.url_manager.url is None:
-             return []
-         soup = BeautifulSoup(self.source_code_bytes,"html.parser")
-         script_tags = soup.find_all('script', type=lambda t: t and ('javascript' in t or 'jsx' in t))
-         for script_tag in script_tags:
-             self.react_source_code.append(script_tag.string)
-
-
-     def get_status(url:str=None) -> int:
-         """
-         Gets the HTTP status code of the given URL.
-
-         Args:
-             url (str): The URL to check the status of.
-
-         Returns:
-             int: The HTTP status code of the URL, or None if the request fails.
-         """
-         # Get the status code of the URL
-         return try_request(url=url).status_code
-     def wait_between_requests(self):
-         """
-         Wait between requests based on the request_wait_limit.
-         """
-         if self.last_request_time:
-             sleep_time = self.request_wait_limit - (get_time_stamp() - self.last_request_time)
-             if sleep_time > 0:
-                 logging.info(f"Sleeping for {sleep_time:.2f} seconds.")
-                 get_sleep(sleep_time)
-
-     def make_request(self):
-         """
-         Make a request and handle potential errors.
-         """
-         # Update the instance attributes if they are passed
-
-         self.wait_between_requests()
-         for _ in range(self.max_retries):
-             try:
-                 self.try_request() # 10 seconds timeout
-                 if self.response:
-                     if self.response.status_code == 200:
-                         self.last_request_time = get_time_stamp()
-                         return self.response
-                     elif self.response.status_code == 429:
-                         logging.warning(f"Rate limited by {self.url_manager.url}. Retrying...")
-                         get_sleep(5) # adjust this based on the server's rate limit reset time
-             except requests.Timeout as e:
-                 logging.error(f"Request to {cleaned_url} timed out: {e}")
-             except requests.ConnectionError:
-                 logging.error(f"Connection error for URL {self.url_manager.url}.")
-             except requests.Timeout:
-                 logging.error(f"Request timeout for URL {self.url_manager.url}.")
-             except requests.RequestException as e:
-                 logging.error(f"Request exception for URL {self.url_manager.url}: {e}")
-
-         logging.error(f"Failed to retrieve content from {self.url_manager.url} after {self.max_retries} retries.")
-         return None
-     def try_request(self) -> Union[requests.Response, None]:
-         """
-         Tries to make an HTTP request to the given URL using the provided session.
-
-         Args:
-             timeout (int): Timeout for the request.
-
-         Returns:
-             requests.Response or None: The response object if the request is successful, or None if the request fails.
-         """
-         try:
-             return self.session.get(url=self.url_manager.url, timeout=self.timeout,stream=self.stream)
-         except requests.exceptions.RequestException as e:
-             print(e)
-             return None
-
-     @property
-     def url(self):
-         return self.url_manager.url
-
-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
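SafeRequest is the aggregation point: constructing one builds (or accepts) the URL, user-agent, SSL, and network managers, mounts the TLS adapter on a session, fetches the URL with retry and rate-limit handling, and post-processes the body. A minimal sketch:

    req = SafeRequest(url='https://example.com')   # fetches during __init__ via make_request()
    req.response.status_code     # lazy property; reuses the already-fetched response
    req.source_code[:100]        # response text, set by process_response_data()
    req.source_code_json         # populated only for application/json responses

Two quirks worth flagging in the removed code: make_request logs cleaned_url, a name never defined in that scope, and it lists requests.Timeout in two separate except clauses, leaving the second unreachable.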
- class SafeRequestSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(url=None,headers:dict=None,max_retries=3,last_request_time=None,request_wait_limit=1.5):
-         if SafeRequestSingleton._instance is None:
-             SafeRequestSingleton._instance = SafeRequest(url,url_manager=UrlManagerSingleton,headers=headers,max_retries=max_retries,last_request_time=last_request_time,request_wait_limit=request_wait_limit)
-         elif SafeRequestSingleton._instance.url != url or SafeRequestSingleton._instance.headers != headers or SafeRequestSingleton._instance.max_retries != max_retries or SafeRequestSingleton._instance.request_wait_limit != request_wait_limit:
-             SafeRequestSingleton._instance = SafeRequest(url,url_manager=UrlManagerSingleton,headers=headers,max_retries=max_retries,last_request_time=last_request_time,request_wait_limit=request_wait_limit)
-         return SafeRequestSingleton._instance
- class SoupManager:
-     """
-     SoupManager is a class for managing and parsing HTML source code using BeautifulSoup.
-
-     Args:
-         url (str or None): The URL to be parsed (default is None).
-         source_code (str or None): The HTML source code (default is None).
-         url_manager (UrlManager or None): An instance of UrlManager (default is None).
-         request_manager (SafeRequest or None): An instance of SafeRequest (default is None).
-         parse_type (str): The type of parser to be used by BeautifulSoup (default is "html.parser").
-
-     Methods:
-         re_initialize(): Reinitialize the SoupManager with the current settings.
-         update_url(url): Update the URL and reinitialize the SoupManager.
-         update_source_code(source_code): Update the source code and reinitialize the SoupManager.
-         update_request_manager(request_manager): Update the request manager and reinitialize the SoupManager.
-         update_url_manager(url_manager): Update the URL manager and reinitialize the SoupManager.
-         update_parse_type(parse_type): Update the parsing type and reinitialize the SoupManager.
-         all_links: A property that provides access to all discovered links.
-         _all_links_get(): A method to load all discovered links.
-         get_all_website_links(tag="a", attr="href"): Get all URLs belonging to the same website.
-         meta_tags: A property that provides access to all discovered meta tags.
-         _meta_tags_get(): A method to load all discovered meta tags.
-         get_meta_tags(): Get all meta tags in the source code.
-         find_all(element, soup=None): Find all instances of an HTML element in the source code.
-         get_class(class_name, soup=None): Get the specified class from the HTML source code.
-         has_attributes(tag, *attrs): Check if an HTML tag has the specified attributes.
-         get_find_all_with_attributes(*attrs): Find all HTML tags with specified attributes.
-         get_all_desired_soup(tag=None, attr=None, attr_value=None): Get HTML tags based on specified criteria.
-         extract_elements(url, tag=None, class_name=None, class_value=None): Extract portions of source code based on filters.
-         find_all_with_attributes(class_name=None, *attrs): Find classes with associated href or src attributes.
-         get_images(tag_name, class_name, class_value): Get images with specific class and attribute values.
-         discover_classes_and_meta_images(tag_name, class_name_1, class_name_2, class_value, attrs): Discover classes and meta images.
-
-     Note:
-         - The SoupManager class is designed for parsing HTML source code using BeautifulSoup.
-         - It provides various methods to extract data and discover elements within the source code.
-     """
-     def __init__(self,url=None,source_code=None,url_manager=None,request_manager=None, parse_type="html.parser"):
-         self.soup=[]
-         self.url=url
-         if url_manager == None:
-             url_manager=UrlManager(url=self.url)
-         if self.url != None and url_manager != None and url_manager.url != UrlManager(url=url).url:
-             url_manager.update_url(url=self.url)
-         self.url_manager= url_manager
-         self.url=self.url_manager.url
-         if request_manager == None:
-             request_manager = SafeRequest(url_manager=self.url_manager)
-         self.request_manager = request_manager
-         if self.request_manager.url_manager != self.url_manager:
-             self.request_manager.update_url_manager(url_manager=self.url_manager)
-         self.parse_type = parse_type
-         if source_code != None:
-             self.source_code = source_code
-         else:
-             self.source_code = self.request_manager.source_code_bytes
-         self.soup= BeautifulSoup(self.source_code, self.parse_type)
-         self._all_links_data = None
-         self._meta_tags_data = None
-     def re_initialize(self):
-         self.soup= BeautifulSoup(self.source_code, self.parse_type)
-         self._all_links_data = None
-         self._meta_tags_data = None
-     def update_url(self,url):
-         self.url_manager.update_url(url=url)
-         self.url=self.url_manager.url
-         self.request_manager.update_url(url=url)
-         self.source_code = self.request_manager.source_code_bytes
-         self.re_initialize()
-     def update_source_code(self,source_code):
-         self.source_code = source_code
-         self.re_initialize()
-     def update_request_manager(self,request_manager):
-         self.request_manager = request_manager
-         self.url_manager=self.request_manager.url_manager
-         self.url=self.url_manager.url
-         self.source_code = self.request_manager.source_code_bytes
-         self.re_initialize()
-     def update_url_manager(self,url_manager):
-         self.url_manager=url_manager
-         self.url=self.url_manager.url
-         self.request_manager.update_url_manager(url_manager=self.url_manager)
-         self.source_code = self.request_manager.source_code_bytes
-         self.re_initialize()
-     def update_parse_type(self,parse_type):
-         self.parse_type=parse_type
-         self.re_initialize()
-     @property
-     def all_links(self):
-         """This is a property that provides access to the _all_links_data attribute.
-         The first time it's accessed, it will load the data."""
-         if self._all_links_data is None:
-             print("Loading all links for the first time...")
-             self._all_links_data = self._all_links_get()
-         return self._all_links_data
-     def _all_links_get(self):
-         """A method that loads the data (can be replaced with whatever data loading logic you have)."""
-         return self.get_all_website_links()
-     def get_all_website_links(self,tag="a",attr="href") -> list:
-         """
-         Returns all URLs that are found on the specified URL and belong to the same website.
-
-         Args:
-             url (str): The URL to search for links.
-
-         Returns:
-             list: A list of URLs that belong to the same website as the specified URL.
-         """
-         all_urls=[self.url_manager.url]
-         domain = self.url_manager.domain
-         all_desired=self.get_all_desired_soup(tag=tag,attr=attr)
-         for tag in all_desired:
-             href = tag.attrs.get(attr)
-             if href == "" or href is None:
-                 # href empty tag
-                 continue
-             href=self.url_manager.get_relative_href(self.url_manager.url,href)
-             if not self.url_manager.is_valid_url(href):
-                 # not a valid URL
-                 continue
-             if href in all_urls:
-                 # already in the set
-                 continue
-             if domain not in href:
-                 # external link
-                 continue
-             all_urls.append(href)
-
-         return all_urls
-
-
-     @property
-     def meta_tags(self):
-         """This is a property that provides access to the _all_links_data attribute.
-         The first time it's accessed, it will load the data."""
-         if self._meta_tags_data is None:
-             print("Loading all links for the first time...")
-             self._meta_tags_data = self._all_links_get()
-         return self._meta_tags_data
-     def _meta_tags_get(self):
-         """A method that loads the data (can be replaced with whatever data loading logic you have)."""
-         return self.get_meta_tags()
-     def get_meta_tags(self):
-         tags = self.find_all("meta")
-         for meta_tag in tags:
-             for attr, values in meta_tag.attrs.items():
-                 if attr not in self.meta_tags:
-                     self.meta_tags[attr] = []
-                 if values not in self.meta_tags[attr]:
-                     self.meta_tags[attr].append(values)
-
-
-     def find_all(self,element,soup=None):
-         soup = self.soup if soup == None else soup
-         return soup.find_all(element)
-     def get_class(self,class_name,soup=None):
-         soup = self.soup if soup == None else soup
-         return soup.get(class_name)
-     @staticmethod
-     def has_attributes(tag, *attrs):
-         return any(tag.has_attr(attr) for attr in attrs)
-     def get_find_all_with_attributes(self, *attrs):
-         return self.soup.find_all(lambda t: self.has_attributes(t, *attrs))
-     def find_tags_by_attributes(self, tag: str = None, attr: str = None, attr_values: List[str] = None) ->List:
-         if not tag:
-             tags = self.soup.find_all(True) # get all tags
-         else:
-             tags = self.soup.find_all(tag) # get specific tags
-
-         extracted_tags = []
-         for t in tags:
-             if attr:
-                 attribute_value = t.get(attr)
-                 if not attribute_value: # skip tags without the desired attribute
-                     continue
-                 if attr_values and not any(value in attribute_value for value in attr_values): # skip tags without any of the desired attribute values
-                     continue
-             extracted_tags.append(t)
-         return extracted_tags
-
-
-     def extract_elements(self,url:str=None, tag:str=None, class_name:str=None, class_value:str=None) -> list:
-         """
-         Extracts portions of the source code from the specified URL based on provided filters.
-
-         Args:
-             url (str): The URL to fetch the source code from.
-             element_type (str, optional): The HTML element type to filter by. Defaults to None.
-             attribute_name (str, optional): The attribute name to filter by. Defaults to None.
-             class_name (str, optional): The class name to filter by. Defaults to None.
-
-         Returns:
-             list: A list of strings containing portions of the source code that match the provided filters.
-         """
-         elements = []
-         # If no filters are provided, return the entire source code
-         if not tag and not class_name and not class_value:
-             elements.append(str(self.soup))
-             return elements
-         # Find elements based on the filters provided
-         if tag:
-             elements.extend([str(tags) for tags in self.get_all_desired(tag)])
-         if class_name:
-             elements.extend([str(tags) for tags in self.get_all_desired(tag={class_name: True})])
-         if class_value:
-             elements.extend([str(tags) for tags in self.get_all_desired(class_name=class_name)])
-         return elements
-     def find_all_with_attributes(self, class_name=None, *attrs):
-         """
-         Discovers classes in the HTML content of the provided URL
-         that have associated href or src attributes.
-
-         Args:
-             base_url (str): The URL from which to discover classes.
-
-         Returns:
-             set: A set of unique class names.
-         """
-
-
-         unique_classes = set()
-         for tag in self.get_find_all_with_attributes(*attrs):
-             class_list = self.get_class(class_name=class_name, soup=tag)
-             unique_classes.update(class_list)
-         return unique_classes
-     def get_images(self, tag_name, class_name, class_value):
-         images = []
-         for tag in self.soup.find_all(tag_name):
-             if class_name in tag.attrs and tag.attrs[class_name] == class_value:
-                 content = tag.attrs.get('content', '')
-                 if content:
-                     images.append(content)
-         return images
-     def extract_text_sections(self) -> list:
-         """
-         Extract all sections of text from an HTML content using BeautifulSoup.
-
-         Args:
-             html_content (str): The HTML content to be parsed.
-
-         Returns:
-             list: A list containing all sections of text.
-         """
-         # Remove any script or style elements to avoid extracting JavaScript or CSS code
-         for script in self.soup(['script', 'style']):
-             script.decompose()
-
-         # Extract text from the remaining elements
-         text_sections = self.soup.stripped_strings
-         return [text for text in text_sections if text]
-     def discover_classes_and_meta_images(self, tag_name, class_name_1, class_name_2, class_value, attrs):
-         """
-         Discovers classes in the HTML content of the provided URL
-         that have associated href or src attributes. Also, fetches
-         image references from meta tags.
-
-         Args:
-             base_url (str): The URL from which to discover classes and meta images.
-
-         Returns:
-             tuple: A set of unique class names and a list of meta images.
-         """
-
-         unique_classes = self.find_all_with_attributes(class_name=class_name_1, *attrs)
-         images = self.get_images(tag_name=tag_name, class_name=class_name_2, class_value=class_value)
-         return unique_classes, images
-     def get_all_tags_and_attribute_names(self):
-         tag_names = set() # Using a set to ensure uniqueness
-         attribute_names = set()
-         get_all = self.find_tags_by_attributes()
-         for tag in get_all: # True matches all tags
-             tag_names.add(tag.name)
-             for attr in tag.attrs:
-                 attribute_names.add(attr)
-         tag_names_list = list(tag_names)
-         attribute_names_list = list(attribute_names)
-         return {"tags":tag_names_list,"attributes":attribute_names_list}
-
-     def get_all_attribute_values(self):
-         attribute_values={}
-         get_all = self.find_tags_by_attributes()
-         for tag in get_all: # True matches all tags
-             for attr, value in tag.attrs.items():
-                 # If attribute is not yet in the dictionary, add it with an empty set
-                 if attr not in attribute_values:
-                     attribute_values[attr] = set()
-                 # If the attribute value is a list (e.g., class), extend the set with the list
-                 if isinstance(value, list):
-                     attribute_values[attr].update(value)
-                 else:
-                     attribute_values[attr].add(value)
-         for attr, values in attribute_values.items():
-             attribute_values[attr] = list(values)
-         return attribute_values
-
-     @property
-     def url(self):
-         return self._url
-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
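SoupManager layers BeautifulSoup over a SafeRequest: it fetches (or accepts) source code, parses it with the chosen parser, and lazy-loads link data through properties. A sketch:

    sm = SoupManager(url='https://example.com')    # fetch and parse with html.parser
    sm.all_links                                   # same-domain links, loaded on first access
    sm.get_all_tags_and_attribute_names()          # {'tags': [...], 'attributes': [...]}

Note two inconsistencies in the removed code: the meta_tags property loads links via _all_links_get() instead of _meta_tags_get(), and get_meta_tags writes into self.meta_tags, the property, rather than the _meta_tags_data cache.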
-
- class SoupManagerSingleton():
-     _instance = None
-     @staticmethod
-     def get_instance(url_manager,request_manager,parse_type="html.parser",source_code=None):
-         if SoupManagerSingleton._instance is None:
-             SoupManagerSingleton._instance = SoupManager(url_manager,request_manager,parse_type=parse_type,source_code=source_code)
-         elif parse_type != SoupManagerSingleton._instance.parse_type or source_code != SoupManagerSingleton._instance.source_code:
-             SoupManagerSingleton._instance = SoupManager(url_manager,request_manager,parse_type=parse_type,source_code=source_code)
-         return SoupManagerSingleton._instance
- class VideoDownloader:
-     """
-     VideoDownloader is a class for downloading videos from URLs using YouTube-DL.
-
-     Args:
-         link (str or list): The URL(s) of the video(s) to be downloaded.
-         temp_directory (str or None): The directory to store temporary video files (default is None, uses video_directory/temp_files).
-         video_directory (str or None): The directory to store downloaded videos (default is None, uses 'videos' in the current working directory).
-         remove_existing (bool): Whether to remove existing video files with the same name (default is True).
-
-     Methods:
-         count_outliers(speed, threshold): Count speed outliers below the threshold.
-         filter_outliers(speeds): Filter out speed outliers in the list of speeds.
-         remove_temps(file_name): Remove temporary video files based on the file name.
-         move_video(): Move the downloaded video to the final directory.
-         yt_dlp_downloader(url, ydl_opts={}, download=True): Download video information using YouTube-DL.
-         progress_callback(d): Callback function to monitor download progress.
-         download(): Download video(s) based on the provided URL(s).
-         monitor(): Monitor the download progress.
-         start(): Start the download and monitoring threads.
-
-     Note:
-         - The VideoDownloader class uses YouTube-DL to download videos.
-         - It allows downloading from multiple URLs.
-         - You need to have YouTube-DL installed to use this class.
-     """
-     def __init__(self, link,temp_directory=None,video_directory=None,remove_existing=True):
-         if video_directory==None:
-             video_directory=os.path.join(os.getcwd(),'videos')
-         if temp_directory == None:
-             temp_directory=os.path.join(video_directory,'temp_files')
-         self.thread_manager = ThreadManager()
-         self.pause_event = self.thread_manager.add_thread('pause_event')
-         self.link = link
-         self.temp_directory = temp_directory
-         self.video_directory = video_directory
-         self.remove_existing=remove_existing
-         self.video_urls=self.link if isinstance(self.link,list) else [self.link]
-         self.starttime = None
-         self.downloaded = 0
-         self.time_interval=60
-         self.monitoring=True
-         self.temp_file_name = None
-         self.file_name = None
-         self.dl_speed = None
-         self.dl_eta=None
-         self.total_bytes_est=None
-         self.percent_speed=None
-         self.percent=None
-         self.speed_track = []
-         self.video_url=None
-         self.last_checked = get_time_stamp()
-         self.num=0
-         self.start()
-     def count_outliers(self,speed,threshold):
-         if speed < threshold:
-             self.outlier_count+=1
-         else:
-             self.outlier_count=0
-     def filter_outliers(self,speeds):
-         # Step 1: Compute initial average
-         initial_avg = sum(speeds) / len(speeds)
-
-         # Step 2: Remove speeds 25% under the average
-         threshold = initial_avg * 0.75 # 25% under average
-         filtered_speeds = [speed for speed in speeds if speed >= threshold]
-
-         # Step 3: Compute the new average of the filtered list
-         if filtered_speeds: # Ensure the list is not empty
-             self.count_outliers(speeds[-1],threshold)
-             return filtered_speeds
-         else:
-             # This can happen if all values are outliers, it's up to you how to handle it
-             self.outlier_count=0
-             return speeds
-     def remove_temps(self,file_name):
-         for temp_vid in os.listdir(self.temp_directory):
-             if len(file_name)<=len(temp_vid):
-                 if temp_vid[:len(file_name)] == file_name:
-                     os.remove(os.path.join(self.temp_directory,temp_vid))
-                     print(f"removing {temp_vid} from {self.temp_directory}")
-     def move_video(self):
-         if os.path.exists(self.temp_file_path):
-             shutil.move(self.temp_file_path, self.video_directory)
-             print(f"moving {self.file_name} from {self.temp_directory} to {self.video_directory}")
-             self.remove_temps(self.file_name)
-             return True
-         if os.path.exists(self.complete_file_path):
-             print(f"{self.file_name} already existed in {self.video_directory}; removing it from {self.temp_directory}")
-             self.remove_temps(self.file_name)
-             return True
-         return False
-     def yt_dlp_downloader(self,url,ydl_opts={},download=True):
-         try:
-             with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-                 self.info_dict=ydl.extract_info(url=url, download=download)
-             return True
-         except:
-             return False
-     def progress_callback(self, d):
-         self.status_dict = d
-         keys = ['status',
-                 'downloaded_bytes',
-                 'fragment_index',
-                 'fragment_count',
-                 'filename',
-                 'tmpfilename',
-                 'max_progress',
-                 'progress_idx',
-                 'elapsed',
-                 'total_bytes_estimate',
-                 'speed',
-                 'eta',
-                 '_eta_str',
-                 '_speed_str',
-                 '_percent_str',
-                 '_total_bytes_str',
-                 '_total_bytes_estimate_str',
-                 '_downloaded_bytes_str',
-                 '_elapsed_str',
-                 '_default_template']
-         if self.status_dict['status'] == 'finished':
-             print("Done downloading, moving video to final directory...")
-             self.move_video()
-             return
-         if get_time_stamp()-self.last_checked>5:
-             print(self.status_dict['_default_template'])
-             self.last_checked = get_time_stamp()
-             if (get_time_stamp()-self.start_time/5)>6:
-                 self.speed_track.append(self.status_dict['speed'])
-                 self.speed_track=self.filter_outliers(self.speed_track)
-
-     def download(self):
-         if not os.path.exists(self.video_directory):
-             os.makedirs(self.video_directory,exist_ok=True)
-         if not os.path.exists(self.temp_directory):
-             os.makedirs(self.temp_directory,exist_ok=True)
-         for self.num,video_url in enumerate(self.video_urls):
-             if video_url != self.video_url or self.video_url == None:
-                 self.video_url=video_url
-                 self.info_dict=None
-                 result = self.yt_dlp_downloader(url=self.video_url,ydl_opts={'quiet': True, 'no_warnings': True},download=False)
-                 if self.info_dict != None and result:
-                     self.start_time = get_time_stamp()
-                     self.downloaded = 0
-                     self.video_title = self.info_dict.get('title', None)
-                     self.video_ext = self.info_dict.get('ext', 'mp4')
-                     self.file_name =f"{self.video_title}.{self.video_ext}"
-                     self.temp_file_path = os.path.join(self.temp_directory, self.file_name)
-                     self.complete_file_path = os.path.join(self.video_directory, self.file_name)
-                     if not self.move_video():
-                         self.dl_speed = []
-                         self.percent=None
-                         self.dl_eta=None
-                         self.total_bytes_est=None
-                         self.percent_speed=None
-                         self.speed_track = []
-                         self.outlier_count=0
-                         ydl_opts = {
-                             'outtmpl': self.temp_file_path,
-                             'noprogress':True,
-                             'progress_hooks': [self.progress_callback]
-                         }
-
-
-                         print("Starting download...") # Check if this point in code is reached
-                         result = self.yt_dlp_downloader(url=self.video_url,ydl_opts=ydl_opts,download=True)
-                         if result:
-                             print("Download finished!") # Check if download completes
-                         else:
-                             print(f'error downloding {self.video_url}')
-                         self.move_video()
-                     else:
-                         print(f"The video from {self.video_url} already exists in the directory {self.video_directory}. Skipping download.")
-                 else:
-                     print(f"could not find video info from {self.video_url} Skipping download.")
-             if self.num==len(self.video_urls)-1:
-                 self.monitoring=False
-                 self.time_interval=0
-
-     def monitor(self):
-         while self.monitoring:
-             self.thread_manager.wait(name='pause_event',n=self.time_interval)# check every minute
-             if self.monitoring:
-                 if 'eta' in self.status_dict:
-                     if self.outlier_count>=3 and (self.status_dict['eta']/60)>10:
-                         self.start()
-
-     def start(self):
-         download_thread = self.thread_manager.add_thread(name='download_thread',target=self.download)
-         monitor_thread = self.thread_manager.add_thread(name='monitor_thread',target_function=self.monitor)
-         self.thread_manager.start(name='download_thread')
-         self.thread_manager.start(name='monitor_thread')
-         self.thread_manager.join(name='download_thread')
-         self.thread_manager.join(name='monitor_thread')
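VideoDownloader drives yt-dlp from its constructor: start() spawns download and monitor threads and joins them, so construction blocks until the queue finishes. The progress hook samples download speed, and the monitor restarts a download when three consecutive speed outliers coincide with an ETA above ten minutes. A sketch (URL hypothetical):

    vd = VideoDownloader(link='https://example.com/some-video')
    # files land in ./videos, with partials staged in ./videos/temp_files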
- class VideoDownloaderSingleton():
1333
- _instance = None
1334
- @staticmethod
1335
- def get_instance(url_manager,request_manager,title=None,video_extention='mp4',download_directory=os.getcwd(),user_agent=None,download=True,get_info=False):
1336
- if VideoDownloaderSingleton._instance is None:
1337
- VideoDownloaderSingleton._instance = VideoDownloader(url=url,title=title,video_extention=video_extention,download_directory=download_directory,download=download,get_info=get_info,user_agent=user_agent)
1338
- elif VideoDownloaderSingleton._instance.title != title or video_extention != VideoDownloaderSingleton._instance.video_extention or url != VideoDownloaderSingleton._instance.url or download_directory != VideoDownloaderSingleton._instance.download_directory or user_agent != VideoDownloaderSingleton._instance.user_agent:
1339
- VideoDownloaderSingleton._instance = VideoDownloader(url=url,title=title,video_extention=video_extention,download_directory=download_directory,download=download,get_info=get_info,user_agent=user_agent)
1340
- return VideoDownloaderSingleton._instance
1341
-
- class LinkManager:
-     """
-     LinkManager is a class for managing and extracting links and image links from a web page.
-
-     Args:
-         url (str): The URL of the web page (default is "https://example.com").
-         source_code (str or None): The source code of the web page (default is None).
-         url_manager (UrlManager or None): An instance of UrlManager (default is None).
-         request_manager (SafeRequest or None): An instance of SafeRequest (default is None).
-         soup_manager (SoupManager or None): An instance of SoupManager (default is None).
-         image_link_tags (str): HTML tags used to identify image links (default is 'img').
-         img_link_attrs (str): HTML attributes used to identify image link URLs (default is 'src').
-         link_tags (str): HTML tags used to identify links (default is 'a').
-         link_attrs (str): HTML attributes used to identify link URLs (default is 'href').
-         strict_order_tags (bool): Whether tags and attributes should be matched strictly (default is False).
-         img_attr_value_desired (list or None): Desired attribute values for image links (default is None).
-         img_attr_value_undesired (list or None): Undesired attribute values for image links (default is None).
-         link_attr_value_desired (list or None): Desired attribute values for links (default is None).
-         link_attr_value_undesired (list or None): Undesired attribute values for links (default is None).
-         associated_data_attr (list): HTML attributes to associate with the extracted links (default is ["data-title", 'alt', 'title']).
-         get_img (list): HTML attributes used to identify associated images (default is ["data-title", 'alt', 'title']).
-
-     Methods:
-         re_initialize(): Reinitialize the LinkManager with the current settings.
-         update_url_manager(url_manager): Update the URL manager with a new instance.
-         update_url(url): Update the URL and reinitialize the LinkManager.
-         update_source_code(source_code): Update the source code and reinitialize the LinkManager.
-         update_soup_manager(soup_manager): Update the SoupManager and reinitialize the LinkManager.
-         update_desired(...): Update the desired settings and reinitialize the LinkManager.
-         find_all_desired(...): Find all desired links or image links based on the specified criteria.
-         find_all_domain(): Find all unique domain names in the extracted links.
-
-     Note:
-         - The LinkManager class helps manage and extract links and image links from web pages.
-         - The class provides flexibility in specifying criteria for link extraction.
-     """
-     def __init__(self, url="https://example.com", source_code=None, url_manager=None, request_manager=None, soup_manager=None, image_link_tags='img', img_link_attrs='src', link_tags='a', link_attrs='href', strict_order_tags=False, img_attr_value_desired=None, img_attr_value_undesired=None, link_attr_value_desired=None, link_attr_value_undesired=None, associated_data_attr=["data-title", 'alt', 'title'], get_img=["data-title", 'alt', 'title']):
-         if url_manager is None:
-             url_manager = UrlManager(url=url)
-         self.url_manager = url_manager
-         self.url = self.url_manager.url
-         if request_manager is None:
-             request_manager = SafeRequest(url_manager=self.url_manager)
-         self.request_manager = request_manager
-         if soup_manager is None:
-             soup_manager = SoupManager(url_manager=self.url_manager, request_manager=self.request_manager)
-         self.soup_manager = soup_manager
-         if source_code is not None:
-             self.source_code = source_code
-         else:
-             self.source_code = self.request_manager.source_code_bytes
-         if self.source_code != self.soup_manager.source_code:
-             self.soup_manager.update_source_code(source_code=self.source_code)
-         self.strict_order_tags = strict_order_tags
-         self.image_link_tags = image_link_tags
-         self.img_link_attrs = img_link_attrs
-         self.link_tags = link_tags
-         self.link_attrs = link_attrs
-         self.img_attr_value_desired = img_attr_value_desired
-         self.img_attr_value_undesired = img_attr_value_undesired
-         self.link_attr_value_desired = link_attr_value_desired
-         self.link_attr_value_undesired = link_attr_value_undesired
-         self.associated_data_attr = associated_data_attr
-         self.get_img = get_img
-         self.all_desired_image_links = self.find_all_desired_links(tag=self.image_link_tags,
-                                                                    attr=self.img_link_attrs,
-                                                                    attr_value_desired=self.img_attr_value_desired,
-                                                                    attr_value_undesired=self.img_attr_value_undesired)
-         self.all_desired_links = self.find_all_desired_links(tag=self.link_tags,
-                                                              attr=self.link_attrs,
-                                                              attr_value_desired=self.link_attr_value_desired,
-                                                              attr_value_undesired=self.link_attr_value_undesired,
-                                                              associated_data_attr=self.associated_data_attr,
-                                                              get_img=get_img)
-     def re_initialize(self):
-         self.all_desired_image_links = self.find_all_desired_links(tag=self.image_link_tags, attr=self.img_link_attrs, strict_order_tags=self.strict_order_tags, attr_value_desired=self.img_attr_value_desired, attr_value_undesired=self.img_attr_value_undesired)
-         self.all_desired_links = self.find_all_desired_links(tag=self.link_tags, attr=self.link_attrs, strict_order_tags=self.strict_order_tags, attr_value_desired=self.link_attr_value_desired, attr_value_undesired=self.link_attr_value_undesired, associated_data_attr=self.associated_data_attr, get_img=self.get_img)
-     def update_url_manager(self, url_manager):
-         self.url_manager = url_manager
-         self.url = self.url_manager.url
-         self.request_manager.update_url_manager(url_manager=self.url_manager)
-         self.soup_manager.update_url_manager(url_manager=self.url_manager)
-         self.source_code = self.soup_manager.source_code
-         self.re_initialize()
-     def update_url(self, url):
-         self.url = url
-         self.url_manager.update_url(url=self.url)
-         self.url = self.url_manager.url
-         self.request_manager.update_url(url=self.url)
-         self.soup_manager.update_url(url=self.url)
-         self.source_code = self.soup_manager.source_code
-         self.re_initialize()
-     def update_source_code(self, source_code):
-         self.source_code = source_code
-         if self.source_code != self.soup_manager.source_code:
-             self.soup_manager.update_source_code(source_code=self.source_code)
-         self.re_initialize()
-     def update_soup_manager(self, soup_manager):
-         self.soup_manager = soup_manager
-         self.source_code = self.soup_manager.source_code
-         self.re_initialize()
-     def update_desired(self, img_attr_value_desired=None, img_attr_value_undesired=None, link_attr_value_desired=None, link_attr_value_undesired=None, image_link_tags=None, img_link_attrs=None, link_tags=None, link_attrs=None, strict_order_tags=None, associated_data_attr=None, get_img=None):
-         self.strict_order_tags = strict_order_tags or self.strict_order_tags
-         self.img_attr_value_desired = img_attr_value_desired or self.img_attr_value_desired
-         self.img_attr_value_undesired = img_attr_value_undesired or self.img_attr_value_undesired
-         self.link_attr_value_desired = link_attr_value_desired or self.link_attr_value_desired
-         self.link_attr_value_undesired = link_attr_value_undesired or self.link_attr_value_undesired
-         self.image_link_tags = image_link_tags or self.image_link_tags
-         self.img_link_attrs = img_link_attrs or self.img_link_attrs
-         self.link_tags = link_tags or self.link_tags
-         self.link_attrs = link_attrs or self.link_attrs
-         self.associated_data_attr = associated_data_attr or self.associated_data_attr
-         self.get_img = get_img or self.get_img
-         self.re_initialize()
-     def find_all_desired(self, tag='img', attr='src', strict_order_tags=False, attr_value_desired=None, attr_value_undesired=None, associated_data_attr=None, get_img=None):
-         def make_list(obj):
-             if isinstance(obj, list) or obj is None:
-                 return obj
-             return [obj]
-         def get_desired_value(attr, attr_value_desired=None, attr_value_undesired=None):
-             if attr_value_desired:
-                 for value in attr_value_desired:
-                     if value not in attr:
-                         return False
-             if attr_value_undesired:
-                 for value in attr_value_undesired:
-                     if value in attr:
-                         return False
-             return True
-         attr_value_desired, attr_value_undesired, associated_data_attr, tags, attribs = make_list(attr_value_desired), make_list(attr_value_undesired), make_list(associated_data_attr), make_list(tag), make_list(attr)
-         desired_ls = []
-         associated_data = []
-         for i, tag in enumerate(tags):
-             attribs_list = attribs
-             if strict_order_tags:
-                 if len(attribs) <= i:
-                     attribs_list = [None]
-                 else:
-                     attribs_list = make_list(attribs[i])
-             for attr in attribs_list:
-                 for component in self.soup_manager.soup.find_all(tag):
-                     if attr in component.attrs and get_desired_value(attr=component[attr], attr_value_desired=attr_value_desired, attr_value_undesired=attr_value_undesired):
-                         if component[attr] not in desired_ls:
-                             desired_ls.append(component[attr])
-                             associated_data.append({"value": component[attr]})
-                             if associated_data_attr:
-                                 for data in associated_data_attr:
-                                     if data in component.attrs:
-                                         associated_data[-1][data] = component.attrs[data]
-                                         if get_img and component.attrs[data]:
-                                             if data in get_img and len(component.attrs[data]) != 0:
-                                                 for each in self.soup_manager.soup.find_all('img'):
-                                                     if 'alt' in each.attrs:
-                                                         if each.attrs['alt'] == component.attrs[data] and 'src' in each.attrs:
-                                                             associated_data[-1]['image'] = each.attrs['src']
-         desired_ls.append(associated_data)
-         return desired_ls
-     def find_all_domain(self):
-         domains_ls = [self.url_manager.protocol + '://' + self.url_manager.domain]
-         for desired in self.all_desired_links[:-1]:
-             if self.url_manager.is_valid_url(desired):
-                 parse = urlparse(desired)
-                 domain = parse.scheme + '://' + parse.netloc
-                 if domain not in domains_ls:
-                     domains_ls.append(domain)
-         return domains_ls
-     def find_all_desired_links(self, tag='img', attr='src', attr_value_desired=None, strict_order_tags=False, attr_value_undesired=None, associated_data_attr=None, all_desired=None, get_img=None):
-         all_desired = all_desired or self.find_all_desired(tag=tag, attr=attr, strict_order_tags=strict_order_tags, attr_value_desired=attr_value_desired, attr_value_undesired=attr_value_undesired, associated_data_attr=associated_data_attr, get_img=get_img)
-         associated_attrs = all_desired[-1]
-         valid_associated_attrs = []
-         desired_links = []
-         for i, attr in enumerate(all_desired[:-1]):
-             valid_attr = self.url_manager.make_valid(attr, self.url_manager.protocol + '://' + self.url_manager.domain)
-             if valid_attr:
-                 desired_links.append(valid_attr)
-                 valid_associated_attrs.append(associated_attrs[i])
-                 valid_associated_attrs[-1]["link"] = valid_attr
-         desired_links.append(valid_associated_attrs)
-         return desired_links
-
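A brief usage sketch for the class above, grounded in its own API; the URL is a placeholder and the managers (UrlManager, SafeRequest, SoupManager) are constructed internally when not supplied:

    link_mgr = LinkManager(url="https://example.com")

    # Each result list ends with the associated-data records;
    # the preceding elements are the validated URLs themselves.
    page_links = link_mgr.all_desired_links[:-1]
    image_links = link_mgr.all_desired_image_links[:-1]
    print(page_links)
    print(image_links)

    # Narrow extraction to links containing '.pdf';
    # update_desired() re-runs extraction via re_initialize()
    link_mgr.update_desired(link_attr_value_desired=['.pdf'])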
- class CrawlManager():
-     def __init__(self, url=None, source_code=None, parse_type="html.parser"):
-         self.url = url
-         self.source_code = source_code
-         self.parse_type = parse_type
-         self.get_new_source_and_url(url=url)
-     def get_new_source_and_url(self, url=None):
-         if url is None:
-             url = self.url
-         # relies on self.response_manager being provided elsewhere
-         self.response = self.response_manager.response
-         self.source_code = self.response_manager.source_code
-     def get_classes_and_meta_info(self):
-         tag_name, class_name_1, class_name_2, class_value = 'meta', 'class', 'property', 'og:image'
-         attrs = 'href', 'src'
-         unique_classes, images = discover_classes_and_images(self, tag_name, class_name_1, class_name_2, class_value, attrs)
-         return unique_classes, images
-     def extract_links_from_url(self):
-         """
-         Extracts all href and src links from a given URL's source code.
-
-         Args:
-             base_url (str): The URL from which to extract links.
-
-         Returns:
-             dict: Dictionary containing image links and external links found on the page.
-         """
-         agg_js = {'images': [], 'external_links': []}
-         if self.response is not None:
-             attrs = 'href', 'src'
-             links = [[], []]
-             for i, each in enumerate(attrs):
-                 links[i] = [a[attrs[i]] for a in get_find_all_with_attributes(self, attrs[i])]
-             # Convert all links to absolute links
-             absolute_links = [urljoin(self.url, link) for link in links[0] + links[1]]
-             # Separate images and external links
-             images = [link for link in absolute_links if link.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.svg', '.webp'))]
-             external_links = [link for link in absolute_links if urlparse(link).netloc != urlparse(self.url).netloc]
-             agg_js['images'] = images
-             agg_js['external_links'] = external_links
-         return agg_js
-
-     @staticmethod
-     def correct_xml(xml_string):
-         # Requires: import xml.etree.ElementTree as ET
-         # Parse the XML string
-         root = ET.fromstring(xml_string)
-         # Loop through each <image:loc> element and escape its text if needed
-         for image_loc in root.findall(".//image:loc", namespaces={'image': 'http://www.google.com/schemas/sitemap-image/1.1'}):
-             # Replace '&' with '&amp;' in the element's text
-             if '&' in image_loc.text:
-                 image_loc.text = image_loc.text.replace('&', '&amp;')
-         # Convert the corrected XML back to a string
-         corrected_xml = ET.tostring(root, encoding='utf-8').decode('utf-8')
-         return corrected_xml
-
-     def determine_values(self):
-         # This is just a mockup. In a real application, you'd analyze the URL or its content.
-         # Assuming a blog site
-         if 'blog' in self.url:
-             if '2023' in self.url:  # Assuming it's a current-year article
-                 return ('weekly', '0.8')
-             else:
-                 return ('monthly', '0.6')
-         elif 'contact' in self.url:
-             return ('yearly', '0.3')
-         else:  # Homepage or main categories
-             return ('weekly', '1.0')
-     def crawl(self, url, max_depth=3, depth=1, visited=None):
-         visited = visited if visited is not None else set()
-         if depth > max_depth:
-             return []
-         if url in visited:
-             return []
-         visited.add(url)
-         try:
-             links = [a['href'] for a in self.soup.find_all('a', href=True)]
-             valid_links = []
-             for link in links:
-                 parsed_link = urlparse(link)
-                 base_url = "{}://{}".format(parsed_link.scheme, parsed_link.netloc)
-                 if base_url == url:  # Avoiding external URLs
-                     final_link = urljoin(url, parsed_link.path)
-                     if final_link not in valid_links:
-                         valid_links.append(final_link)
-             for link in valid_links:
-                 self.crawl(link, max_depth, depth + 1, visited)
-             return valid_links
-         except Exception as e:
-             print(f"Error crawling {url}: {e}")
-             return []
-
-     # Define or import required functions here, like get_all_website_links, determine_values,
-     # discover_classes_and_meta_images, and extract_links_from_url.
-     def get_meta_info(self):
-         meta_info = {}
-         # Fetch the title if available
-         title_tag = self.soup.find('title')
-         if title_tag:
-             meta_info["title"] = title_tag.text
-         # Fetch meta tags
-         for meta_tag in self.soup.find_all('meta'):
-             name = meta_tag.get('name') or meta_tag.get('property')
-             if name:
-                 content = meta_tag.get('content')
-                 if content:
-                     meta_info[name] = content
-         return meta_info
-     def generate_sitemap(self, domain):
-         with open('sitemap.xml', 'w', encoding='utf-8') as f:
-             string = '<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1">\n'
-             for url in self.all_site_links:
-                 string += f'  <url>\n    <loc>{url}</loc>\n'
-                 preprocess = []
-                 self.get_new_source_and_url(url=url)
-                 links = self.extract_links_from_url()
-                 for img in links['images']:
-                     if str(img).lower() not in preprocess:
-                         try:
-                             escaped_img = img.replace('&', '&amp;')
-                             str_write = f'    <image:image>\n      <image:loc>{escaped_img}</image:loc>\n    </image:image>\n'
-                             string += str_write
-                         except:
-                             pass
-                         preprocess.append(str(img).lower())
-                 frequency, priority = self.determine_values()
-                 string += f'    <changefreq>{frequency}</changefreq>\n'
-                 string += f'    <priority>{priority}</priority>\n'
-                 string += f'  </url>\n'
-             string += '</urlset>\n'
-             f.write(string)
-         # Output summary
-         print(f'Sitemap saved to sitemap.xml with {len(self.all_site_links)} URLs.')
-         # Output class and link details
-         for url in self.all_site_links:
-             print(f"\nDetails for {url}:")
-             classes, meta_img_refs = discover_classes_and_meta_images(url)
-             print("\nClasses with href or src attributes:")
-             for class_name in classes:
-                 print(f"\t{class_name}")
-             print("\nMeta Image References:")
-             for img_ref in meta_img_refs:
-                 print(f"\t{img_ref}")
-             links = self.extract_links_from_url()
-             print("\nImages:")
-             for img in links['images']:
-                 print(f"\t{img}")
-             print("\nExternal Links:")
-             for ext_link in links['external_links']:
-                 print(f"\t{ext_link}")
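generate_sitemap builds the XML by string concatenation and hand-escapes '&' (the same case correct_xml patches after the fact). As a point of comparison, a sketch of the same output built with xml.etree.ElementTree, which handles escaping automatically; write_sitemap and its (page_url, image_urls) input shape are illustrative, not part of the package:

    import xml.etree.ElementTree as ET

    def write_sitemap(urls, path='sitemap.xml'):
        ns_sitemap = 'http://www.sitemaps.org/schemas/sitemap/0.9'
        ns_image = 'http://www.google.com/schemas/sitemap-image/1.1'
        ET.register_namespace('', ns_sitemap)
        ET.register_namespace('image', ns_image)
        urlset = ET.Element(f'{{{ns_sitemap}}}urlset')
        for url, images in urls:  # urls: list of (page_url, [image_urls])
            url_el = ET.SubElement(urlset, f'{{{ns_sitemap}}}url')
            ET.SubElement(url_el, f'{{{ns_sitemap}}}loc').text = url
            for img in images:
                img_el = ET.SubElement(url_el, f'{{{ns_image}}}image')
                ET.SubElement(img_el, f'{{{ns_image}}}loc').text = img  # '&' escaped automatically
        ET.ElementTree(urlset).write(path, encoding='utf-8', xml_declaration=True)

    write_sitemap([('https://example.com/', ['https://example.com/a.png?x=1&y=2'])])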
- class CrawlManagerSingleton():
-     _instance = None
-     @staticmethod
-     def get_instance(url=None, source_code=None, parse_type="html.parser"):
-         if CrawlManagerSingleton._instance is None:
-             CrawlManagerSingleton._instance = CrawlManager(url=url, parse_type=parse_type, source_code=source_code)
-         elif parse_type != CrawlManagerSingleton._instance.parse_type or url != CrawlManagerSingleton._instance.url or source_code != CrawlManagerSingleton._instance.source_code:
-             CrawlManagerSingleton._instance = CrawlManager(url=url, parse_type=parse_type, source_code=source_code)
-         return CrawlManagerSingleton._instance
- import time
- import requests
-
- class DynamicRateLimiterManager:
-     def __init__(self, service_name='ethereum'):
-         self.services = {}
-         self.service_name = service_name
-         self.add_service(service_name)
-
-     def add_service(self, service_name="default", low_limit=10, high_limit=30, limit_epoch=60, starting_tokens=10, epoch_cycle_adjustment=True):
-         if service_name in self.services:
-             print(f"Service {service_name} already exists!")
-             return
-         self.services[service_name] = DynamicRateLimiter(low_limit=low_limit, high_limit=high_limit, limit_epoch=limit_epoch, starting_tokens=starting_tokens, epoch_cycle_adjustment=epoch_cycle_adjustment)
-
-     def request(self, request_url, service_name=None):
-         service_name = service_name or self.service_name
-         if service_name not in self.services:
-             self.add_service(service_name)
-         limiter = self.services[service_name]
-         while True:
-             if limiter.request():
-                 response = requests.get(request_url)  # Actual request
-                 if response.status_code == 200:
-                     limiter.request_tracker(True)
-                     return response.json()
-                 elif response.status_code == 429:
-                     limiter.request_tracker(False)
-                     print(f"Rate limited by {service_name}. Adjusting limit and retrying...")
-                     time.sleep(limiter.get_sleep()["current_sleep"])
-                 else:
-                     print(f"Unexpected response: {response.status_code}. Message: {response.text}")
-                     return None
-             else:
-                 print(f"Rate limit reached for {service_name}. Waiting for the next epoch...")
-                 time.sleep(limiter.get_sleep()["current_sleep"])
-
-     def log_request(self, service_name, success):
-         print(f"[{service_name}] Request {'succeeded' if success else 'denied'}. Current tokens: {self.services[service_name].get_current_tokens()}")
-
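A brief usage sketch of the manager above, using its own API; the endpoint URL is a placeholder:

    manager = DynamicRateLimiterManager(service_name='ethereum')
    # request() blocks and sleeps as needed until a token is available,
    # retries on HTTP 429, and returns decoded JSON on HTTP 200 (None otherwise)
    data = manager.request('https://api.example.com/v1/blocks/latest')
    if data is not None:
        print(data)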
- class DynamicRateLimiter:
-     def __init__(self, low_limit, high_limit, limit_epoch, starting_tokens=None, epoch_cycle_adjustment: int = None):
-         self.low_limit = low_limit
-         self.high_limit = high_limit
-         self.limit_epoch = limit_epoch  # in seconds
-         self.request_status_json = {"successful": [], "unsuccessful": [], "last_requested": get_time_stamp(), "first_requested": get_time_stamp(), "epoch_left": self.limit_epoch, "last_fail": get_time_stamp(), "count_since_fail": 0}
-         self.current_limit = starting_tokens or low_limit  # Default to low_limit if starting_tokens isn't provided
-         self.epoch_cycle_adjustment = epoch_cycle_adjustment
-         # Additional attributes for tracking adjustment logic
-         self.last_adjusted_time = get_time_stamp()
-         self.successful_epochs_since_last_adjustment = 0
-         self.request_count_in_current_epoch = 0
-
-     def _refill_tokens(self):
-         # Token count is derived from the requests recorded in the current epoch
-         self.tokens = min(self.current_limit, self.get_current_tokens())
-     def request_tracker(self, success):
-         if success:
-             self.request_status_json["successful"].append(get_time_stamp())
-         else:
-             self.request_status_json["unsuccessful"].append(get_time_stamp())
-             self.request_status_json["last_fail"] = get_time_stamp()
-             self.request_status_json["count_since_fail"] = 0
-             self.adjust_limit()
-         self.request_status_json["last_requested"] = get_time_stamp()
-     def calculate_tokens(self):
-         successful = []
-         for each in self.request_status_json["successful"]:
-             if (get_time_stamp() - each) < self.limit_epoch:
-                 successful.append(each)
-         self.request_status_json["successful"] = successful
-         unsuccessful = []
-         for each in self.request_status_json["unsuccessful"]:
-             if (get_time_stamp() - each) < self.limit_epoch:
-                 unsuccessful.append(each)
-         self.request_status_json["unsuccessful"] = unsuccessful
-         if len(successful) == 0 and len(unsuccessful) == 0:
-             pass
-         elif len(successful) != 0 and len(unsuccessful) == 0:
-             self.request_status_json["first_requested"] = successful[0]
-         elif len(successful) == 0 and len(unsuccessful) != 0:
-             self.request_status_json["first_requested"] = unsuccessful[0]
-         else:
-             self.request_status_json["first_requested"] = min(unsuccessful[0], successful[0])
-         self.request_status_json["epoch_left"] = self.limit_epoch - (self.request_status_json["last_requested"] - self.request_status_json["first_requested"])
-         return self.request_status_json
-     def get_current_tokens(self):
-         self.request_status_json = self.calculate_tokens()
-         total_requests = len(self.request_status_json["successful"]) + len(self.request_status_json["unsuccessful"])
-         return max(0, self.current_limit - total_requests)
-     def get_sleep(self):
-         self.request_status_json = self.calculate_tokens()
-         self.request_status_json["current_sleep"] = self.request_status_json["epoch_left"] / max(1, self.get_current_tokens())
-         return self.request_status_json
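get_sleep() spreads the remainder of the epoch evenly across the remaining tokens: current_sleep = epoch_left / max(1, tokens). A worked example of that arithmetic, using the values from the code above:

    # worked example of the get_sleep() formula
    limit_epoch = 60                              # seconds per epoch
    elapsed = 25                                  # last_requested - first_requested
    epoch_left = limit_epoch - elapsed            # 35
    tokens_left = 7
    current_sleep = epoch_left / max(1, tokens_left)  # 35 / 7 = 5.0 seconds
    # once tokens_left hits 0, the divisor clamps to 1 and the caller
    # sleeps the full remaining 35 seconds of the epoch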
-     def request(self):
-         self._refill_tokens()
-         if self.tokens > 0:
-             return True  # The request can be made
-         else:
-             if self.tokens == 0:
-                 self.request_status_json["count_since_fail"] += 1
-                 if self.epoch_cycle_adjustment is not None:
-                     if self.request_status_json["count_since_fail"] >= self.epoch_cycle_adjustment:
-                         self.current_limit = min(self.current_limit + 1, self.high_limit)
-             return False  # The request cannot be made
-     def _adjust_limit(self):
-         current_time = get_time_stamp()
-         if current_time - self.last_adjusted_time >= self.limit_epoch:
-             if len(self.calculate_tokens()["successful"]) >= self.tokens:
-                 # We hit the rate limit this epoch; decrease our limit
-                 self.tokens = max(1, self.tokens - 1)
-             else:
-                 self.successful_epochs_since_last_adjustment += 1
-                 if self.successful_epochs_since_last_adjustment >= 5:
-                     # We've had 5 successful epochs; increase our limit
-                     self.current_limit = min(self.high_limit, self.tokens + 1)
-                     self.successful_epochs_since_last_adjustment = 0
-             # Reset our counters for the new epoch
-             self.last_adjusted_time = current_time
-             self.request_count_in_current_epoch = 0
-     def adjust_limit(self):
-         # Set the tokens to the successful requests made within the current epoch
-         self.tokens = len(self.calculate_tokens()["successful"])
-         # Adjust the current limit to match
-         self.current_limit = self.tokens
-         # Log the adjustment
-         print(f"Adjusted tokens to: {self.tokens} and current_limit to: {self.current_limit}")
- class DynamicRateLimiterManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(service_name="default", low_limit=10, high_limit=30, limit_epoch=60, starting_tokens=10, epoch_cycle_adjustment=True):
-         if DynamicRateLimiterManagerSingleton._instance is None:
-             # NOTE: DynamicRateLimiterManager.__init__ as written accepts only service_name;
-             # the limit kwargs below would need to be accepted and forwarded to add_service
-             DynamicRateLimiterManagerSingleton._instance = DynamicRateLimiterManager(service_name=service_name, low_limit=low_limit, high_limit=high_limit, limit_epoch=limit_epoch, starting_tokens=starting_tokens, epoch_cycle_adjustment=epoch_cycle_adjustment)
-         return DynamicRateLimiterManagerSingleton._instance
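This module repeats the same cache-and-rebuild singleton pattern for each manager. A generic version of that pattern, for illustration only (SingletonFactory is hypothetical and not part of the package API):

    # Cache one instance and rebuild it whenever the arguments change,
    # mirroring the per-class *Singleton.get_instance helpers above.
    class SingletonFactory:
        def __init__(self, cls):
            self.cls = cls
            self._instance = None
            self._args = None

        def get_instance(self, **kwargs):
            if self._instance is None or kwargs != self._args:
                self._instance = self.cls(**kwargs)
                self._args = kwargs
            return self._instance

    rate_limiters = SingletonFactory(DynamicRateLimiterManager)
    limiter = rate_limiters.get_instance(service_name='default')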