abstract-webtools 0.1.5.8__py3-none-any.whl → 0.1.5.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -84,1778 +84,17 @@ from requests.adapters import HTTPAdapter
  from urllib.parse import urlparse, urljoin
  from requests.packages.urllib3.util import ssl_
  from requests.packages.urllib3.poolmanager import PoolManager
+ from urllib.parse import urlparse, parse_qs
+ import time
+ import requests
+ from .managers import *
  from abstract_utilities import get_time_stamp,get_sleep,sleep_count_down,eatInner,eatAll,eatOuter,ThreadManager
  logging.basicConfig(level=logging.INFO)
+ def try_request(url):
+     try:
+         response = requests.get(url)
+     except Exception as e:
+         print(f'request for url failed: {e}')
+         response = None
+     return response
 
- 
- 
- class CipherManager:
-     @staticmethod
-     def get_default_ciphers()-> list:
-         return [
-             "ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-ECDSA-AES256-GCM-SHA384",
-             "ECDHE-RSA-AES256-SHA384", "ECDHE-ECDSA-AES256-SHA384",
-             "ECDHE-RSA-AES256-SHA", "ECDHE-ECDSA-AES256-SHA",
-             "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-SHA256",
-             "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES128-SHA256",
-             "AES256-SHA", "AES128-SHA"
-         ]
- 
-     def __init__(self,cipher_list=None):
-         if cipher_list == None:
-             cipher_list=self.get_default_ciphers()
-         self.cipher_list = cipher_list
-         self.create_list()
-         self.ciphers_string = self.add_string_list()
-     def add_string_list(self):
-         if len(self.cipher_list)==0:
-             return ''
-         return ','.join(self.cipher_list)
-     def create_list(self):
-         if self.cipher_list == None:
-             self.cipher_list= []
-         elif isinstance(self.cipher_list, str):
-             self.cipher_list=self.cipher_list.split(',')
-         if isinstance(self.cipher_list, str):
-             self.cipher_list=[self.cipher_list]
- class CipherManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(cipher_list=None):
-         if CipherManagerSingleton._instance is None:
-             CipherManagerSingleton._instance = CipherManager(cipher_list=cipher_list)
-         elif CipherManagerSingleton._instance.cipher_list != cipher_list:
-             CipherManagerSingleton._instance = CipherManager(cipher_list=cipher_list)
-         return CipherManagerSingleton._instance
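
For orientation, a minimal usage sketch of the CipherManager removed above (in 0.1.5.82 these classes are expected to come in via the `.managers` import instead). It assumes the class definitions above are in scope; the printed values follow from the default cipher list.

    cm = CipherManager()                      # defaults to get_default_ciphers()
    print(cm.ciphers_string)                  # "ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-GCM-SHA384,..."

    custom = CipherManager(cipher_list="AES256-SHA,AES128-SHA")
    print(custom.cipher_list)                 # ['AES256-SHA', 'AES128-SHA']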
- class SSLManager:
-     def __init__(self, ciphers=None, ssl_options=None, certification=None):
-         self.ciphers = ciphers or CipherManager().ciphers_string
-         self.ssl_options = ssl_options or self.get_default_ssl_settings()
-         self.certification = certification or ssl.CERT_REQUIRED
-         self.ssl_context = self.get_context()
-     def get_default_ssl_settings(self):
-         return ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
-     def get_context(self):
-         return ssl_.create_urllib3_context(ciphers=self.ciphers, cert_reqs=self.certification, options=self.ssl_options)
- 
- class SSLManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(ciphers=None, ssl_options=None, certification=None):
-         if SSLManagerSingleton._instance is None:
-             SSLManagerSingleton._instance = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         elif SSLManagerSingleton._instance.ciphers != ciphers or SSLManagerSingleton._instance.ssl_options != ssl_options or SSLManagerSingleton._instance.certification != certification:
-             SSLManagerSingleton._instance = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         return SSLManagerSingleton._instance
- class TLSAdapter(HTTPAdapter):
-     def __init__(self, ssl_manager=None,ciphers=None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None):
-         if ssl_manager == None:
-             ssl_manager = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         self.ssl_manager = ssl_manager
-         self.ciphers = ssl_manager.ciphers
-         self.certification = ssl_manager.certification
-         self.ssl_options = ssl_manager.ssl_options
-         self.ssl_context = self.ssl_manager.ssl_context
-         super().__init__()
- 
-     def init_poolmanager(self, *args, **kwargs):
-         kwargs['ssl_context'] = self.ssl_context
-         return super().init_poolmanager(*args, **kwargs)
- class TLSAdapterSingleton:
-     _instance: Optional[TLSAdapter] = None
- 
-     @staticmethod
-     def get_instance(ciphers: Optional[List[str]] = None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None) -> TLSAdapter:
-         if (not TLSAdapterSingleton._instance) or (
-             TLSAdapterSingleton._instance.ciphers != ciphers or
-             TLSAdapterSingleton._instance.certification != certification or
-             TLSAdapterSingleton._instance.ssl_options != ssl_options
-         ):
-             TLSAdapterSingleton._instance = TLSAdapter(ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         return TLSAdapterSingleton._instance
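
The three classes above combine in the usual requests pattern: build an SSL context, wrap it in an HTTPAdapter subclass, and mount it on a session. A minimal sketch, assuming the classes as defined above:

    import requests

    session = requests.Session()
    adapter = TLSAdapter(ciphers=CipherManager().ciphers_string)
    session.mount('https://', adapter)        # TLS settings now apply to every https:// request
    response = session.get('https://example.com')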
- class UserAgentManager:
-     def __init__(self, os=None, browser=None, version=None,user_agent=None):
-         self.os = os or 'Windows'
-         self.browser = browser or "Firefox"
-         self.version = version or '42.0'
-         self.user_agent = user_agent or self.get_user_agent()
-         self.header = self.user_agent_header()
-     @staticmethod
-     def user_agent_db():
-         from .big_user_agent_list import big_user_agent_dict
-         return big_user_agent_dict
- 
-     def get_user_agent(self):
-         ua_db = self.user_agent_db()
- 
-         if self.os and self.os in ua_db:
-             os_db = ua_db[self.os]
-         else:
-             os_db = random.choice(list(ua_db.values()))
- 
-         if self.browser and self.browser in os_db:
-             browser_db = os_db[self.browser]
-         else:
-             browser_db = random.choice(list(os_db.values()))
- 
-         if self.version and self.version in browser_db:
-             return browser_db[self.version]
-         else:
-             return random.choice(list(browser_db.values()))
- 
-     def user_agent_header(self):
-         return {"user-agent": self.user_agent}
- class UserAgentManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(user_agent=None):
-         user_agent = user_agent or UserAgentManager().get_user_agent()
-         if UserAgentManagerSingleton._instance is None:
-             UserAgentManagerSingleton._instance = UserAgentManager(user_agent=user_agent)
-         elif UserAgentManagerSingleton._instance.user_agent != user_agent:
-             UserAgentManagerSingleton._instance = UserAgentManager(user_agent=user_agent)
-         return UserAgentManagerSingleton._instance
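
A short sketch of how the user-agent classes above are meant to be used; the exact string returned depends on big_user_agent_dict, so the printed values here are illustrative only:

    import requests

    ua = UserAgentManager(os='Windows', browser='Firefox', version='42.0')
    print(ua.user_agent)   # e.g. 'Mozilla/5.0 (Windows NT 6.1; rv:42.0) ...'
    print(ua.header)       # {'user-agent': '...'}

    session = requests.Session()
    session.headers.update(ua.header)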
- class NetworkManager:
-     def __init__(self, user_agent_manager=None,ssl_manager=None, tls_adapter=None,user_agent=None,proxies=None,cookies=None,ciphers=None, certification: Optional[str] = None, ssl_options: Optional[List[str]] = None):
-         if ssl_manager == None:
-             ssl_manager = SSLManager(ciphers=ciphers, ssl_options=ssl_options, certification=certification)
-         self.ssl_manager=ssl_manager
-         if tls_adapter == None:
-             tls_adapter=TLSAdapter(ssl_manager=ssl_manager,ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         self.tls_adapter=tls_adapter
-         self.ciphers=tls_adapter.ciphers
-         self.certification=tls_adapter.certification
-         self.ssl_options=tls_adapter.ssl_options
-         self.proxies=proxies or {}
-         self.cookies=cookies or "cb4c883efc59d0e990caf7508902591f4569e7bf-1617321078-0-150"
- class MySocketClient:
-     def __init__(self, ip_address=None, port=None,domain=None):
-         self.sock = None
-         self.ip_address= ip_address or None
-         self.port = port or None
- 
-         self.domain = domain or None
-     def receive_data(self):
-         chunks = []
-         while True:
-             chunk = self.sock.recv(4096)
-             if chunk:
-                 chunks.append(chunk)
-             else:
-                 break
-         return b''.join(chunks).decode('utf-8')
-     def _parse_socket_response_as_json(self, data, *args, **kwargs):
-         return self._parse_json(data[data.find('{'):data.rfind('}') + 1], *args, **kwargs)
-     def process_data(self):
-         data = self.receive_data()
-         return self._parse_socket_response_as_json(data)
-     def _parse_json(self,json_string):
-         return json.loads(json_string)
-     def get_ip(self,domain=None):
-         try:
-             return socket.gethostbyname(domain if domain != None else self.domain)
-         except socket.gaierror:
-             return None
-     def get_host_name(self,ip_address=None):
-         return socket.gethostbyaddr(ip_address if ip_address != None else self.ip_address)
-     def toggle_sock(self):
-         if self.sock != None:
-             self.sock.close()
-         else:
-             self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-             if self.ip_address and self.port:
-                 self.sock.connect((self.ip_address, self.port))
- class MySocketClientSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(ip_address='localhost',port=22,domain="example.com"):
-         if MySocketClientSingleton._instance is None:
-             MySocketClientSingleton._instance = MySocketClient(ip_address=ip_address,port=port,domain=domain)
-         elif MySocketClientSingleton._instance.ip_address != ip_address or MySocketClientSingleton._instance.port != port or MySocketClientSingleton._instance.domain != domain:
-             MySocketClientSingleton._instance = MySocketClient(ip_address=ip_address,port=port,domain=domain)
-         return MySocketClientSingleton._instance
- 
- class UrlManager:
-     """
-     UrlManager is a class for managing URLs, including cleaning, validating, and finding the correct version.
- 
-     Args:
-         url (str or None): The URL to manage (default is None).
-         session (requests.Session): A custom requests session (default is the requests module's session).
- 
-     Attributes:
-         session (requests.Session): The requests session used for making HTTP requests.
-         clean_urls (list): List of cleaned URL variations.
-         url (str): The current URL.
-         protocol (str): The protocol part of the URL (e.g., "https").
-         domain (str): The domain part of the URL (e.g., "example.com").
-         path (str): The path part of the URL (e.g., "/path/to/resource").
-         query (str): The query part of the URL (e.g., "?param=value").
-         all_urls (list): List of all URLs (not used in the provided code).
- 
-     Methods:
-         url_to_pieces(url): Split a URL into its protocol, domain, path, and query components.
-         clean_url(url): Return a list of potential URL versions with and without 'www' and 'http(s)'.
-         get_correct_url(url): Get the correct version of the URL from possible variations.
-         update_url(url): Update the URL and related attributes.
-         get_domain(url): Get the domain name from a URL.
-         url_join(url, path): Join a base URL with a path.
-         is_valid_url(url): Check if a URL is valid.
-         make_valid(href, url): Make a URL valid by joining it with a base URL.
-         get_relative_href(url, href): Get the relative href URL by joining it with a base URL.
- 
-     Note:
-         - The UrlManager class provides methods for managing URLs, including cleaning and validating them.
-         - It also includes methods for joining and validating relative URLs.
-     """
- 
-     def __init__(self, url=None, session=None):
-         """
-         Initialize a UrlManager instance.
- 
-         Args:
-             url (str or None): The URL to manage (default is None).
-             session (requests.Session): A custom requests session (default is the requests module's session).
-         """
-         self._url=url or 'www.example.com'
-         self.url = url or 'www.example.com'
-         self.session= session or requests
-         self.clean_urls = self.clean_url(url=url)
-         self.url = self.get_correct_url(clean_urls=self.clean_urls)
-         url_pieces = self.url_to_pieces(url=self.url)
-         self.protocol,self.domain,self.path,self.query=url_pieces
-         self.all_urls = []
-     def url_to_pieces(self, url):
-         protocol, domain, path, query = None, None, "", ""
-         try:
-             match = re.match(r'^(https?)?://?([^/]+)(/[^?]+)?(\?.+)?', url)
-             if match:
-                 protocol = match.group(1) if match.group(1) else None
-                 domain = match.group(2) if match.group(2) else None
-                 path = match.group(3) if match.group(3) else ""  # Handle None
-                 query = match.group(4) if match.group(4) else ""  # Handle None
-         except:
-             print(f'the url {url} could not be parsed')
-         return protocol, domain, path, query
- 
-     def clean_url(self,url=None) -> list:
-         """
-         Given a URL, return a list with potential URL versions including with and without 'www.',
-         and with 'http://' and 'https://'.
-         """
-         if url == None:
-             url=self.url
-         urls=[]
-         if url:
-             # Remove http:// or https:// prefix
-             cleaned = url.replace("http://", "").replace("https://", "")
-             no_subdomain = cleaned.replace("www.", "", 1)
- 
-             urls = [
-                 f"https://{cleaned}",
-                 f"http://{cleaned}",
-             ]
- 
-             # Add variants without 'www' if it was present
-             if cleaned != no_subdomain:
-                 urls.extend([
-                     f"https://{no_subdomain}",
-                     f"http://{no_subdomain}",
-                 ])
- 
-             # Add variants with 'www' if it wasn't present
-             else:
-                 urls.extend([
-                     f"https://www.{cleaned}",
-                     f"http://www.{cleaned}",
-                 ])
- 
-         return urls
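
Worked through concretely, clean_url always yields four candidates, https-first, derived purely from the logic above:

    # clean_url('example.com') ->
    #   ['https://example.com', 'http://example.com',
    #    'https://www.example.com', 'http://www.example.com']
    # clean_url('https://www.example.com') ->
    #   ['https://www.example.com', 'http://www.example.com',
    #    'https://example.com', 'http://example.com']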
- 
-     def get_correct_url(self,url=None,clean_urls=None) -> (str or None):
-         """
-         Gets the correct URL from the possible variations by trying each one with an HTTP request.
- 
-         Args:
-             url (str): The URL to find the correct version of.
-             session (type(requests.Session), optional): The requests session to use for making HTTP requests.
-                 Defaults to requests.
- 
-         Returns:
-             str: The correct version of the URL if found, or None if none of the variations are valid.
-         """
-         self.url = url
-         if url==None and clean_urls != None:
-             if self.url:
-                 url=self.url or clean_urls[0]
-         if url!=None and clean_urls==None:
-             clean_urls=self.clean_url(url)
-         elif url==None and clean_urls==None:
-             url=self.url
-             clean_urls=self.clean_urls
-         # Get the correct URL from the possible variations
-         for url in clean_urls:
-             try:
-                 source = self.session.get(url)
-                 return url
-             except requests.exceptions.RequestException as e:
-                 print(e)
-         return None
-     def update_url(self,url):
-         # These methods seem essential for setting up the UrlManager object.
-         self.url = url
-         self.clean_urls = self.clean_url()
-         self.correct_url = self.get_correct_url()
-         self.url = self.correct_url
-         self.protocol,self.domain,self.path,self.query=self.url_to_pieces(url=self.url)
-         self.all_urls = []
-     def get_domain(self,url):
-         return urlparse(url).netloc
-     def url_join(self,url,path):
-         url = eatOuter(url,['/'])
-         path = eatInner(path,['/'])
-         slash=''
-         if path[0] not in ['?','&']:
-             slash = '/'
-         url = url+slash+path
-         return url
-     @property
-     def url(self):
-         return self._url
-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
-     @staticmethod
-     def is_valid_url(url):
-         """
-         Check if the given URL is valid.
-         """
-         parsed = urlparse(url)
-         return bool(parsed.netloc) and bool(parsed.scheme)
-     @staticmethod
-     def make_valid(href,url):
-         def is_valid_url(url):
-             """
-             Check if the given URL is valid.
-             """
-             parsed = urlparse(url)
-             return bool(parsed.netloc) and bool(parsed.scheme)
-         if is_valid_url(href):
-             return href
-         new_link=urljoin(url,href)
-         if is_valid_url(new_link):
-             return new_link
-         return False
-     @staticmethod
-     def get_relative_href(url,href):
-         # join the URL if it's relative (not an absolute link)
-         href = urljoin(url, href)
-         parsed_href = urlparse(href)
-         # remove URL GET parameters, URL fragments, etc.
-         href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
-         return href
-     @staticmethod
-     def url_basename(url):
-         path = urllib.parse.urlparse(url).path
-         return path.strip('/').split('/')[-1]
- 
-     @staticmethod
-     def base_url(url):
-         return re.match(r'https?://[^?#]+/', url).group()
- 
-     @staticmethod
-     def urljoin(base, path):
-         if isinstance(path, bytes):
-             path = path.decode()
-         if not isinstance(path, str) or not path:
-             return None
-         if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
-             return path
-         if isinstance(base, bytes):
-             base = base.decode()
-         if not isinstance(base, str) or not re.match(
-                 r'^(?:https?:)?//', base):
-             return None
-         return urllib.parse.urljoin(base, path)
- class UrlManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(url=None,session=requests):
-         if UrlManagerSingleton._instance is None:
-             UrlManagerSingleton._instance = UrlManager(url,session=session)
-         elif UrlManagerSingleton._instance.session != session or UrlManagerSingleton._instance.url != url:
-             UrlManagerSingleton._instance = UrlManager(url,session=session)
-         return UrlManagerSingleton._instance
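
A minimal end-to-end sketch of UrlManager as defined above. Note that construction is not purely local: get_correct_url issues real GET requests against the candidate URLs, so this needs network access.

    um = UrlManager(url='example.com')
    print(um.url)       # first candidate that answered, e.g. 'https://example.com'
    print(um.protocol)  # 'https'
    print(um.domain)    # 'example.com'
    print(um.url_join(um.url, 'path/to/resource'))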
- class SafeRequest:
-     """
-     SafeRequest is a class for making HTTP requests with error handling and retries.
- 
-     Args:
-         url (str or None): The URL to make requests to (default is None).
-         url_manager (UrlManager or None): An instance of UrlManager (default is None).
-         network_manager (NetworkManager or None): An instance of NetworkManager (default is None).
-         user_agent_manager (UserAgentManager or None): An instance of UserAgentManager (default is None).
-         ssl_manager (SSLManager or None): An instance of SSLManager (default is None).
-         tls_adapter (TLSAdapter or None): An instance of TLSAdapter (default is None).
-         user_agent (str or None): The user agent string to use for requests (default is None).
-         proxies (dict or None): Proxy settings for requests (default is None).
-         headers (dict or None): Additional headers for requests (default is None).
-         cookies (dict or None): Cookie settings for requests (default is None).
-         session (requests.Session or None): A custom requests session (default is None).
-         adapter (str or None): A custom adapter for requests (default is None).
-         protocol (str or None): The protocol to use for requests (default is 'https://').
-         ciphers (str or None): Cipher settings for requests (default is None).
-         auth (tuple or None): Authentication credentials (default is None).
-         login_url (str or None): The URL for authentication (default is None).
-         email (str or None): Email for authentication (default is None).
-         password (str or None): Password for authentication (default is None).
-         certification (str or None): Certification settings for requests (default is None).
-         ssl_options (str or None): SSL options for requests (default is None).
-         stream (bool): Whether to stream the response content (default is False).
-         timeout (float or None): Timeout for requests (default is None).
-         last_request_time (float or None): Timestamp of the last request (default is None).
-         max_retries (int or None): Maximum number of retries for requests (default is None).
-         request_wait_limit (float or None): Wait time between requests (default is None).
- 
-     Methods:
-         update_url_manager(url_manager): Update the URL manager and reinitialize the SafeRequest.
-         update_url(url): Update the URL and reinitialize the SafeRequest.
-         re_initialize(): Reinitialize the SafeRequest with the current settings.
-         authenticate(s, login_url=None, email=None, password=None, checkbox=None, dropdown=None): Authenticate and make a request.
-         fetch_response(): Fetch the response from the server.
-         initialize_session(): Initialize the requests session with custom settings.
-         process_response_data(): Process the fetched response data.
-         get_react_source_code(): Extract JavaScript and JSX source code from <script> tags.
-         get_status(url=None): Get the HTTP status code of a URL.
-         wait_between_requests(): Wait between requests based on the request_wait_limit.
-         make_request(): Make a request and handle potential errors.
-         try_request(): Try to make an HTTP request using the provided session.
- 
-     Note:
-         - The SafeRequest class is designed for making HTTP requests with error handling and retries.
-         - It provides methods for authentication, response handling, and error management.
-     """
-     def __init__(self,
-                  url=None,
-                  source_code=None,
-                  url_manager=None,
-                  network_manager=None,
-                  user_agent_manager=None,
-                  ssl_manager=None,
-                  ssl_options=None,
-                  tls_adapter=None,
-                  user_agent=None,
-                  proxies=None,
-                  headers=None,
-                  cookies=None,
-                  session=None,
-                  adapter=None,
-                  protocol=None,
-                  ciphers=None,
-                  spec_login=False,
-                  login_referer=None,
-                  login_user_agent=None,
-                  auth=None,
-                  login_url=None,
-                  email=None,
-                  password=None,
-                  checkbox=None,
-                  dropdown=None,
-                  certification=None,
-                  stream=False,
-                  timeout=None,
-                  last_request_time=None,
-                  max_retries=None,
-                  request_wait_limit=None):
-         self.url_manager = url_manager or UrlManager(url=url)
-         self._url = url
-         self.url = url
-         self._url_manager = self.url_manager
-         self.user_agent = user_agent
-         self.user_agent_manager = user_agent_manager or UserAgentManager(user_agent=self.user_agent)
-         self.headers = headers or self.user_agent_manager.header or {'Accept': '*/*'}
-         self.user_agent = self.user_agent_manager.user_agent
-         self.ciphers = ciphers or CipherManager().ciphers_string
-         self.certification = certification
-         self.ssl_options = ssl_options
-         self.ssl_manager = ssl_manager or SSLManager(ciphers=self.ciphers, ssl_options=self.ssl_options, certification=self.certification)
-         self.tls_adapter = tls_adapter or TLSAdapter(ssl_manager=self.ssl_manager,certification=self.certification,ssl_options=self.ssl_manager.ssl_options)
-         self.network_manager = network_manager or NetworkManager(user_agent_manager=self.user_agent_manager,ssl_manager=self.ssl_manager, tls_adapter=self.tls_adapter,user_agent=user_agent,proxies=proxies,cookies=cookies,ciphers=ciphers, certification=certification, ssl_options=ssl_options)
-         self.stream = stream if isinstance(stream,bool) else False
-         self.tls_adapter = self.network_manager.tls_adapter
-         self.ciphers = self.network_manager.ciphers
-         self.certification = self.network_manager.certification
-         self.ssl_options = self.network_manager.ssl_options
-         self.proxies = self.network_manager.proxies
-         self.timeout = timeout
-         self.cookies = self.network_manager.cookies
-         self.session = session or requests.session()
-         self.auth = auth
-         self.spec_login = spec_login
-         self.password = password
-         self.email = email
-         self.checkbox = checkbox
-         self.dropdown = dropdown
-         self.login_url = login_url
-         self.login_user_agent = login_user_agent
-         self.login_referer = login_referer
-         self.protocol = protocol or 'https://'
-         self.initialize_session()
-         self.last_request_time = last_request_time
-         self.max_retries = max_retries or 3
-         self.request_wait_limit = request_wait_limit or 1.5
-         self._response = None
-         self.make_request()
-         self.source_code = None
-         self.source_code_bytes = None
-         self.source_code_json = {}
-         self.react_source_code = []
-         self._response_data = None
-         self.process_response_data()
-     def update_url_manager(self,url_manager):
-         self.url_manager=url_manager
-         self.re_initialize()
-     def update_url(self,url):
-         self.url_manager.update_url(url=url)
-         self.re_initialize()
-     def re_initialize(self):
-         self._response=None
-         self.make_request()
-         self.source_code = None
-         self.source_code_bytes=None
-         self.source_code_json = {}
-         self.react_source_code=[]
-         self._response_data = None
-         self.process_response_data()
-     @property
-     def response(self):
-         """Lazy-loading of response."""
-         if self._response is None:
-             self._response = self.fetch_response()
-         return self._response
-     def authenticate(self,session, login_url=None, email=None, password=None,checkbox=None,dropdown=None):
-         login_urls = login_url or [self.url_manager.url,self.url_manager.domain,self.url_manager.url_join(url=self.url_manager.domain,path='login'),self.url_manager.url_join(url=self.url_manager.domain,path='auth')]
-         s = session
-         if not isinstance(login_urls,list):
-             login_urls=[login_urls]
-         for login_url in login_urls:
-             login_url_manager = UrlManager(login_url)
-             login_url = login_url_manager.url
- 
-             r = s.get(login_url)
-             soup = BeautifulSoup(r.content, "html.parser")
-             # Find the token or any CSRF protection token
-             token = soup.find('input', {'name': 'token'}).get('value') if soup.find('input', {'name': 'token'}) else None
-             if token != None:
-                 break
-         login_data = {}
-         if email != None:
-             login_data['email']=email
-         if password != None:
-             login_data['password'] = password
-         if checkbox != None:
-             login_data['checkbox'] = checkbox
-         if dropdown != None:
-             login_data['dropdown']=dropdown
-         if token != None:
-             login_data['token'] = token
-         s.post(login_url, data=login_data)
-         return s
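
The authenticate flow above is the standard CSRF-token dance: GET the login page, lift the hidden token out of the form, then POST it back with the credentials. A stripped-down, standalone sketch of the same idea (the field names 'token', 'email', and 'password' are this library's assumptions, not a universal convention):

    import requests
    from bs4 import BeautifulSoup

    def csrf_login(login_url, email, password):
        s = requests.Session()
        page = s.get(login_url)
        soup = BeautifulSoup(page.content, 'html.parser')
        token_input = soup.find('input', {'name': 'token'})
        data = {'email': email, 'password': password}
        if token_input:
            data['token'] = token_input.get('value')   # echo the CSRF token back
        s.post(login_url, data=data)
        return s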
- 
-     def fetch_response(self) -> Union[requests.Response, None]:
-         """Actually fetches the response from the server."""
-         # You can further adapt this method to use retries or other logic you had
-         # in your original code, but the main goal here is to fetch and return the response
-         return self.try_request()
-     def spec_auth(self, session=None, email=None, password=None, login_url=None, login_referer=None, login_user_agent=None):
-         s = session or requests.session()
- 
-         domain = self.url_manager.url_join(self.url_manager.get_correct_url(self.url_manager.domain),'login') if login_url is None else login_url
-         login_url = self.url_manager.get_correct_url(url=domain)
- 
-         login_referer = login_referer or self.url_manager.url_join(url=login_url, path='?role=fast&to=&s=1&m=1&email=YOUR_EMAIL')
-         login_user_agent = login_user_agent or 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0'
- 
-         headers = {"Referer": login_referer, 'User-Agent': login_user_agent}
-         payload = {'email': email, 'pass': password}
- 
-         page = s.get(login_url)
-         soup = BeautifulSoup(page.content, 'lxml')
-         action_url = soup.find('form')['action']
-         s.post(action_url, data=payload, headers=headers)
-         return s
-     def initialize_session(self):
-         s = self.session
-         if self.auth:
-             s.auth = self.auth
-         elif self.spec_login:
-             s=self.spec_auth(session=s,email=self.email, password=self.password, login_url=self.login_url, login_referer=self.login_referer, login_user_agent=self.login_user_agent)
-         elif any([self.password, self.email, self.login_url, self.checkbox, self.dropdown]):
-             s=self.authenticate(session=s, login_url=self.login_url, email=self.email, password=self.password, checkbox=self.checkbox, dropdown=self.dropdown)
-         s.proxies = self.proxies
-         s.cookies["cf_clearance"] = self.network_manager.cookies
-         s.headers.update(self.headers)
-         s.mount(self.protocol, self.network_manager.tls_adapter)
-         return s
-     def process_response_data(self):
-         """Processes the fetched response data."""
-         if not self.response:
-             return  # No data to process
- 
-         self.source_code = self.response.text
-         self.source_code_bytes = self.response.content
- 
-         if self.response.headers.get('content-type') == 'application/json':
-             data = convert_to_json(self.source_code)
-             if data:
-                 self.source_code_json = data.get("response", data)
- 
-         self.get_react_source_code()
-     def get_react_source_code(self) -> list:
-         """
-         Fetches the source code of the specified URL and extracts JavaScript and JSX source code (React components).
- 
-         Args:
-             url (str): The URL to fetch the source code from.
- 
-         Returns:
-             list: A list of strings containing JavaScript and JSX source code found in <script> tags.
-         """
-         if self.url_manager.url is None:
-             return []
-         soup = BeautifulSoup(self.source_code_bytes,"html.parser")
-         script_tags = soup.find_all('script', type=lambda t: t and ('javascript' in t or 'jsx' in t))
-         for script_tag in script_tags:
-             self.react_source_code.append(script_tag.string)
- 
-     def get_status(self, url:str=None) -> int:
-         """
-         Gets the HTTP status code of the given URL.
- 
-         Args:
-             url (str): The URL to check the status of.
- 
-         Returns:
-             int: The HTTP status code of the URL, or None if the request fails.
-         """
-         # Get the status code of the URL
-         response = try_request(url=url)
-         return response.status_code if response is not None else None
-     def wait_between_requests(self):
-         """
-         Wait between requests based on the request_wait_limit.
-         """
-         if self.last_request_time:
-             sleep_time = self.request_wait_limit - (get_time_stamp() - self.last_request_time)
-             if sleep_time > 0:
-                 logging.info(f"Sleeping for {sleep_time:.2f} seconds.")
-                 get_sleep(sleep_time)
- 
-     def make_request(self):
-         """
-         Make a request and handle potential errors.
-         """
-         # Update the instance attributes if they are passed
-         self.wait_between_requests()
-         for _ in range(self.max_retries):
-             try:
-                 self.try_request()
-                 if self.response:
-                     if self.response.status_code == 200:
-                         self.last_request_time = get_time_stamp()
-                         return self.response
-                     elif self.response.status_code == 429:
-                         logging.warning(f"Rate limited by {self.url_manager.url}. Retrying...")
-                         get_sleep(5)  # adjust this based on the server's rate limit reset time
-             except requests.Timeout as e:
-                 logging.error(f"Request to {self.url_manager.url} timed out: {e}")
-             except requests.ConnectionError:
-                 logging.error(f"Connection error for URL {self.url_manager.url}.")
-             except requests.RequestException as e:
-                 logging.error(f"Request exception for URL {self.url_manager.url}: {e}")
- 
-         logging.error(f"Failed to retrieve content from {self.url_manager.url} after {self.max_retries} retries.")
-         return None
-     def try_request(self) -> Union[requests.Response, None]:
-         """
-         Tries to make an HTTP request to the given URL using the provided session.
- 
-         Args:
-             timeout (int): Timeout for the request.
- 
-         Returns:
-             requests.Response or None: The response object if the request is successful, or None if the request fails.
-         """
-         try:
-             return self.session.get(url=self.url_manager.url, timeout=self.timeout,stream=self.stream)
-         except requests.exceptions.RequestException as e:
-             print(e)
-             return None
- 
-     @property
-     def url(self):
-         return self.url_manager.url
- 
-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
- class SafeRequestSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(url=None,headers:dict=None,max_retries=3,last_request_time=None,request_wait_limit=1.5):
-         if SafeRequestSingleton._instance is None:
-             SafeRequestSingleton._instance = SafeRequest(url,url_manager=UrlManagerSingleton,headers=headers,max_retries=max_retries,last_request_time=last_request_time,request_wait_limit=request_wait_limit)
-         elif SafeRequestSingleton._instance.url != url or SafeRequestSingleton._instance.headers != headers or SafeRequestSingleton._instance.max_retries != max_retries or SafeRequestSingleton._instance.request_wait_limit != request_wait_limit:
-             SafeRequestSingleton._instance = SafeRequest(url,url_manager=UrlManagerSingleton,headers=headers,max_retries=max_retries,last_request_time=last_request_time,request_wait_limit=request_wait_limit)
-         return SafeRequestSingleton._instance
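
A usage sketch for SafeRequest as defined above. The constructor is eager: it builds the session, mounts the TLS adapter, and calls make_request() immediately, so simply instantiating the class performs the fetch.

    req = SafeRequest(url='https://example.com')
    print(req.response.status_code)   # lazily cached requests.Response
    print(req.source_code[:200])      # response body as text (assuming the fetch succeeded)
    print(req.source_code_json)       # populated only for JSON responses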
- class SoupManager:
-     """
-     SoupManager is a class for managing and parsing HTML source code using BeautifulSoup.
- 
-     Args:
-         url (str or None): The URL to be parsed (default is None).
-         source_code (str or None): The HTML source code (default is None).
-         url_manager (UrlManager or None): An instance of UrlManager (default is None).
-         request_manager (SafeRequest or None): An instance of SafeRequest (default is None).
-         parse_type (str): The type of parser to be used by BeautifulSoup (default is "html.parser").
- 
-     Methods:
-         re_initialize(): Reinitialize the SoupManager with the current settings.
-         update_url(url): Update the URL and reinitialize the SoupManager.
-         update_source_code(source_code): Update the source code and reinitialize the SoupManager.
-         update_request_manager(request_manager): Update the request manager and reinitialize the SoupManager.
-         update_url_manager(url_manager): Update the URL manager and reinitialize the SoupManager.
-         update_parse_type(parse_type): Update the parsing type and reinitialize the SoupManager.
-         all_links: A property that provides access to all discovered links.
-         _all_links_get(): A method to load all discovered links.
-         get_all_website_links(tag="a", attr="href"): Get all URLs belonging to the same website.
-         meta_tags: A property that provides access to all discovered meta tags.
-         _meta_tags_get(): A method to load all discovered meta tags.
-         get_meta_tags(): Get all meta tags in the source code.
-         find_all(element, soup=None): Find all instances of an HTML element in the source code.
-         get_class(class_name, soup=None): Get the specified class from the HTML source code.
-         has_attributes(tag, *attrs): Check if an HTML tag has the specified attributes.
-         get_find_all_with_attributes(*attrs): Find all HTML tags with specified attributes.
-         get_all_desired_soup(tag=None, attr=None, attr_value=None): Get HTML tags based on specified criteria.
-         extract_elements(url, tag=None, class_name=None, class_value=None): Extract portions of source code based on filters.
-         find_all_with_attributes(class_name=None, *attrs): Find classes with associated href or src attributes.
-         get_images(tag_name, class_name, class_value): Get images with specific class and attribute values.
-         discover_classes_and_meta_images(tag_name, class_name_1, class_name_2, class_value, attrs): Discover classes and meta images.
- 
-     Note:
-         - The SoupManager class is designed for parsing HTML source code using BeautifulSoup.
-         - It provides various methods to extract data and discover elements within the source code.
-     """
-     def __init__(self,url=None,source_code=None,url_manager=None,request_manager=None, parse_type="html.parser"):
-         self.soup=[]
-         self.url=url
-         if url_manager == None:
-             url_manager=UrlManager(url=self.url)
-         if self.url != None and url_manager != None and url_manager.url != UrlManager(url=url).url:
-             url_manager.update_url(url=self.url)
-         self.url_manager= url_manager
-         self.url=self.url_manager.url
-         if request_manager == None:
-             request_manager = SafeRequest(url_manager=self.url_manager)
-         self.request_manager = request_manager
-         if self.request_manager.url_manager != self.url_manager:
-             self.request_manager.update_url_manager(url_manager=self.url_manager)
-         self.parse_type = parse_type
-         if source_code != None:
-             self.source_code = source_code
-         else:
-             self.source_code = self.request_manager.source_code_bytes
-         self.soup= BeautifulSoup(self.source_code, self.parse_type)
-         self._all_links_data = None
-         self._meta_tags_data = None
-     def re_initialize(self):
-         self.soup= BeautifulSoup(self.source_code, self.parse_type)
-         self._all_links_data = None
-         self._meta_tags_data = None
-     def update_url(self,url):
-         self.url_manager.update_url(url=url)
-         self.url=self.url_manager.url
-         self.request_manager.update_url(url=url)
-         self.source_code = self.request_manager.source_code_bytes
-         self.re_initialize()
-     def update_source_code(self,source_code):
-         self.source_code = source_code
-         self.re_initialize()
-     def update_request_manager(self,request_manager):
-         self.request_manager = request_manager
-         self.url_manager=self.request_manager.url_manager
-         self.url=self.url_manager.url
-         self.source_code = self.request_manager.source_code_bytes
-         self.re_initialize()
-     def update_url_manager(self,url_manager):
-         self.url_manager=url_manager
-         self.url=self.url_manager.url
-         self.request_manager.update_url_manager(url_manager=self.url_manager)
-         self.source_code = self.request_manager.source_code_bytes
-         self.re_initialize()
-     def update_parse_type(self,parse_type):
-         self.parse_type=parse_type
-         self.re_initialize()
-     @property
-     def all_links(self):
-         """This is a property that provides access to the _all_links_data attribute.
-         The first time it's accessed, it will load the data."""
-         if self._all_links_data is None:
-             print("Loading all links for the first time...")
-             self._all_links_data = self._all_links_get()
-         return self._all_links_data
-     def _all_links_get(self):
-         """A method that loads the data (can be replaced with whatever data loading logic you have)."""
-         return self.get_all_website_links()
-     def get_all_website_links(self,tag="a",attr="href") -> list:
-         """
-         Returns all URLs that are found on the specified URL and belong to the same website.
- 
-         Args:
-             url (str): The URL to search for links.
- 
-         Returns:
-             list: A list of URLs that belong to the same website as the specified URL.
-         """
-         all_urls=[self.url_manager.url]
-         domain = self.url_manager.domain
-         all_desired=self.get_all_desired_soup(tag=tag,attr=attr)
-         for tag in all_desired:
-             href = tag.attrs.get(attr)
-             if href == "" or href is None:
-                 # href empty tag
-                 continue
-             href=self.url_manager.get_relative_href(self.url_manager.url,href)
-             if not self.url_manager.is_valid_url(href):
-                 # not a valid URL
-                 continue
-             if href in all_urls:
-                 # already in the set
-                 continue
-             if domain not in href:
-                 # external link
-                 continue
-             all_urls.append(href)
- 
-         return all_urls
- 
-     @property
-     def meta_tags(self):
-         """This is a property that provides access to the _meta_tags_data attribute.
-         The first time it's accessed, it will load the data."""
-         if self._meta_tags_data is None:
-             print("Loading all meta tags for the first time...")
-             self._meta_tags_data = self._meta_tags_get()
-         return self._meta_tags_data
-     def _meta_tags_get(self):
-         """A method that loads the data (can be replaced with whatever data loading logic you have)."""
-         return self.get_meta_tags()
-     def get_meta_tags(self):
-         meta_tags = {}
-         tags = self.find_all("meta")
-         for meta_tag in tags:
-             for attr, values in meta_tag.attrs.items():
-                 if attr not in meta_tags:
-                     meta_tags[attr] = []
-                 if values not in meta_tags[attr]:
-                     meta_tags[attr].append(values)
-         return meta_tags
- 
-     def find_all(self,element,soup=None):
-         soup = self.soup if soup == None else soup
-         return soup.find_all(element)
-     def get_class(self,class_name,soup=None):
-         soup = self.soup if soup == None else soup
-         return soup.get(class_name)
-     @staticmethod
-     def has_attributes(tag, *attrs):
-         return any(tag.has_attr(attr) for attr in attrs)
-     def get_find_all_with_attributes(self, *attrs):
-         return self.soup.find_all(lambda t: self.has_attributes(t, *attrs))
-     def find_tags_by_attributes(self, tag: str = None, attr: str = None, attr_values: List[str] = None) -> List:
-         if not tag:
-             tags = self.soup.find_all(True)  # get all tags
-         else:
-             tags = self.soup.find_all(tag)  # get specific tags
- 
-         extracted_tags = []
-         for t in tags:
-             if attr:
-                 attribute_value = t.get(attr)
-                 if not attribute_value:  # skip tags without the desired attribute
-                     continue
-                 if attr_values and not any(value in attribute_value for value in attr_values):  # skip tags without any of the desired attribute values
-                     continue
-             extracted_tags.append(t)
-         return extracted_tags
- 
-     def extract_elements(self,url:str=None, tag:str=None, class_name:str=None, class_value:str=None) -> list:
-         """
-         Extracts portions of the source code from the specified URL based on provided filters.
- 
-         Args:
-             url (str): The URL to fetch the source code from.
-             tag (str, optional): The HTML element type to filter by. Defaults to None.
-             class_name (str, optional): The class name to filter by. Defaults to None.
-             class_value (str, optional): The class value to filter by. Defaults to None.
- 
-         Returns:
-             list: A list of strings containing portions of the source code that match the provided filters.
-         """
-         elements = []
-         # If no filters are provided, return the entire source code
-         if not tag and not class_name and not class_value:
-             elements.append(str(self.soup))
-             return elements
-         # Find elements based on the filters provided
-         if tag:
-             elements.extend([str(tags) for tags in self.get_all_desired(tag)])
-         if class_name:
-             elements.extend([str(tags) for tags in self.get_all_desired(tag={class_name: True})])
-         if class_value:
-             elements.extend([str(tags) for tags in self.get_all_desired(class_name=class_name)])
-         return elements
-     def find_all_with_attributes(self, class_name=None, *attrs):
-         """
-         Discovers classes in the HTML content of the provided URL
-         that have associated href or src attributes.
- 
-         Args:
-             base_url (str): The URL from which to discover classes.
- 
-         Returns:
-             set: A set of unique class names.
-         """
-         unique_classes = set()
-         for tag in self.get_find_all_with_attributes(*attrs):
-             class_list = self.get_class(class_name=class_name, soup=tag)
-             unique_classes.update(class_list)
-         return unique_classes
-     def get_images(self, tag_name, class_name, class_value):
-         images = []
-         for tag in self.soup.find_all(tag_name):
-             if class_name in tag.attrs and tag.attrs[class_name] == class_value:
-                 content = tag.attrs.get('content', '')
-                 if content:
-                     images.append(content)
-         return images
-     def extract_text_sections(self) -> list:
-         """
-         Extract all sections of text from an HTML content using BeautifulSoup.
- 
-         Args:
-             html_content (str): The HTML content to be parsed.
- 
-         Returns:
-             list: A list containing all sections of text.
-         """
-         # Remove any script or style elements to avoid extracting JavaScript or CSS code
-         for script in self.soup(['script', 'style']):
-             script.decompose()
- 
-         # Extract text from the remaining elements
-         text_sections = self.soup.stripped_strings
-         return [text for text in text_sections if text]
-     def discover_classes_and_meta_images(self, tag_name, class_name_1, class_name_2, class_value, attrs):
-         """
-         Discovers classes in the HTML content of the provided URL
-         that have associated href or src attributes. Also, fetches
-         image references from meta tags.
- 
-         Args:
-             base_url (str): The URL from which to discover classes and meta images.
- 
-         Returns:
-             tuple: A set of unique class names and a list of meta images.
-         """
-         unique_classes = self.find_all_with_attributes(class_name=class_name_1, *attrs)
-         images = self.get_images(tag_name=tag_name, class_name=class_name_2, class_value=class_value)
-         return unique_classes, images
-     def get_all_tags_and_attribute_names(self):
-         tag_names = set()  # Using a set to ensure uniqueness
-         attribute_names = set()
-         get_all = self.find_tags_by_attributes()
-         for tag in get_all:  # True matches all tags
-             tag_names.add(tag.name)
-             for attr in tag.attrs:
-                 attribute_names.add(attr)
-         tag_names_list = list(tag_names)
-         attribute_names_list = list(attribute_names)
-         return {"tags":tag_names_list,"attributes":attribute_names_list}
- 
-     def get_all_attribute_values(self):
-         attribute_values={}
-         get_all = self.find_tags_by_attributes()
-         for tag in get_all:  # True matches all tags
-             for attr, value in tag.attrs.items():
-                 # If attribute is not yet in the dictionary, add it with an empty set
-                 if attr not in attribute_values:
-                     attribute_values[attr] = set()
-                 # If the attribute value is a list (e.g., class), extend the set with the list
-                 if isinstance(value, list):
-                     attribute_values[attr].update(value)
-                 else:
-                     attribute_values[attr].add(value)
-         for attr, values in attribute_values.items():
-             attribute_values[attr] = list(values)
-         return attribute_values
- 
-     @property
-     def url(self):
-         return self._url
-     @url.setter
-     def url(self, new_url):
-         self._url = new_url
- 
- class SoupManagerSingleton():
-     _instance = None
-     @staticmethod
-     def get_instance(url_manager,request_manager,parse_type="html.parser",source_code=None):
-         if SoupManagerSingleton._instance is None:
-             SoupManagerSingleton._instance = SoupManager(url_manager,request_manager,parse_type=parse_type,source_code=source_code)
-         elif parse_type != SoupManagerSingleton._instance.parse_type or source_code != SoupManagerSingleton._instance.source_code:
-             SoupManagerSingleton._instance = SoupManager(url_manager,request_manager,parse_type=parse_type,source_code=source_code)
-         return SoupManagerSingleton._instance
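
A sketch of SoupManager as defined above. Note the constructor always builds a SafeRequest (and therefore hits the network) even when source_code is supplied; passing a prebuilt request_manager is the only way around that in this version.

    sm = SoupManager(url='https://example.com')
    print(sm.all_links)                           # same-domain links, loaded lazily on first access
    print(sm.get_all_tags_and_attribute_names())  # {'tags': [...], 'attributes': [...]}
    imgs = sm.find_tags_by_attributes(tag='img', attr='src')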
- class VideoDownloader:
-     """
-     VideoDownloader is a class for downloading videos from URLs using yt-dlp.
- 
-     Args:
-         link (str or list): The URL(s) of the video(s) to be downloaded.
-         temp_directory (str or None): The directory to store temporary video files (default is None, uses video_directory/temp_files).
-         video_directory (str or None): The directory to store downloaded videos (default is None, uses 'videos' in the current working directory).
-         remove_existing (bool): Whether to remove existing video files with the same name (default is True).
- 
-     Methods:
-         count_outliers(speed, threshold): Count speed outliers below the threshold.
-         filter_outliers(speeds): Filter out speed outliers in the list of speeds.
-         remove_temps(file_name): Remove temporary video files based on the file name.
-         move_video(): Move the downloaded video to the final directory.
-         yt_dlp_downloader(url, ydl_opts={}, download=True): Download video information using yt-dlp.
-         progress_callback(d): Callback function to monitor download progress.
-         download(): Download video(s) based on the provided URL(s).
-         monitor(): Monitor the download progress.
-         start(): Start the download and monitoring threads.
- 
-     Note:
-         - The VideoDownloader class uses yt-dlp to download videos.
-         - It allows downloading from multiple URLs.
-         - You need to have yt-dlp installed to use this class.
-     """
-     def __init__(self, link,temp_directory=None,video_directory=None,remove_existing=True):
-         if video_directory==None:
-             video_directory=os.path.join(os.getcwd(),'videos')
-         if temp_directory == None:
-             temp_directory=os.path.join(video_directory,'temp_files')
-         self.thread_manager = ThreadManager()
-         self.pause_event = self.thread_manager.add_thread('pause_event')
-         self.link = link
-         self.temp_directory = temp_directory
-         self.video_directory = video_directory
-         self.remove_existing=remove_existing
-         self.video_urls=self.link if isinstance(self.link,list) else [self.link]
-         self.starttime = None
-         self.downloaded = 0
-         self.time_interval=60
-         self.monitoring=True
-         self.temp_file_name = None
-         self.file_name = None
-         self.dl_speed = None
-         self.dl_eta=None
-         self.total_bytes_est=None
-         self.percent_speed=None
-         self.percent=None
-         self.speed_track = []
-         self.video_url=None
-         self.last_checked = get_time_stamp()
-         self.num=0
-         self.start()
-     def count_outliers(self,speed,threshold):
-         if speed < threshold:
-             self.outlier_count+=1
-         else:
-             self.outlier_count=0
-     def filter_outliers(self,speeds):
-         # Step 1: Compute initial average
-         initial_avg = sum(speeds) / len(speeds)
- 
-         # Step 2: Remove speeds 25% under the average
-         threshold = initial_avg * 0.75  # 25% under average
-         filtered_speeds = [speed for speed in speeds if speed >= threshold]
- 
-         # Step 3: Compute the new average of the filtered list
-         if filtered_speeds:  # Ensure the list is not empty
-             self.count_outliers(speeds[-1],threshold)
-             return filtered_speeds
-         else:
-             # This can happen if all values are outliers; it's up to you how to handle it
-             self.outlier_count=0
-             return speeds
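
Worked through on concrete numbers, assuming speeds = [100, 90, 95, 20]: the initial average is 305 / 4 = 76.25, so the threshold is 0.75 × 76.25 = 57.19. Filtering keeps [100, 90, 95], and because the most recent sample (20) falls below the threshold, count_outliers bumps outlier_count by one. Three consecutive bumps, combined with an ETA above ten minutes, are what the monitor thread (below) treats as a stalled download worth restarting.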
-     def remove_temps(self,file_name):
-         for temp_vid in os.listdir(self.temp_directory):
-             if len(file_name)<=len(temp_vid):
-                 if temp_vid[:len(file_name)] == file_name:
-                     os.remove(os.path.join(self.temp_directory,temp_vid))
-                     print(f"removing {temp_vid} from {self.temp_directory}")
-     def move_video(self):
-         if os.path.exists(self.temp_file_path):
-             shutil.move(self.temp_file_path, self.video_directory)
-             print(f"moving {self.file_name} from {self.temp_directory} to {self.video_directory}")
-             self.remove_temps(self.file_name)
-             return True
-         if os.path.exists(self.complete_file_path):
-             print(f"{self.file_name} already existed in {self.video_directory}; removing it from {self.temp_directory}")
-             self.remove_temps(self.file_name)
-             return True
-         return False
-     def yt_dlp_downloader(self,url,ydl_opts={},download=True):
-         try:
-             with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-                 self.info_dict=ydl.extract_info(url=url, download=download)
-             return True
-         except:
-             return False
-     def progress_callback(self, d):
-         self.status_dict = d
-         keys = ['status',
-                 'downloaded_bytes',
-                 'fragment_index',
-                 'fragment_count',
-                 'filename',
-                 'tmpfilename',
-                 'max_progress',
-                 'progress_idx',
-                 'elapsed',
-                 'total_bytes_estimate',
-                 'speed',
-                 'eta',
-                 '_eta_str',
-                 '_speed_str',
-                 '_percent_str',
-                 '_total_bytes_str',
-                 '_total_bytes_estimate_str',
-                 '_downloaded_bytes_str',
-                 '_elapsed_str',
-                 '_default_template']
-         if self.status_dict['status'] == 'finished':
-             print("Done downloading, moving video to final directory...")
-             self.move_video()
-             return
-         if get_time_stamp()-self.last_checked>5:
-             print(self.status_dict['_default_template'])
-             self.last_checked = get_time_stamp()
-         if (get_time_stamp()-self.start_time)/5>6:
-             self.speed_track.append(self.status_dict['speed'])
-             self.speed_track=self.filter_outliers(self.speed_track)
- 
-     def download(self):
-         if not os.path.exists(self.video_directory):
-             os.makedirs(self.video_directory,exist_ok=True)
-         if not os.path.exists(self.temp_directory):
-             os.makedirs(self.temp_directory,exist_ok=True)
-         for self.num,video_url in enumerate(self.video_urls):
-             if video_url != self.video_url or self.video_url == None:
-                 self.video_url=video_url
-                 self.info_dict=None
-                 result = self.yt_dlp_downloader(url=self.video_url,ydl_opts={'quiet': True, 'no_warnings': True},download=False)
-                 if self.info_dict != None and result:
-                     self.start_time = get_time_stamp()
-                     self.downloaded = 0
-                     self.video_title = self.info_dict.get('title', None)
-                     self.video_ext = self.info_dict.get('ext', 'mp4')
-                     self.file_name =f"{self.video_title}.{self.video_ext}"
-                     self.temp_file_path = os.path.join(self.temp_directory, self.file_name)
-                     self.complete_file_path = os.path.join(self.video_directory, self.file_name)
-                     if not self.move_video():
-                         self.dl_speed = []
-                         self.percent=None
-                         self.dl_eta=None
-                         self.total_bytes_est=None
-                         self.percent_speed=None
-                         self.speed_track = []
-                         self.outlier_count=0
-                         ydl_opts = {
-                             'outtmpl': self.temp_file_path,
-                             'noprogress':True,
-                             'progress_hooks': [self.progress_callback]
-                         }
-                         print("Starting download...")  # Check if this point in code is reached
-                         result = self.yt_dlp_downloader(url=self.video_url,ydl_opts=ydl_opts,download=True)
-                         if result:
-                             print("Download finished!")  # Check if download completes
-                         else:
-                             print(f'error downloading {self.video_url}')
-                         self.move_video()
-                     else:
-                         print(f"The video from {self.video_url} already exists in the directory {self.video_directory}. Skipping download.")
-                 else:
-                     print(f"could not find video info from {self.video_url}; skipping download.")
-             if self.num==len(self.video_urls)-1:
-                 self.monitoring=False
-                 self.time_interval=0
- 
-     def monitor(self):
-         while self.monitoring:
-             self.thread_manager.wait(name='pause_event',n=self.time_interval)  # check every minute
-             if self.monitoring:
-                 if 'eta' in self.status_dict:
-                     if self.outlier_count>=3 and (self.status_dict['eta']/60)>10:
-                         self.start()
- 
-     def start(self):
-         download_thread = self.thread_manager.add_thread(name='download_thread',target=self.download)
-         monitor_thread = self.thread_manager.add_thread(name='monitor_thread',target_function=self.monitor)
-         self.thread_manager.start(name='download_thread')
-         self.thread_manager.start(name='monitor_thread')
-         self.thread_manager.join(name='download_thread')
-         self.thread_manager.join(name='monitor_thread')
- class VideoDownloaderSingleton():
-     _instance = None
-     @staticmethod
-     def get_instance(url_manager,request_manager,title=None,video_extention='mp4',download_directory=os.getcwd(),user_agent=None,download=True,get_info=False):
-         if VideoDownloaderSingleton._instance is None:
-             VideoDownloaderSingleton._instance = VideoDownloader(url=url,title=title,video_extention=video_extention,download_directory=download_directory,download=download,get_info=get_info,user_agent=user_agent)
-         elif VideoDownloaderSingleton._instance.title != title or video_extention != VideoDownloaderSingleton._instance.video_extention or url != VideoDownloaderSingleton._instance.url or download_directory != VideoDownloaderSingleton._instance.download_directory or user_agent != VideoDownloaderSingleton._instance.user_agent:
-             VideoDownloaderSingleton._instance = VideoDownloader(url=url,title=title,video_extention=video_extention,download_directory=download_directory,download=download,get_info=get_info,user_agent=user_agent)
-         return VideoDownloaderSingleton._instance
- 
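
A usage sketch for VideoDownloader as defined above; the URLs are placeholders. Construction is again eager: __init__ calls start(), which spawns and then joins the download and monitor threads, so the call below blocks until the queue is drained. yt-dlp must be installed.

    VideoDownloader(
        link=['https://example.com/video1', 'https://example.com/video2'],
        video_directory='videos',
        remove_existing=True,
    )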
- class LinkManager:
-     """
-     LinkManager is a class for managing and extracting links and image links from a web page.
- 
-     Args:
-         url (str): The URL of the web page (default is "https://example.com").
-         source_code (str or None): The source code of the web page (default is None).
-         url_manager (UrlManager or None): An instance of UrlManager (default is None).
-         request_manager (SafeRequest or None): An instance of SafeRequest (default is None).
-         soup_manager (SoupManager or None): An instance of SoupManager (default is None).
-         image_link_tags (str): HTML tags to identify image links (default is 'img').
-         img_link_attrs (str): HTML attributes to identify image link URLs (default is 'src').
-         link_tags (str): HTML tags to identify links (default is 'a').
-         link_attrs (str): HTML attributes to identify link URLs (default is 'href').
-         strict_order_tags (bool): Flag to indicate if tags and attributes should be matched strictly (default is False).
-         img_attr_value_desired (list or None): Desired attribute values for image links (default is None).
-         img_attr_value_undesired (list or None): Undesired attribute values for image links (default is None).
-         link_attr_value_desired (list or None): Desired attribute values for links (default is None).
-         link_attr_value_undesired (list or None): Undesired attribute values for links (default is None).
-         associated_data_attr (list): HTML attributes to associate with the extracted links (default is ["data-title", 'alt', 'title']).
-         get_img (list): HTML attributes used to identify associated images (default is ["data-title", 'alt', 'title']).
- 
-     Methods:
-         re_initialize(): Reinitialize the LinkManager with the current settings.
-         update_url_manager(url_manager): Update the URL manager with a new instance.
-         update_url(url): Update the URL and reinitialize the LinkManager.
-         update_source_code(source_code): Update the source code and reinitialize the LinkManager.
-         update_soup_manager(soup_manager): Update the SoupManager and reinitialize the LinkManager.
-         update_desired(...): Update the desired settings and reinitialize the LinkManager.
-         find_all_desired(...): Find all desired links or image links based on the specified criteria.
-         find_all_domain(): Find all unique domain names in the extracted links.
- 
-     Note:
-         - The LinkManager class helps manage and extract links and image links from web pages.
-         - The class provides flexibility in specifying criteria for link extraction.
-     """
1371
- def __init__(self,url="https://example.com",source_code=None,url_manager=None,request_manager=None,soup_manager=None,image_link_tags='img',img_link_attrs='src',link_tags='a',link_attrs='href',strict_order_tags=False,img_attr_value_desired=None,img_attr_value_undesired=None,link_attr_value_desired=None,link_attr_value_undesired=None,associated_data_attr=["data-title",'alt','title'],get_img=["data-title",'alt','title']):
1372
- if url_manager==None:
1373
- url_manager=UrlManager(url=url)
1374
- self.url_manager= url_manager
1375
- self.url=self.url_manager.url
1376
- if request_manager==None:
1377
- request_manager = SafeRequest(url_manager=self.url_manager)
1378
- self.request_manager=request_manager
1379
- if soup_manager == None:
1380
- soup_manager = SoupManager(url_manager=self.url_manager,request_manager=self.request_manager)
1381
- self.soup_manager = soup_manager
1382
- if source_code != None:
1383
- self.source_code=source_code
1384
- else:
1385
- self.source_code=self.request_manager.source_code_bytes
1386
- if self.source_code != self.soup_manager.source_code:
1387
- self.soup_manager.update_source_code(source_code=self.source_code)
1388
- self.strict_order_tags=strict_order_tags
1389
- self.image_link_tags=image_link_tags
1390
- self.img_link_attrs=img_link_attrs
1391
- self.link_tags=link_tags
1392
- self.link_attrs=link_attrs
1393
- self.img_attr_value_desired=img_attr_value_desired
1394
- self.img_attr_value_undesired=img_attr_value_undesired
1395
- self.link_attr_value_desired=link_attr_value_desired
1396
- self.link_attr_value_undesired=link_attr_value_undesired
1397
- self.associated_data_attr=associated_data_attr
1398
- self.get_img=get_img
1399
- self.all_desired_image_links=self.find_all_desired_links(tag=self.image_link_tags,
1400
- attr=self.img_link_attrs,
1401
- attr_value_desired=self.img_attr_value_desired,
1402
- attr_value_undesired=self.img_attr_value_undesired)
1403
- self.all_desired_links=self.find_all_desired_links(tag=self.link_tags,
1404
- attr=self.link_attrs,
1405
- attr_value_desired=self.link_attr_value_desired,
1406
- attr_value_undesired=self.link_attr_value_undesired,
1407
- associated_data_attr=self.associated_data_attr,
1408
- get_img=get_img)
1409
- def re_initialize(self):
1410
- self.all_desired_image_links=self.find_all_desired_links(tag=self.image_link_tags,attr=self.img_link_attrs,strict_order_tags=self.strict_order_tags,attr_value_desired=self.img_attr_value_desired,attr_value_undesired=self.img_attr_value_undesired)
1411
- self.all_desired_links=self.find_all_desired_links(tag=self.link_tags,attr=self.link_attrs,strict_order_tags=self.strict_order_tags,attr_value_desired=self.link_attr_value_desired,attr_value_undesired=self.link_attr_value_undesired,associated_data_attr=self.associated_data_attr,get_img=self.get_img)
1412
- def update_url_manager(self,url_manager):
1413
- self.url_manager=url_manager
1414
- self.url=self.url_manager.url
1415
- self.request_manager.update_url_manager(url_manager=self.url_manager)
1416
- self.soup_manager.update_url_manager(url_manager=self.url_manager)
1417
- self.source_code=self.soup_manager.source_code
1418
- self.re_initialize()
1419
- def update_url(self,url):
1420
- self.url=url
1421
- self.url_manager.update_url(url=self.url)
1422
- self.url=self.url_manager.url
1423
- self.request_manager.update_url(url=self.url)
1424
- self.soup_manager.update_url(url=self.url)
1425
- self.source_code=self.soup_manager.source_code
1426
- self.re_initialize()
1427
- def update_source_code(self,source_code):
1428
- self.source_code=source_code
1429
- if self.source_code != self.soup_manager.source_code:
1430
- self.soup_manager.update_source_code(source_code=self.source_code)
1431
- self.re_initialize()
1432
- def update_soup_manager(self,soup_manager):
1433
- self.soup_manager=soup_manager
1434
- self.source_code=self.soup_manager.source_code
1435
- self.re_initialize()
1436
- def update_desired(self,img_attr_value_desired=None,img_attr_value_undesired=None,link_attr_value_desired=None,link_attr_value_undesired=None,image_link_tags=None,img_link_attrs=None,link_tags=None,link_attrs=None,strict_order_tags=None,associated_data_attr=None,get_img=None):
1437
- self.strict_order_tags = strict_order_tags or self.strict_order_tags
1438
- self.img_attr_value_desired=img_attr_value_desired or self.img_attr_value_desired
1439
- self.img_attr_value_undesired=img_attr_value_undesired or self.img_attr_value_undesired
1440
- self.link_attr_value_desired=link_attr_value_desired or self.link_attr_value_desired
1441
- self.link_attr_value_undesired=link_attr_value_undesired or self.link_attr_value_undesired
1442
- self.image_link_tags=image_link_tags or self.image_link_tags
1443
- self.img_link_attrs=img_link_attrs or self.img_link_attrs
1444
- self.link_tags=link_tags or self.link_tags
1445
- self.link_attrs=link_attrs or self.link_attrs
1446
- self.associated_data_attr=associated_data_attr or self.associated_data_attr
1447
- self.get_img=get_img or self.get_img
1448
- self.re_initialize()
1449
- def find_all_desired(self,tag='img',attr='src',strict_order_tags=False,attr_value_desired=None,attr_value_undesired=None,associated_data_attr=None,get_img=None):
1450
- def make_list(obj):
1451
- if isinstance(obj,list) or obj==None:
1452
- return obj
1453
- return [obj]
1454
- def get_desired_value(attr,attr_value_desired=None,attr_value_undesired=None):
1455
- if attr_value_desired:
1456
- for value in attr_value_desired:
1457
- if value not in attr:
1458
- return False
1459
- if attr_value_undesired:
1460
- for value in attr_value_undesired:
1461
- if value in attr:
1462
- return False
1463
- return True
1464
- attr_value_desired,attr_value_undesired,associated_data_attr,tags,attribs=make_list(attr_value_desired),make_list(attr_value_undesired),make_list(associated_data_attr),make_list(tag),make_list(attr)
1465
- desired_ls = []
1466
- assiciated_data=[]
1467
- for i,tag in enumerate(tags):
1468
- attribs_list=attribs
1469
- if strict_order_tags:
1470
- if len(attribs)<=i:
1471
- attribs_list=[None]
1472
- else:
1473
- attribs_list=make_list(attribs[i])
1474
- for attr in attribs_list:
1475
- for component in self.soup_manager.soup.find_all(tag):
1476
- if attr in component.attrs and get_desired_value(attr=component[attr],attr_value_desired=attr_value_desired,attr_value_undesired=attr_value_undesired):
1477
- if component[attr] not in desired_ls:
1478
- desired_ls.append(component[attr])
1479
- assiciated_data.append({"value":component[attr]})
1480
- if associated_data_attr:
1481
- for data in associated_data_attr:
1482
- if data in component.attrs:
1483
- assiciated_data[-1][data]=component.attrs[data]
1484
- if get_img and component.attrs[data]:
1485
- if data in get_img and len(component.attrs[data])!=0:
1486
- for each in self.soup_manager.soup.find_all('img'):
1487
- if 'alt' in each.attrs:
1488
- if each.attrs['alt'] == component.attrs[data] and 'src' in each.attrs:
1489
- assiciated_data[-1]['image']=each.attrs['src']
1490
- desired_ls.append(assiciated_data)
1491
- return desired_ls
1492
- def find_all_domain(self):
1493
- domains_ls=[self.url_manager.protocol+'://'+self.url_manager.domain]
1494
- for desired in all_desired[:-1]:
1495
- if url_manager.is_valid_url(desired):
1496
- parse = urlparse(desired)
1497
- domain = parse.scheme+'://'+parse.netloc
1498
- if domain not in domains_ls:
1499
- domains_ls.append(domain)
1500
- def find_all_desired_links(self,tag='img', attr='src',attr_value_desired=None,strict_order_tags=False,attr_value_undesired=None,associated_data_attr=None,all_desired=None,get_img=None):
1501
- all_desired = all_desired or self.find_all_desired(tag=tag,attr=attr,strict_order_tags=strict_order_tags,attr_value_desired=attr_value_desired,attr_value_undesired=attr_value_undesired,associated_data_attr=associated_data_attr,get_img=get_img)
1502
- assiciated_attrs = all_desired[-1]
1503
- valid_assiciated_attrs = []
1504
- desired_links=[]
1505
- for i,attr in enumerate(all_desired[:-1]):
1506
- valid_attr=self.url_manager.make_valid(attr,self.url_manager.protocol+'://'+self.url_manager.domain)
1507
- if valid_attr:
1508
- desired_links.append(valid_attr)
1509
- valid_assiciated_attrs.append(assiciated_attrs[i])
1510
- valid_assiciated_attrs[-1]["link"]=valid_attr
1511
- desired_links.append(valid_assiciated_attrs)
1512
- return desired_links
1513
-
1514
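A usage sketch for LinkManager, assuming the package exports it at top level and the target page is reachable (the URL is a placeholder):

```python
# Hypothetical usage; "https://example.com" stands in for a real page.
from abstract_webtools import LinkManager

link_mgr = LinkManager(url="https://example.com")
# absolute page links; the final list element is a dict of associated attributes
print(link_mgr.all_desired_links[:-1])
# absolute image URLs
print(link_mgr.all_desired_image_links[:-1])
# unique scheme://netloc domains referenced by the page
print(link_mgr.find_all_domain())
```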
- class CrawlManager():
-     def __init__(self, url=None, source_code=None, parse_type="html.parser"):
-         self.url = url
-         self.source_code = source_code
-         self.parse_type = parse_type
-         self.get_new_source_and_url(url=url)
-     def get_new_source_and_url(self, url=None):
-         if url == None:
-             url = self.url
-         self.url = url
-         # assumes a response_manager was attached to this instance elsewhere
-         self.response = self.response_manager.response
-         self.source_code = self.response_manager.source_code
-     def get_classes_and_meta_info(self):
-         tag_name, class_name_1, class_name_2, class_value = 'meta', 'class', 'property', 'og:image'
-         attrs = 'href', 'src'
-         unique_classes, images = discover_classes_and_images(self, tag_name, class_name_1, class_name_2, class_value, attrs)
-         return unique_classes, images
-     def extract_links_from_url(self):
-         """
-         Extracts all href and src links from the current URL's source code.
-
-         Returns:
-             dict: Dictionary containing image links and external links under the parent page.
-         """
-         agg_js = {'images': [], 'external_links': []}
-
-         if self.response != None:
-             attrs = 'href', 'src'
-             href_links, src_links = [], []
-             links = [href_links, src_links]
-             for i, each in enumerate(attrs):
-                 links[i] = [a[attrs[i]] for a in get_find_all_with_attributes(self, attrs[i])]
-             # Convert all links to absolute links
-             absolute_links = [urljoin(self.url, link) for link in links[0] + links[1]]
-             # Separate images and external links
-             images = [link for link in absolute_links if link.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.svg', '.webp'))]
-             external_links = [link for link in absolute_links if urlparse(link).netloc != urlparse(self.url).netloc]
-             agg_js['images'] = images
-             agg_js['external_links'] = external_links
-
-         return agg_js
-
-     @staticmethod
-     def correct_xml(xml_string):
-         # Parse the XML string
-         root = ET.fromstring(xml_string)
-
-         # Loop through each <image:loc> element and correct its text if needed
-         for image_loc in root.findall(".//image:loc", namespaces={'image': 'http://www.google.com/schemas/sitemap-image/1.1'}):
-             # Replace '&' with '&amp;' in the element's text
-             if '&' in image_loc.text:
-                 image_loc.text = image_loc.text.replace('&', '&amp;')
-
-         # Convert the corrected XML back to a string
-         corrected_xml = ET.tostring(root, encoding='utf-8').decode('utf-8')
-         return corrected_xml
-
-     def determine_values(self):
-         # This is just a mockup. In a real application, you'd analyze the URL or its content.
-
-         # Assuming a blog site
-         if 'blog' in self.url:
-             if '2023' in self.url:  # Assuming it's a current-year article
-                 return ('weekly', '0.8')
-             else:
-                 return ('monthly', '0.6')
-         elif 'contact' in self.url:
-             return ('yearly', '0.3')
-         else:  # Homepage or main categories
-             return ('weekly', '1.0')
-     def crawl(self, url, max_depth=3, depth=1, visited=None):
-         if visited is None:
-             visited = set()
-         if depth > max_depth:
-             return []
-
-         if url in visited:
-             return []
-
-         visited.add(url)
-
-         try:
-             links = [a['href'] for a in self.soup.find_all('a', href=True)]
-             valid_links = []
-
-             for link in links:
-                 parsed_link = urlparse(link)
-                 base_url = "{}://{}".format(parsed_link.scheme, parsed_link.netloc)
-
-                 if base_url == url:  # Avoiding external URLs
-                     final_link = urljoin(url, parsed_link.path)
-                     if final_link not in valid_links:
-                         valid_links.append(final_link)
-
-             for link in valid_links:
-                 self.crawl(link, max_depth, depth + 1, visited)
-
-             return valid_links
-
-         except Exception as e:
-             print(f"Error crawling {url}: {e}")
-             return []
-
-     # Define or import required functions here, like get_all_website_links, determine_values,
-     # discover_classes_and_meta_images, and extract_links_from_url.
-     def get_meta_info(self):
-         meta_info = {}
-         # Fetch the title if available (assumes a BeautifulSoup object at self.soup)
-         title_tag = self.soup.find('title')
-         if title_tag and title_tag.string:
-             meta_info["title"] = title_tag.string
-         # Fetch meta tags
-         for meta_tag in self.soup.find_all('meta'):
-             name = meta_tag.get('name') or meta_tag.get('property')
-             if name:
-                 content = meta_tag.get('content')
-                 if content:
-                     meta_info[name] = content
-
-         return meta_info
-     def generate_sitemap(self, domain):
-         with open('sitemap.xml', 'w', encoding='utf-8') as f:
-             string = '<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1">\n'
-
-             for url in self.all_site_links:
-                 string += f'  <url>\n    <loc>{url}</loc>\n'
-                 preprocess = []
-                 self.get_new_source_and_url(url=url)
-                 links = self.extract_links_from_url()
-
-                 for img in links['images']:
-                     if str(img).lower() not in preprocess:
-                         try:
-                             escaped_img = img.replace('&', '&amp;')
-
-                             str_write = f'    <image:image>\n      <image:loc>{escaped_img}</image:loc>\n    </image:image>\n'
-                             string += str_write
-                         except:
-                             pass
-                         preprocess.append(str(img).lower())
-                 frequency, priority = self.determine_values()
-                 string += f'    <changefreq>{frequency}</changefreq>\n'
-                 string += f'    <priority>{priority}</priority>\n'
-                 string += f'  </url>\n'
-
-             string += '</urlset>\n'
-             f.write(string)
-         # Output summary
-         print(f'Sitemap saved to sitemap.xml with {len(self.all_site_links)} URLs.')
-
-         # Output class and link details
-         for url in self.all_site_links:
-             print(f"\nDetails for {url}:")
-             classes, meta_img_refs = self.get_classes_and_meta_info()
-
-             print("\nClasses with href or src attributes:")
-             for class_name in classes:
-                 print(f"\t{class_name}")
-
-             print("\nMeta Image References:")
-             for img_ref in meta_img_refs:
-                 print(f"\t{img_ref}")
-
-             links = self.extract_links_from_url()
-
-             print("\nImages:")
-             for img in links['images']:
-                 print(f"\t{img}")
-
-             print("\nExternal Links:")
-             for ext_link in links['external_links']:
-                 print(f"\t{ext_link}")
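The crawl method above recursively walks same-site links up to max_depth. A self-contained sketch of that pattern with requests and BeautifulSoup (function name and URL are illustrative; the real class goes through its manager objects instead of raw requests):

```python
# Sketch of the same-site crawl pattern; crawl_site is a hypothetical name.
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def crawl_site(url, max_depth=3, depth=1, visited=None):
    if visited is None:
        visited = set()
    if depth > max_depth or url in visited:
        return []
    visited.add(url)
    try:
        soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    except requests.RequestException as e:
        print(f"Error crawling {url}: {e}")
        return []
    host = urlparse(url).netloc
    links = []
    for a in soup.find_all('a', href=True):
        link = urljoin(url, a['href'])
        # stay on the same host and avoid duplicates
        if urlparse(link).netloc == host and link not in links:
            links.append(link)
    for link in links:
        crawl_site(link, max_depth, depth + 1, visited)
    return links

# crawl_site("https://example.com", max_depth=2)
```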
- class CrawlManagerSingleton():
-     _instance = None
-     @staticmethod
-     def get_instance(url=None, source_code=None, parse_type="html.parser"):
-         if CrawlManagerSingleton._instance is None:
-             CrawlManagerSingleton._instance = CrawlManager(url=url, parse_type=parse_type, source_code=source_code)
-         elif parse_type != CrawlManagerSingleton._instance.parse_type or url != CrawlManagerSingleton._instance.url or source_code != CrawlManagerSingleton._instance.source_code:
-             CrawlManagerSingleton._instance = CrawlManager(url=url, parse_type=parse_type, source_code=source_code)
-         return CrawlManagerSingleton._instance
- class DynamicRateLimiterManager:
-     def __init__(self, service_name='ethereum'):
-         # Key: service name, Value: DynamicRateLimiter instance
-         self.services = {}
-         self.service_name = service_name
-         self.add_service(service_name=service_name)
-     def add_service(self, service_name="default", low_limit=10, high_limit=30, limit_epoch=60, starting_tokens=10, epoch_cycle_adjustment=True):
-         if service_name in self.services:
-             print(f"Service {service_name} already exists!")
-             return
-         self.services[service_name] = DynamicRateLimiter(low_limit=low_limit, high_limit=high_limit, limit_epoch=limit_epoch, starting_tokens=starting_tokens, epoch_cycle_adjustment=epoch_cycle_adjustment)
-
-     def request(self, request_url, service_name=None):
-         self.service_name = service_name or self.service_name
-         if self.service_name not in self.services:
-             self.add_service(self.service_name)
-
-         limiter = self.services[self.service_name]
-         can_request = self.get_limited_request(limiter, request_url)
-
-         # Log the outcome of the request attempt
-         self.log_request(self.service_name, can_request)
-
-         return can_request
-
-     def log_request(self, service_name, success):
-         # Placeholder logging method; replace with an actual logging implementation
-         print(f"[{service_name}] Request {'succeeded' if success else 'denied'}. Current tokens: {self.services[service_name].get_current_tokens()}")
-
-     def get_limited_request(self, limiter, request_url):
-         # Check with the rate limiter whether we can make a request
-         while True:
-             print(limiter)
-             if not limiter.request():
-                 print(f"Rate limit reached for {self.service_name}. Waiting for the next epoch...")
-                 sleep_count_down(self.services[self.service_name].get_sleep()["current_sleep"])  # Wait out the computed sleep interval
-             # Make the actual request
-             url_manager = UrlManager(url=request_url)
-             request_manager = SafeRequest(url_manager=url_manager)
-
-             # On a rate-limit error (usually a 429 status code, but it can vary), adjust the rate limiter
-             if request_manager.response.status_code == 429:
-                 print(request_manager.response.json())
-                 self.services[self.service_name].request_tracker(False)
-                 print(f"Rate limited by {self.service_name}. Adjusted limit. Retrying...")
-                 if len(self.services[self.service_name].calculate_tokens()["successful"]) < 2:
-                     sleep_count_down(self.services[self.service_name].limit_epoch)  # Wait for the limit_epoch duration
-                 else:
-                     self.services[self.service_name].current_limit -= 1
-                     sleep_count_down(self.services[self.service_name].limit_epoch / len(self.services[self.service_name].calculate_tokens()["successful"]))  # Wait a fraction of the limit_epoch duration
-             # Return the data if the request was successful
-             if request_manager.response.status_code == 200:
-                 self.services[self.service_name].request_tracker(True)
-                 return request_manager.response.json()
-             elif request_manager.response.status_code not in [200, 429]:
-                 print(f"Unexpected response: {request_manager.response.status_code}. Message: {request_manager.response.text}")
-                 return None
- class DynamicRateLimiter:
-     def __init__(self, low_limit, high_limit, limit_epoch, starting_tokens=None, epoch_cycle_adjustment: int = None):
-         self.low_limit = low_limit
-         self.high_limit = high_limit
-         self.limit_epoch = limit_epoch  # in seconds
-         self.request_status_json = {"successful": [], "unsuccessful": [], "last_requested": get_time_stamp(), "first_requested": get_time_stamp(), "epoch_left": self.limit_epoch, "last_fail": get_time_stamp(), "count_since_fail": 0}
-         self.current_limit = starting_tokens or low_limit  # Default to low_limit if starting_tokens isn't provided
-         self.epoch_cycle_adjustment = epoch_cycle_adjustment
-         # Additional attributes for tracking adjustment logic
-         self.last_adjusted_time = get_time_stamp()
-         self.successful_epochs_since_last_adjustment = 0
-         self.request_count_in_current_epoch = 0
-
-     def _refill_tokens(self):
-         # Available tokens are the current limit minus the requests made this epoch
-         self.tokens = min(self.current_limit, self.get_current_tokens())
-     def request_tracker(self, success):
-         if success:
-             self.request_status_json["successful"].append(get_time_stamp())
-         else:
-             self.request_status_json["unsuccessful"].append(get_time_stamp())
-             self.request_status_json["last_fail"] = get_time_stamp()
-             self.request_status_json["count_since_fail"] = 0
-             self.adjust_limit()
-         self.request_status_json["last_requested"] = get_time_stamp()
-     def calculate_tokens(self):
-         successful = []
-         for each in self.request_status_json["successful"]:
-             if (get_time_stamp() - each) < self.limit_epoch:
-                 successful.append(each)
-         self.request_status_json["successful"] = successful
-         unsuccessful = []
-         for each in self.request_status_json["unsuccessful"]:
-             if (get_time_stamp() - each) < self.limit_epoch:
-                 unsuccessful.append(each)
-         self.request_status_json["unsuccessful"] = unsuccessful
-         if len(successful) == 0 and len(unsuccessful) == 0:
-             pass
-         elif len(successful) != 0 and len(unsuccessful) == 0:
-             self.request_status_json["first_requested"] = successful[0]
-         elif len(successful) == 0 and len(unsuccessful) != 0:
-             self.request_status_json["first_requested"] = unsuccessful[0]
-         else:
-             self.request_status_json["first_requested"] = min(unsuccessful[0], successful[0])
-         self.request_status_json["epoch_left"] = self.limit_epoch - (self.request_status_json["last_requested"] - self.request_status_json["first_requested"])
-
-         return self.request_status_json
-     def get_current_tokens(self):
-         self.request_status_json = self.calculate_tokens()
-         total_requests = len(self.request_status_json["successful"]) + len(self.request_status_json["unsuccessful"])
-         return max(0, self.current_limit - total_requests)
-     def get_sleep(self):
-         self.request_status_json = self.calculate_tokens()
-         self.request_status_json["current_sleep"] = self.request_status_json["epoch_left"] / max(1, self.get_current_tokens())
-         return self.request_status_json
-     def request(self):
-         self._refill_tokens()
-         if self.tokens > 0:
-             return True  # The request can be made
-         else:
-             if self.tokens == 0:
-                 self.request_status_json["count_since_fail"] += 1
-                 if self.epoch_cycle_adjustment != None:
-                     if self.request_status_json["count_since_fail"] >= self.epoch_cycle_adjustment:
-                         self.current_limit = min(self.current_limit + 1, self.high_limit)
-             return False  # The request cannot be made
-     def _adjust_limit(self):
-         current_time = get_time_stamp()
-         if current_time - self.last_adjusted_time >= self.limit_epoch:
-             if len(self.calculate_tokens()["successful"]) >= self.tokens:
-                 # We hit the rate limit this epoch; decrease our limit
-                 self.tokens = max(1, self.tokens - 1)
-             else:
-                 self.successful_epochs_since_last_adjustment += 1
-                 if self.successful_epochs_since_last_adjustment >= 5:
-                     # We've had 5 successful epochs; increase our limit
-                     self.current_limit = min(self.high_limit, self.tokens + 1)
-                     self.successful_epochs_since_last_adjustment = 0
-
-             # Reset our counters for the new epoch
-             self.last_adjusted_time = current_time
-             self.request_count_in_current_epoch = 0
-     def adjust_limit(self):
-         # Set the token count to the number of successful requests in the current epoch
-         self.tokens = len(self.calculate_tokens()["successful"])
-
-         # Adjust the current limit to match
-         self.current_limit = self.tokens
-
-         # Log the adjustment
-         print(f"Adjusted tokens to: {self.tokens} and current_limit to: {self.current_limit}")
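get_sleep spreads the time left in the epoch evenly across the remaining tokens, so callers sleep epoch_left / max(1, tokens) between requests. A worked example with illustrative numbers:

```python
# With 40 s left in the epoch and 8 tokens remaining, requests space out evenly.
epoch_left = 40     # seconds remaining in the current rate-limit epoch
tokens_left = 8     # requests still allowed this epoch
current_sleep = epoch_left / max(1, tokens_left)
print(current_sleep)  # 5.0 seconds between requests
```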
- class DynamicRateLimiterManagerSingleton:
-     _instance = None
-     @staticmethod
-     def get_instance(service_name="default", low_limit=10, high_limit=30, limit_epoch=60, starting_tokens=10, epoch_cycle_adjustment=True):
-         if DynamicRateLimiterManagerSingleton._instance is None:
-             manager = DynamicRateLimiterManager(service_name=service_name)
-             # __init__ registers the service with default limits; rebuild it here
-             # so the requested limits actually take effect
-             manager.services[service_name] = DynamicRateLimiter(low_limit=low_limit, high_limit=high_limit, limit_epoch=limit_epoch, starting_tokens=starting_tokens, epoch_cycle_adjustment=epoch_cycle_adjustment)
-             DynamicRateLimiterManagerSingleton._instance = manager
-         return DynamicRateLimiterManagerSingleton._instance
-
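An end-to-end usage sketch of the rate-limited request path (the service name and URL are placeholders):

```python
# Hypothetical values; request() sleeps as needed and returns parsed JSON or None.
manager = DynamicRateLimiterManagerSingleton.get_instance(
    service_name='ethereum', low_limit=10, high_limit=30, limit_epoch=60)
data = manager.request('https://api.example.com/v1/data', service_name='ethereum')
if data is not None:
    print(data)
```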