StreamingCommunity 1.9.8__py3-none-any.whl → 1.9.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic; see the registry's advisory page for more details.

Files changed (93)
  1. StreamingCommunity/Api/Player/Helper/Vixcloud/js_parser.py +143 -0
  2. StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +145 -0
  3. StreamingCommunity/Api/Player/ddl.py +89 -0
  4. StreamingCommunity/Api/Player/maxstream.py +151 -0
  5. StreamingCommunity/Api/Player/supervideo.py +194 -0
  6. StreamingCommunity/Api/Player/vixcloud.py +273 -0
  7. StreamingCommunity/Api/Site/1337xx/__init__.py +51 -0
  8. StreamingCommunity/Api/Site/1337xx/costant.py +15 -0
  9. StreamingCommunity/Api/Site/1337xx/site.py +86 -0
  10. StreamingCommunity/Api/Site/1337xx/title.py +66 -0
  11. StreamingCommunity/Api/Site/altadefinizione/__init__.py +51 -0
  12. StreamingCommunity/Api/Site/altadefinizione/costant.py +15 -0
  13. StreamingCommunity/Api/Site/altadefinizione/film.py +74 -0
  14. StreamingCommunity/Api/Site/altadefinizione/site.py +89 -0
  15. StreamingCommunity/Api/Site/animeunity/__init__.py +51 -0
  16. StreamingCommunity/Api/Site/animeunity/costant.py +15 -0
  17. StreamingCommunity/Api/Site/animeunity/film_serie.py +135 -0
  18. StreamingCommunity/Api/Site/animeunity/site.py +167 -0
  19. StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +97 -0
  20. StreamingCommunity/Api/Site/cb01new/__init__.py +52 -0
  21. StreamingCommunity/Api/Site/cb01new/costant.py +15 -0
  22. StreamingCommunity/Api/Site/cb01new/film.py +73 -0
  23. StreamingCommunity/Api/Site/cb01new/site.py +76 -0
  24. StreamingCommunity/Api/Site/ddlstreamitaly/__init__.py +58 -0
  25. StreamingCommunity/Api/Site/ddlstreamitaly/costant.py +16 -0
  26. StreamingCommunity/Api/Site/ddlstreamitaly/series.py +146 -0
  27. StreamingCommunity/Api/Site/ddlstreamitaly/site.py +95 -0
  28. StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +85 -0
  29. StreamingCommunity/Api/Site/guardaserie/__init__.py +53 -0
  30. StreamingCommunity/Api/Site/guardaserie/costant.py +15 -0
  31. StreamingCommunity/Api/Site/guardaserie/series.py +199 -0
  32. StreamingCommunity/Api/Site/guardaserie/site.py +86 -0
  33. StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +110 -0
  34. StreamingCommunity/Api/Site/ilcorsaronero/__init__.py +52 -0
  35. StreamingCommunity/Api/Site/ilcorsaronero/costant.py +15 -0
  36. StreamingCommunity/Api/Site/ilcorsaronero/site.py +63 -0
  37. StreamingCommunity/Api/Site/ilcorsaronero/title.py +46 -0
  38. StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +141 -0
  39. StreamingCommunity/Api/Site/mostraguarda/__init__.py +49 -0
  40. StreamingCommunity/Api/Site/mostraguarda/costant.py +15 -0
  41. StreamingCommunity/Api/Site/mostraguarda/film.py +99 -0
  42. StreamingCommunity/Api/Site/streamingcommunity/__init__.py +56 -0
  43. StreamingCommunity/Api/Site/streamingcommunity/costant.py +15 -0
  44. StreamingCommunity/Api/Site/streamingcommunity/film.py +75 -0
  45. StreamingCommunity/Api/Site/streamingcommunity/series.py +206 -0
  46. StreamingCommunity/Api/Site/streamingcommunity/site.py +137 -0
  47. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +123 -0
  48. StreamingCommunity/Api/Template/Class/SearchType.py +101 -0
  49. StreamingCommunity/Api/Template/Util/__init__.py +5 -0
  50. StreamingCommunity/Api/Template/Util/get_domain.py +173 -0
  51. StreamingCommunity/Api/Template/Util/manage_ep.py +179 -0
  52. StreamingCommunity/Api/Template/Util/recall_search.py +37 -0
  53. StreamingCommunity/Api/Template/__init__.py +3 -0
  54. StreamingCommunity/Api/Template/site.py +87 -0
  55. StreamingCommunity/Lib/Downloader/HLS/downloader.py +946 -0
  56. StreamingCommunity/Lib/Downloader/HLS/proxyes.py +110 -0
  57. StreamingCommunity/Lib/Downloader/HLS/segments.py +561 -0
  58. StreamingCommunity/Lib/Downloader/MP4/downloader.py +155 -0
  59. StreamingCommunity/Lib/Downloader/TOR/downloader.py +296 -0
  60. StreamingCommunity/Lib/Downloader/__init__.py +5 -0
  61. StreamingCommunity/Lib/FFmpeg/__init__.py +4 -0
  62. StreamingCommunity/Lib/FFmpeg/capture.py +170 -0
  63. StreamingCommunity/Lib/FFmpeg/command.py +296 -0
  64. StreamingCommunity/Lib/FFmpeg/util.py +249 -0
  65. StreamingCommunity/Lib/M3U8/__init__.py +6 -0
  66. StreamingCommunity/Lib/M3U8/decryptor.py +164 -0
  67. StreamingCommunity/Lib/M3U8/estimator.py +176 -0
  68. StreamingCommunity/Lib/M3U8/parser.py +666 -0
  69. StreamingCommunity/Lib/M3U8/url_fixer.py +52 -0
  70. StreamingCommunity/Lib/TMBD/__init__.py +2 -0
  71. StreamingCommunity/Lib/TMBD/obj_tmbd.py +39 -0
  72. StreamingCommunity/Lib/TMBD/tmdb.py +346 -0
  73. StreamingCommunity/Upload/update.py +68 -0
  74. StreamingCommunity/Upload/version.py +5 -0
  75. StreamingCommunity/Util/_jsonConfig.py +204 -0
  76. StreamingCommunity/Util/call_stack.py +42 -0
  77. StreamingCommunity/Util/color.py +20 -0
  78. StreamingCommunity/Util/console.py +12 -0
  79. StreamingCommunity/Util/ffmpeg_installer.py +311 -0
  80. StreamingCommunity/Util/headers.py +147 -0
  81. StreamingCommunity/Util/logger.py +53 -0
  82. StreamingCommunity/Util/message.py +64 -0
  83. StreamingCommunity/Util/os.py +554 -0
  84. StreamingCommunity/Util/table.py +229 -0
  85. StreamingCommunity/__init__.py +0 -0
  86. StreamingCommunity/run.py +2 -2
  87. {StreamingCommunity-1.9.8.dist-info → StreamingCommunity-1.9.90.dist-info}/METADATA +7 -10
  88. StreamingCommunity-1.9.90.dist-info/RECORD +92 -0
  89. {StreamingCommunity-1.9.8.dist-info → StreamingCommunity-1.9.90.dist-info}/WHEEL +1 -1
  90. {StreamingCommunity-1.9.8.dist-info → StreamingCommunity-1.9.90.dist-info}/entry_points.txt +0 -1
  91. StreamingCommunity-1.9.8.dist-info/RECORD +0 -7
  92. {StreamingCommunity-1.9.8.dist-info → StreamingCommunity-1.9.90.dist-info}/LICENSE +0 -0
  93. {StreamingCommunity-1.9.8.dist-info → StreamingCommunity-1.9.90.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,137 @@
1
+ # 10.12.23
2
+
3
+ import sys
4
+ import json
5
+ import logging
6
+ import secrets
7
+
8
+
9
+ # External libraries
10
+ import httpx
11
+ from bs4 import BeautifulSoup
12
+
13
+
14
+ # Internal utilities
15
+ from StreamingCommunity.Util.console import console
16
+ from StreamingCommunity.Util._jsonConfig import config_manager
17
+ from StreamingCommunity.Util.headers import get_headers
18
+ from StreamingCommunity.Util.table import TVShowManager
19
+
20
+
21
+
22
+ # Logic class
23
+ from StreamingCommunity.Api.Template import get_select_title
24
+ from StreamingCommunity.Api.Template.Util import search_domain
25
+ from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
26
+
27
+
28
+ # Config
29
+ from .costant import SITE_NAME
30
+
31
+
32
+ # Variable
33
+ media_search_manager = MediaManager()
34
+ table_show_manager = TVShowManager()
35
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
36
+
37
+
38
def get_version(text: str):
    """
    Extract the site version from a page's HTML.

    The page embeds an Inertia payload as JSON in the `data-page`
    attribute of the `#app` div; the version field is read from there.

    Parameters:
        text (str): The HTML text of the webpage.

    Returns:
        str: The version extracted from the webpage.

    Raises:
        Exception: Re-raised when the payload cannot be located or parsed.
    """
    try:
        # Locate the Inertia app container and decode its JSON payload
        app_div = BeautifulSoup(text, "html.parser").find("div", {"id": "app"})
        page_payload = json.loads(app_div.get("data-page"))
        version = page_payload['version']

        console.print(f"[cyan]Get version [white]=> [red]{version} \n")
        return version

    except Exception as e:
        logging.error(f"Error extracting version: {e}")
        raise
63
+
64
+
65
def get_version_and_domain():
    """
    Retrieve the current version and domain of the site.

    Steps:
    - Resolve a working domain via `search_domain` (may update the config).
    - Fetch the site home page and extract the version from its HTML.
    - On any failure, fall back to a random hex token so the caller can
      still send a plausible version header.

    Returns:
        tuple: (version, domain_to_use)
    """
    # Find new domain if prev dont work
    domain_to_use, base_url = search_domain(SITE_NAME, f"https://{SITE_NAME}")

    # Extract version from the response
    try:
        response = httpx.get(
            url=base_url,
            headers={
                'user-agent': get_headers()
            },
            timeout=max_timeout
        )
        version = get_version(response.text)

    except Exception as e:
        # Fix: was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit and hid the real failure; narrow it and log the cause.
        logging.error(f"Failed to fetch version from {base_url}: {e}")
        console.print("[green]Auto generate version ...")
        version = secrets.token_hex(32 // 2)

    return version, domain_to_use
91
+
92
+
93
def title_search(title_search: str, domain: str) -> int:
    """
    Search for titles based on a search query.

    Parameters:
    - title_search (str): The title to search for.
    - domain (str): The domain to search on.

    Returns:
        int: The number of titles found, or 0 when the search request fails.
    """
    media_search_manager.clear()
    table_show_manager.clear()

    try:
        response = httpx.get(
            url=f"https://{SITE_NAME}.{domain}/api/search?q={title_search.replace(' ', '+')}",
            headers={'user-agent': get_headers()},
            timeout=max_timeout
        )
        response.raise_for_status()

    except Exception as e:
        console.print(f"Site: {SITE_NAME}, request search error: {e}")

        # Fix: the original fell through after printing and then referenced
        # `response`, raising NameError; report zero results instead.
        return 0

    # Add found titles to media search manager
    for dict_title in response.json()['data']:
        media_search_manager.add_media({
            'id': dict_title.get('id'),
            'slug': dict_title.get('slug'),
            'name': dict_title.get('name'),
            'type': dict_title.get('type'),
            'date': dict_title.get('last_air_date'),
            'score': dict_title.get('score')
        })

    # Return the number of titles found
    return media_search_manager.get_length()
131
+
132
+
133
def run_get_select_title():
    """
    Show the collected titles in a table and prompt the user to pick one.

    Returns:
        The media item chosen by the user.
    """
    chosen_title = get_select_title(table_show_manager, media_search_manager)
    return chosen_title
@@ -0,0 +1,123 @@
1
+ # 01.03.24
2
+
3
+ import logging
4
+
5
+
6
+ # External libraries
7
+ import httpx
8
+
9
+
10
+ # Internal utilities
11
+ from StreamingCommunity.Util.headers import get_headers
12
+ from StreamingCommunity.Util._jsonConfig import config_manager
13
+ from StreamingCommunity.Api.Player.Helper.Vixcloud.util import Season, EpisodeManager
14
+
15
+
16
+ # Variable
17
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
18
+
19
+
20
+ class ScrapeSerie:
21
+ def __init__(self, site_name: str):
22
+ """
23
+ Initialize the ScrapeSerie class for scraping TV series information.
24
+
25
+ Args:
26
+ site_name (str): Name of the streaming site to scrape from
27
+ """
28
+ self.is_series = False
29
+ self.headers = {'user-agent': get_headers()}
30
+ self.base_name = site_name
31
+ self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
32
+
33
+ def setup(self, version: str = None, media_id: int = None, series_name: str = None):
34
+ """
35
+ Set up the scraper with specific media details.
36
+
37
+ Args:
38
+ version (str, optional): Site version for request headers
39
+ media_id (int, optional): Unique identifier for the media
40
+ series_name (str, optional): Name of the TV series
41
+ """
42
+ self.version = version
43
+ self.media_id = media_id
44
+
45
+ # If series name is provided, initialize series-specific managers
46
+ if series_name is not None:
47
+ self.is_series = True
48
+ self.series_name = series_name
49
+ self.season_manager = None
50
+ self.episode_manager: EpisodeManager = EpisodeManager()
51
+
52
+ def collect_info_title(self) -> None:
53
+ """
54
+ Retrieve season information for a TV series from the streaming site.
55
+
56
+ Raises:
57
+ Exception: If there's an error fetching season information
58
+ """
59
+ self.headers = {
60
+ 'user-agent': get_headers(),
61
+ 'x-inertia': 'true',
62
+ 'x-inertia-version': self.version,
63
+ }
64
+
65
+ try:
66
+
67
+ response = httpx.get(
68
+ url=f"https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}",
69
+ headers=self.headers,
70
+ timeout=max_timeout
71
+ )
72
+ response.raise_for_status()
73
+
74
+ # Extract seasons from JSON response
75
+ json_response = response.json().get('props')
76
+
77
+ # Collect info about season
78
+ self.season_manager = Season(json_response.get('title'))
79
+ self.season_manager.collect_images(self.base_name, self.domain)
80
+
81
+ # Collect first episode info
82
+ for i, ep in enumerate(json_response.get('loadedSeason').get('episodes')):
83
+ self.season_manager.episodes.add(ep)
84
+ self.season_manager.episodes.get(i).collect_image(self.base_name, self.domain)
85
+
86
+ except Exception as e:
87
+ logging.error(f"Error collecting season info: {e}")
88
+ raise
89
+
90
+ def collect_info_season(self, number_season: int) -> None:
91
+ """
92
+ Retrieve episode information for a specific season.
93
+
94
+ Args:
95
+ number_season (int): Season number to fetch episodes for
96
+
97
+ Raises:
98
+ Exception: If there's an error fetching episode information
99
+ """
100
+ self.headers = {
101
+ 'user-agent': get_headers(),
102
+ 'x-inertia': 'true',
103
+ 'x-inertia-version': self.version,
104
+ }
105
+
106
+ try:
107
+ response = httpx.get(
108
+ url=f'https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}/stagione-{number_season}',
109
+ headers=self.headers,
110
+ timeout=max_timeout
111
+ )
112
+ response.raise_for_status()
113
+
114
+ # Extract episodes from JSON response
115
+ json_response = response.json().get('props').get('loadedSeason').get('episodes')
116
+
117
+ # Add each episode to the episode manager
118
+ for dict_episode in json_response:
119
+ self.episode_manager.add(dict_episode)
120
+
121
+ except Exception as e:
122
+ logging.error(f"Error collecting title season info: {e}")
123
+ raise
@@ -0,0 +1,101 @@
1
+ # 07.07.24
2
+
3
+ from typing import List, TypedDict
4
+
5
+
6
class MediaItemData(TypedDict, total=False):
    """
    Schema of the raw dict used to build a media item.

    total=False: every key is optional, since each site provides only a
    subset of these fields. The trailing tags group fields by origin:
    GENERAL (all sites), TOR (torrent sites), SC (StreamingCommunity).
    """
    id: int       # GENERAL
    name: str     # GENERAL
    type: str     # GENERAL
    url: str      # GENERAL
    size: str     # GENERAL
    score: str    # GENERAL
    date: str     # GENERAL
    desc: str     # GENERAL

    seeder: int   # TOR
    leecher: int  # TOR

    slug: str     # SC
20
+
21
+
22
+
23
class MediaItemMeta(type):
    """
    Metaclass that equips classes with dynamic attribute storage.

    Classes created with this metaclass get:
      - an __init__ that accepts arbitrary keyword arguments and stores
        each one as an instance attribute;
      - a __getattr__ that resolves missing attributes to None instead of
        raising AttributeError;
      - a __setattr__ that writes straight into the instance __dict__.
    """

    def __new__(cls, name, bases, dct):
        def dynamic_init(self, **kwargs):
            for attr_name, attr_value in kwargs.items():
                setattr(self, attr_name, attr_value)

        def dynamic_getattr(self, item):
            # Unset attributes read as None rather than raising
            return self.__dict__.get(item, None)

        def dynamic_setattr(self, key, value):
            self.__dict__[key] = value

        dct['__init__'] = dynamic_init
        dct['__getattr__'] = dynamic_getattr
        dct['__setattr__'] = dynamic_setattr

        return super().__new__(cls, name, bases, dct)
42
+
43
+
44
class MediaItem(metaclass=MediaItemMeta):
    """
    A single search result.

    The metaclass injects __init__/__getattr__/__setattr__, so instances
    accept arbitrary keyword arguments and unset attributes read as None;
    the annotations below only document the commonly used fields.
    """
    id: int       # GENERAL
    name: str     # GENERAL
    type: str     # GENERAL
    url: str      # GENERAL
    size: str     # GENERAL
    score: str    # GENERAL
    date: str     # GENERAL
    desc: str     # GENERAL

    seeder: int   # TOR
    leecher: int  # TOR

    slug: str     # SC
58
+
59
+
60
class MediaManager:
    """
    Ordered container of MediaItem objects produced by a title search.
    """

    def __init__(self):
        self.media_list: List[MediaItem] = []

    def add_media(self, data: dict) -> None:
        """
        Add media to the list.

        Args:
            data (dict): Media data to add.
        """
        item = MediaItem(**data)
        self.media_list.append(item)

    def get(self, index: int) -> MediaItem:
        """
        Get a media item from the list by index.

        Args:
            index (int): The index of the media item to retrieve.

        Returns:
            MediaItem: The media item at the specified index.
        """
        return self.media_list[index]

    def get_length(self) -> int:
        """
        Get the number of media items in the list.

        Returns:
            int: Number of media items.
        """
        return len(self.media_list)

    def clear(self) -> None:
        """
        Remove every stored media item.
        """
        del self.media_list[:]

    def __str__(self):
        return f"MediaManager(num_media={len(self.media_list)})"
@@ -0,0 +1,5 @@
1
+ # 23.11.24
2
+
3
+ from .recall_search import execute_search
4
+ from .get_domain import search_domain
5
+ from .manage_ep import manage_selection, map_episode_title, validate_episode_selection, validate_selection
@@ -0,0 +1,173 @@
1
+ # 18.06.24
2
+
3
+ import sys
4
+ from urllib.parse import urlparse
5
+
6
+
7
+ # External libraries
8
+ import httpx
9
+ from googlesearch import search
10
+
11
+
12
+ # Internal utilities
13
+ from StreamingCommunity.Util.headers import get_headers
14
+ from StreamingCommunity.Util.console import console, msg
15
+ from StreamingCommunity.Util._jsonConfig import config_manager
16
+
17
+
18
def google_search(query):
    """
    Perform a Google search and return the first result.

    Args:
        query (str): The search query to execute on Google.

    Returns:
        str: The first URL result from the search, or None if no result is found.
    """
    # Ask for a single result and pull it off the iterator
    results_iterator = search(query, num_results=1)
    top_hit = next(results_iterator, None)

    if not top_hit:
        console.print("[red]No results found.[/red]")

    return top_hit
38
+
39
def get_final_redirect_url(initial_url, max_timeout):
    """
    Follow redirects from the initial URL and return the final URL after all redirects.

    Args:
        initial_url (str): The URL to start with and follow redirects.
        max_timeout: Timeout (seconds) for the HTTP request.

    Returns:
        The final URL after all redirects are followed, or None on any failure
        (including HTTP 403, which usually means the IP was banned).
    """
    # Create a client with redirects enabled
    try:
        with httpx.Client(
            headers={
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
                'User-Agent': get_headers()
            },
            follow_redirects=True,
            timeout=max_timeout

        ) as client:
            response = client.get(initial_url)

            if response.status_code == 403:
                console.print("[bold red]The owner of this website has banned your IP[/bold red]")
                # Fix: was a bare `raise` with no active exception, which raised
                # RuntimeError("No active exception to re-raise"); raise a
                # meaningful error instead (still handled by the except below).
                raise Exception("IP banned (HTTP 403)")

            response.raise_for_status()

            # Capture the final URL after all redirects
            final_url = response.url

            return final_url

    except Exception as e:
        console.print(f"\n[cyan]Test url[white]: [red]{initial_url}, [cyan]error[white]: [red]{e}")
        return None
78
+
79
def search_domain(site_name: str, base_url: str):
    """
    Search for a valid domain for the given site name and base URL.

    First tries the domain stored in the config; on failure falls back to a
    Google search, follows redirects of matching hits, interactively asks the
    user to confirm, and persists the new domain to config.json.

    Parameters:
    - site_name (str): The name of the site to search the domain for.
    - base_url (str): The base URL to construct complete URLs.

    Returns:
        tuple: The found domain and the complete URL.

    Raises:
        Exception: When no working domain can be found.
    """
    # Extract config domain
    max_timeout = config_manager.get_int("REQUESTS", "timeout")
    domain = str(config_manager.get_dict("SITE", site_name)['domain'])

    try:
        # Test the current domain
        with httpx.Client(
            headers={
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
                'User-Agent': get_headers()
            },
            follow_redirects=True,
            timeout=max_timeout
        ) as client:
            response_follow = client.get(f"{base_url}.{domain}")
            response_follow.raise_for_status()

    except Exception as e:
        # NOTE(review): `e` is unused here; the fallback runs for ANY failure.
        # The search query is the bare site name taken from the base URL.
        query = base_url.split("/")[-1]

        # Perform a Google search with multiple results
        search_results = list(search(query, num_results=10, lang="it"))
        console.print(f"\nGoogle search results: {search_results}")

        def normalize_for_comparison(url):
            """Normalize URL by removing protocol, www, and trailing slashes"""
            url = url.lower()
            url = url.replace("https://", "").replace("http://", "")
            url = url.replace("www.", "")
            return url.rstrip("/")

        # Normalize the base_url we're looking for
        target_url = normalize_for_comparison(base_url)

        # Iterate through search results
        for first_url in search_results:
            console.print(f"[green]Checking url[white]: [red]{first_url}")

            # Get just the domain part of the search result
            parsed_result = urlparse(first_url)
            result_domain = normalize_for_comparison(parsed_result.netloc)

            # Compare with our target URL (without the protocol part)
            if result_domain.startswith(target_url.split("/")[-1]):
                try:
                    final_url = get_final_redirect_url(first_url, max_timeout)

                    if final_url is not None:
                        def extract_domain(url):
                            # Return only the TLD (last dot-separated label) of the URL's host
                            parsed_url = urlparse(url)
                            domain = parsed_url.netloc
                            return domain.split(".")[-1]

                        new_domain_extract = extract_domain(str(final_url))

                        # Interactive confirmation before persisting the new domain
                        if msg.ask(f"\n[cyan]Do you want to auto update site[white] [red]'{site_name}'[cyan] with domain[white] [red]'{new_domain_extract}'.", choices=["y", "n"], default="y").lower() == "y":

                            # Update domain in config.json
                            config_manager.config['SITE'][site_name]['domain'] = new_domain_extract
                            config_manager.write_config()

                            return new_domain_extract, f"{base_url}.{new_domain_extract}"

                except Exception as redirect_error:
                    console.print(f"[red]Error following redirect for {first_url}: {redirect_error}")
                    continue

        # If no matching URL is found
        console.print("[bold red]No valid URL found matching the base URL.[/bold red]")
        raise Exception("No matching domain found")

    # Handle successful initial domain check: extract the TLD actually reached
    # (the request may have been redirected to a different TLD).
    parsed_url = urlparse(str(response_follow.url))
    parse_domain = parsed_url.netloc
    tld = parse_domain.split('.')[-1]

    # NOTE(review): `tld` is always a str here, so this check is always true;
    # presumably meant to guard against an empty host — confirm.
    if tld is not None:
        # Update domain in config.json
        config_manager.config['SITE'][site_name]['domain'] = tld
        config_manager.write_config()

    # Return config domain
    return tld, f"{base_url}.{tld}"