StreamingCommunity 3.0.9__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of StreamingCommunity might be problematic.

Files changed (28)
  1. StreamingCommunity/Api/Player/ddl.py +4 -2
  2. StreamingCommunity/Api/Player/hdplayer.py +2 -1
  3. StreamingCommunity/Api/Player/maxstream.py +7 -7
  4. StreamingCommunity/Api/Player/mediapolisvod.py +4 -4
  5. StreamingCommunity/Api/Player/mixdrop.py +6 -5
  6. StreamingCommunity/Api/Player/supervideo.py +7 -5
  7. StreamingCommunity/Api/Player/sweetpixel.py +4 -3
  8. StreamingCommunity/Api/Player/vixcloud.py +20 -14
  9. StreamingCommunity/Api/Site/altadefinizione/film.py +9 -1
  10. StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py +35 -21
  11. StreamingCommunity/Api/Site/animeunity/site.py +43 -71
  12. StreamingCommunity/Api/Site/streamingcommunity/film.py +4 -0
  13. StreamingCommunity/Lib/Downloader/HLS/downloader.py +17 -8
  14. StreamingCommunity/Lib/Downloader/HLS/segments.py +5 -4
  15. StreamingCommunity/Lib/Downloader/MP4/downloader.py +3 -3
  16. StreamingCommunity/Upload/update.py +8 -3
  17. StreamingCommunity/Upload/version.py +2 -2
  18. StreamingCommunity/Util/config_json.py +5 -7
  19. StreamingCommunity/Util/message.py +1 -3
  20. StreamingCommunity/Util/os.py +5 -2
  21. StreamingCommunity/global_search.py +2 -2
  22. StreamingCommunity/run.py +71 -36
  23. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/METADATA +43 -58
  24. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/RECORD +28 -28
  25. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/WHEEL +0 -0
  26. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/entry_points.txt +0 -0
  27. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/licenses/LICENSE +0 -0
  28. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/top_level.txt +0 -0
StreamingCommunity/Api/Site/animeunity/site.py CHANGED
@@ -1,11 +1,9 @@
  # 10.12.23

- import logging
-
-
  # External libraries
+ import urllib.parse
  import httpx
- from bs4 import BeautifulSoup
+ from curl_cffi import requests
  from rich.console import Console


@@ -20,92 +18,67 @@ from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
  from StreamingCommunity.Api.Template.config_loader import site_constant
  from StreamingCommunity.Api.Template.Class.SearchType import MediaManager

-
- # Variable
  console = Console()
  media_search_manager = MediaManager()
  table_show_manager = TVShowManager()
  max_timeout = config_manager.get_int("REQUESTS", "timeout")


- def get_token() -> dict:
+ def get_token(user_agent: str) -> dict:
      """
-     Function to retrieve session tokens from a specified website.
-
-     Parameters:
-         - site_name (str): The name of the site.
-         - domain (str): The domain of the site.
-
-     Returns:
-         - dict: A dictionary containing session tokens. The keys are 'XSRF_TOKEN', 'animeunity_session', and 'csrf_token'.
+     Retrieve session cookies from the site.
      """
-     response = httpx.get(
-         url=site_constant.FULL_URL,
-         timeout=max_timeout
+     response = requests.get(
+         site_constant.FULL_URL,
+         headers={'user-agent': user_agent},
+         impersonate="chrome120"
      )
      response.raise_for_status()
+     all_cookies = {name: value for name, value in response.cookies.items()}

-     # Initialize variables to store CSRF token
-     find_csrf_token = None
-     soup = BeautifulSoup(response.text, "html.parser")
-
-     for html_meta in soup.find_all("meta"):
-         if html_meta.get('name') == "csrf-token":
-             find_csrf_token = html_meta.get('content')
-
-     return {
-         'animeunity_session': response.cookies['animeunity_session'],
-         'csrf_token': find_csrf_token
-     }
+     return {k: urllib.parse.unquote(v) for k, v in all_cookies.items()}


- def get_real_title(record):
+ def get_real_title(record: dict) -> str:
      """
-     Get the real title from a record.
-
-     Parameters:
-         - record (dict): A dictionary representing a row of JSON data.
-
-     Returns:
-         - str: The title found in the record. If no title is found, returns None.
+     Return the most appropriate title from the record.
      """
-     if record['title_eng'] is not None:
+     if record.get('title_eng'):
          return record['title_eng']
-     elif record['title'] is not None:
+     elif record.get('title'):
          return record['title']
      else:
-         return record['title_it']
+         return record.get('title_it', '')


  def title_search(query: str) -> int:
      """
-     Function to perform an anime search using both APIs and combine results.
-
-     Parameters:
-         - query (str): The query to search for.
-
-     Returns:
-         - int: A number containing the length of media search manager.
+     Perform anime search on animeunity.so.
      """
-     if site_constant.TELEGRAM_BOT:
+     if site_constant.TELEGRAM_BOT:
          bot = get_bot_instance()
-
+
      media_search_manager.clear()
      table_show_manager.clear()
      seen_titles = set()
      choices = [] if site_constant.TELEGRAM_BOT else None

-     # Create parameter for request
-     data = get_token()
+     user_agent = get_userAgent()
+     data = get_token(user_agent)
+
      cookies = {
-         'animeunity_session': data.get('animeunity_session')
+         'XSRF-TOKEN': data.get('XSRF-TOKEN', ''),
+         'animeunity_session': data.get('animeunity_session', ''),
      }
+
      headers = {
-         'user-agent': get_userAgent(),
-         'x-csrf-token': data.get('csrf_token')
+         'origin': site_constant.FULL_URL,
+         'referer': f"{site_constant.FULL_URL}/",
+         'user-agent': user_agent,
+         'x-xsrf-token': data.get('XSRF-TOKEN', ''),
      }

-     # First API call - livesearch
+     # First call: /livesearch
      try:
          response1 = httpx.post(
              f'{site_constant.FULL_URL}/livesearch',
@@ -114,15 +87,14 @@ def title_search(query: str) -> int:
              json={'title': query},
              timeout=max_timeout
          )
-
          response1.raise_for_status()
-         process_results(response1.json()['records'], seen_titles, media_search_manager, choices)
+         process_results(response1.json().get('records', []), seen_titles, media_search_manager, choices)

      except Exception as e:
          console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
          return 0

-     # Second API call - archivio
+     # Second call: /archivio/get-animes
      try:
          json_data = {
              'title': query,
@@ -135,7 +107,6 @@ def title_search(query: str) -> int:
              'dubbed': False,
              'season': False
          }
-
          response2 = httpx.post(
              f'{site_constant.FULL_URL}/archivio/get-animes',
              cookies=cookies,
@@ -143,30 +114,32 @@ def title_search(query: str) -> int:
              json=json_data,
              timeout=max_timeout
          )
-
          response2.raise_for_status()
-         process_results(response2.json()['records'], seen_titles, media_search_manager, choices)
+         process_results(response2.json().get('records', []), seen_titles, media_search_manager, choices)

      except Exception as e:
          console.print(f"Site: {site_constant.SITE_NAME}, archivio search error: {e}")

      if site_constant.TELEGRAM_BOT and choices and len(choices) > 0:
-         bot.send_message(f"Lista dei risultati:", choices)
-
+         bot.send_message("List of results:", choices)
+
      result_count = media_search_manager.get_length()
      if result_count == 0:
          console.print(f"Nothing matching was found for: {query}")
-
+
      return result_count

+
  def process_results(records: list, seen_titles: set, media_manager: MediaManager, choices: list = None) -> None:
-     """Helper function to process search results and add unique entries."""
+     """
+     Add unique results to the media manager and to choices.
+     """
      for dict_title in records:
          try:
              title_id = dict_title.get('id')
              if title_id in seen_titles:
                  continue
-
+
              seen_titles.add(title_id)
              dict_title['name'] = get_real_title(dict_title)

@@ -179,10 +152,9 @@ def process_results(records: list, seen_titles: set, media_manager: MediaManager
                  'episodes_count': dict_title.get('episodes_count'),
                  'image': dict_title.get('imageurl')
              })
-
+
              if choices is not None:
-                 choice_text = f"{len(choices)} - {dict_title.get('name')} ({dict_title.get('type')}) - Episodi: {dict_title.get('episodes_count')}"
+                 choice_text = f"{len(choices)} - {dict_title.get('name')} ({dict_title.get('type')}) - Episodes: {dict_title.get('episodes_count')}"
                  choices.append(choice_text)
-
          except Exception as e:
-             print(f"Error parsing a title entry: {e}")
+             print(f"Error parsing a title entry: {e}")
StreamingCommunity/Api/Site/streamingcommunity/film.py CHANGED
@@ -62,6 +62,10 @@ def download_film(select_title: MediaItem, proxy: str = None) -> str:
      video_source.get_content()
      master_playlist = video_source.get_playlist()

+     if master_playlist is None:
+         console.print(f"[red]Site: {site_constant.SITE_NAME}, error: No master playlist found[/red]")
+         return None
+
      # Define the filename and path for the downloaded film
      title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
      mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
StreamingCommunity/Lib/Downloader/HLS/downloader.py CHANGED
@@ -156,7 +156,7 @@ class M3U8Manager:
          If it's a master playlist, only selects video stream.
          """
          if not self.is_master:
-             self.video_url, self.video_res = self.m3u8_url, "0p"
+             self.video_url, self.video_res = self.m3u8_url, "undefined"
              self.audio_streams = []
              self.sub_streams = []

@@ -165,8 +165,9 @@ class M3U8Manager:
              self.video_url, self.video_res = self.parser._video.get_best_uri()
          elif str(FILTER_CUSTOM_REOLUTION) == "worst":
              self.video_url, self.video_res = self.parser._video.get_worst_uri()
-         elif "p" in str(FILTER_CUSTOM_REOLUTION):
-             self.video_url, self.video_res = self.parser._video.get_custom_uri(int(FILTER_CUSTOM_REOLUTION.replace("p", "")))
+         elif str(FILTER_CUSTOM_REOLUTION).replace("p", "").replace("px", "").isdigit():
+             resolution_value = int(str(FILTER_CUSTOM_REOLUTION).replace("p", "").replace("px", ""))
+             self.video_url, self.video_res = self.parser._video.get_custom_uri(resolution_value)
          else:
              logging.error("Resolution not recognized.")
              self.video_url, self.video_res = self.parser._video.get_best_uri()
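The custom-resolution filter now accepts values such as "1080p" or "720px" by stripping the suffix and checking isdigit() before converting. A small sketch of the same parsing idea (the helper name is made up for illustration; the longer suffix is stripped first):

```python
def parse_resolution_filter(value: str) -> int | None:
    """Return the numeric height from values like '1080p' or '720px', otherwise None."""
    cleaned = str(value).replace("px", "").replace("p", "")  # strip the longer suffix first
    return int(cleaned) if cleaned.isdigit() else None


assert parse_resolution_filter("1080p") == 1080
assert parse_resolution_filter("720px") == 720
assert parse_resolution_filter("best") is None
```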
@@ -180,10 +181,14 @@ class M3U8Manager:

          self.sub_streams = []
          if ENABLE_SUBTITLE:
-             self.sub_streams = [
-                 s for s in (self.parser._subtitle.get_all_uris_and_names() or [])
-                 if s.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE
-             ]
+             if "*" in DOWNLOAD_SPECIFIC_SUBTITLE:
+                 self.sub_streams = self.parser._subtitle.get_all_uris_and_names() or []
+
+             else:
+                 self.sub_streams = [
+                     s for s in (self.parser._subtitle.get_all_uris_and_names() or [])
+                     if s.get('language') in DOWNLOAD_SPECIFIC_SUBTITLE
+                 ]

      def log_selection(self):
          tuple_available_resolution = self.parser._video.get_list_resolution()
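With this change a "*" entry in DOWNLOAD_SPECIFIC_SUBTITLE acts as a wildcard and keeps every available subtitle track instead of filtering by language. Roughly, as a self-contained sketch with illustrative track dicts:

```python
def select_subtitles(available: list[dict], wanted: list[str]) -> list[dict]:
    # "*" acts as a wildcard: keep every track; otherwise keep only the requested languages.
    if "*" in wanted:
        return list(available)
    return [s for s in available if s.get("language") in wanted]


tracks = [{"language": "ita", "uri": "ita.m3u8"}, {"language": "eng", "uri": "eng.m3u8"}]
print(select_subtitles(tracks, ["*"]))    # both tracks
print(select_subtitles(tracks, ["eng"]))  # only the English track
```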
@@ -209,9 +214,13 @@ class M3U8Manager:
              f"[red]Set:[/red] {set_codec_info}"
          )

+         # Get available subtitles and their languages
          available_subtitles = self.parser._subtitle.get_all_uris_and_names() or []
          available_sub_languages = [sub.get('language') for sub in available_subtitles]
-         downloadable_sub_languages = list(set(available_sub_languages) & set(DOWNLOAD_SPECIFIC_SUBTITLE))
+
+         # If "*" is in DOWNLOAD_SPECIFIC_SUBTITLE, all languages are downloadable
+         downloadable_sub_languages = available_sub_languages if "*" in DOWNLOAD_SPECIFIC_SUBTITLE else list(set(available_sub_languages) & set(DOWNLOAD_SPECIFIC_SUBTITLE))
+
          if available_sub_languages:
              console.print(
                  f"[cyan bold]Subtitle [/cyan bold] [green]Available:[/green] [purple]{', '.join(available_sub_languages)}[/purple] | "
StreamingCommunity/Lib/Downloader/HLS/segments.py CHANGED
@@ -110,7 +110,7 @@ class M3U8_Segments:
          self.key_base_url = f"{parsed_url.scheme}://{parsed_url.netloc}/"

          try:
-             client_params = {'headers': {'User-Agent': get_userAgent()}, 'timeout': MAX_TIMEOOUT}
+             client_params = {'headers': {'User-Agent': get_userAgent()}, 'timeout': MAX_TIMEOOUT, 'verify': REQUEST_VERIFY}
              response = httpx.get(url=key_uri, **client_params)
              response.raise_for_status()

@@ -158,7 +158,7 @@ class M3U8_Segments:
          """
          if self.is_index_url:
              try:
-                 client_params = {'headers': {'User-Agent': get_userAgent()}, 'timeout': MAX_TIMEOOUT}
+                 client_params = {'headers': {'User-Agent': get_userAgent()}, 'timeout': MAX_TIMEOOUT, 'verify': REQUEST_VERIFY}
                  response = httpx.get(self.url, **client_params, follow_redirects=True)
                  response.raise_for_status()

@@ -202,7 +202,8 @@ class M3U8_Segments:
              'headers': {'User-Agent': get_userAgent()},
              'timeout': SEGMENT_MAX_TIMEOUT,
              'follow_redirects': True,
-             'http2': False
+             'http2': False,
+             'verify': REQUEST_VERIFY
          }
          return httpx.Client(**client_params)

@@ -463,4 +464,4 @@ class M3U8_Segments:
              f"[white]Failed segments: [red]{self.info_nFailed}")

          if self.info_nRetry > len(self.segments) * 0.3:
-             console.print("[yellow]Warning: High retry count detected. Consider reducing worker count in config.")
+             console.print("[yellow]Warning: High retry count detected. Consider reducing worker count in config.")
StreamingCommunity/Lib/Downloader/MP4/downloader.py CHANGED
@@ -88,7 +88,7 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No
          return None, False

      if GET_ONLY_LINK:
-         console.print(f"URL: {url}[/bold red]")
+         console.print(f"[bold red]URL: {url}[/bold red]")
          return path, True

      if not (url.lower().startswith('http://') or url.lower().startswith('https://')):
@@ -115,7 +115,7 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No
      os.makedirs(os.path.dirname(path), exist_ok=True)

      try:
-         with httpx.Client() as client:
+         with httpx.Client(verify=REQUEST_VERIFY) as client:
              with client.stream("GET", url, headers=headers) as response:
                  response.raise_for_status()
                  total = int(response.headers.get('content-length', 0))
@@ -188,4 +188,4 @@ def MP4_downloader(url: str, path: str, referer: str = None, headers_: dict = No
          return None, interrupt_handler.kill_download

      finally:
-         signal.signal(signal.SIGINT, original_handler)
+         signal.signal(signal.SIGINT, original_handler)
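MP4_downloader keeps its streamed-download structure; the changes only add the verify flag and fix the Rich markup of the URL message. A rough standalone sketch of the streaming pattern it relies on (file name, chunk size, and progress handling are illustrative):

```python
import httpx


def stream_to_file(url: str, path: str, verify: bool = True) -> int:
    """Stream a remote file to disk and return the number of bytes written."""
    written = 0
    with httpx.Client(verify=verify, follow_redirects=True) as client:
        with client.stream("GET", url) as response:
            response.raise_for_status()
            total = int(response.headers.get("content-length", 0))  # 0 when the size is unknown
            with open(path, "wb") as file_handle:
                for chunk in response.iter_bytes(chunk_size=64 * 1024):
                    file_handle.write(chunk)
                    written += len(chunk)
    return written  # a progress bar would compare `written` against `total`
```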
StreamingCommunity/Upload/update.py CHANGED
@@ -4,6 +4,7 @@ import os
  import sys
  import time
  import asyncio
+ import importlib.metadata

  # External library
  import httpx
@@ -11,7 +12,7 @@ from rich.console import Console


  # Internal utilities
- from .version import __version__, __author__, __title__
+ from .version import __version__ as source_code_version, __author__, __title__
  from StreamingCommunity.Util.config_json import config_manager
  from StreamingCommunity.Util.headers import get_userAgent

@@ -75,7 +76,11 @@ def update():
      percentual_stars = 0

      # Get the current version (installed version)
-     current_version = __version__
+     try:
+         current_version = importlib.metadata.version(__title__)
+     except importlib.metadata.PackageNotFoundError:
+         #console.print(f"[yellow]Warning: Could not determine installed version for '{__title__}' via importlib.metadata. Falling back to source version.[/yellow]")
+         current_version = source_code_version

      # Get commit details
      latest_commit = response_commits[0] if response_commits else None
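The updater now asks importlib.metadata for the installed distribution's version and only falls back to the hard-coded source version when the package is not installed (for example, when running from a source checkout). A compact sketch of that lookup:

```python
import importlib.metadata

FALLBACK_VERSION = "3.2.0"  # what the source tree declares


def get_installed_version(dist_name: str = "StreamingCommunity") -> str:
    try:
        # Version recorded in the installed wheel's metadata.
        return importlib.metadata.version(dist_name)
    except importlib.metadata.PackageNotFoundError:
        # Running from a source checkout: fall back to the hard-coded version string.
        return FALLBACK_VERSION


print(get_installed_version())
```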
@@ -93,4 +98,4 @@ def update():
      console.print(f"\n[red]{__title__} has been downloaded [yellow]{total_download_count} [red]times, but only [yellow]{percentual_stars}% [red]of users have starred it.\n\
  [cyan]Help the repository grow today by leaving a [yellow]star [cyan]and [yellow]sharing [cyan]it with others online!")

-     time.sleep(4)
+     time.sleep(4)
StreamingCommunity/Upload/version.py CHANGED
@@ -1,5 +1,5 @@
  __title__ = 'StreamingCommunity'
- __version__ = '3.0.9'
+ __version__ = '3.2.0'
  __author__ = 'Arrowar'
  __description__ = 'A command-line program to download film'
- __copyright__ = 'Copyright 2024'
+ __copyright__ = 'Copyright 2025'
StreamingCommunity/Util/config_json.py CHANGED
@@ -36,10 +36,8 @@ class ConfigManager:
              base_path = os.path.dirname(sys.executable)

          else:
-
-             # Get the actual path of the module file
-             current_file_path = os.path.abspath(__file__)
-             base_path = os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))
+             # Use the current working directory where the script is executed
+             base_path = os.getcwd()

          # Initialize file paths
          self.file_path = os.path.join(base_path, file_name)
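Configuration discovery moves from "three directories above the module file" to the current working directory, while frozen executables keep using the directory of sys.executable. A hedged sketch of the resulting lookup; the actual frozen-build check is not shown in this hunk, so the sys.frozen test below is an assumption:

```python
import os
import sys


def resolve_config_dir() -> str:
    # Assumption: frozen (PyInstaller-style) builds keep config.json next to the executable;
    # otherwise the file is expected in the directory the command is run from.
    if getattr(sys, "frozen", False):
        return os.path.dirname(sys.executable)
    return os.getcwd()


config_path = os.path.join(resolve_config_dir(), "config.json")
print(config_path)
```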
@@ -275,7 +273,7 @@ class ConfigManager:
          }

          try:
-             console.print(f"[bold cyan]Retrieving site data from GitHub:[/bold cyan] [green]{domains_github_url}[/green]")
+             console.print("[bold cyan]Retrieving site data from GitHub:[/bold cyan]")
              response = requests.get(domains_github_url, timeout=8, headers=headers)

              if response.ok:
@@ -344,7 +342,7 @@ class ConfigManager:
          try:
              logging.info(f"Downloading {filename} from {url}...")
              console.print(f"[bold cyan]File download:[/bold cyan] {os.path.basename(filename)}")
-             response = requests.get(url, timeout=8, headers={'User-Agent': get_userAgent()})
+             response = requests.get(url, timeout=8, headers={'User-Agent': get_userAgent()}, verify=self.get_bool('REQUESTS', 'verify'))

              if response.status_code == 200:
                  with open(filename, 'wb') as f:
@@ -570,4 +568,4 @@ def get_use_large_bar():


  # Initialize the ConfigManager when the module is imported
- config_manager = ConfigManager()
+ config_manager = ConfigManager()
StreamingCommunity/Util/message.py CHANGED
@@ -35,7 +35,5 @@ def start_message():

      if SHOW:
          console.print(f"[purple]{msg}")
-
-         # Print a decorative separator line using asterisks
-         separator = "_" * (console.width - 2) # Ridotto di 2 per il padding
+         separator = "_" * (console.width - 2)
          console.print(f"[cyan]{separator}[/cyan]\n")
StreamingCommunity/Util/os.py CHANGED
@@ -320,16 +320,19 @@ class InternManager():
      # except Exception:
      #     return False

-     def check_dns_resolve(self):
+     def check_dns_resolve(self, domains_list: list = None):
          """
          Check if the system's current DNS server can resolve a domain name.
          Works on both Windows and Unix-like systems.

+         Args:
+             domains_list (list, optional): List of domains to test. Defaults to common domains.
+
          Returns:
              bool: True if the current DNS server can resolve a domain name,
                    False if can't resolve or in case of errors
          """
-         test_domains = ["github.com", "google.com", "microsoft.com", "amazon.com"]
+         test_domains = domains_list or ["github.com", "google.com", "microsoft.com", "amazon.com"]

          try:
              for domain in test_domains:
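check_dns_resolve can now be pointed at the exact hostnames the configured sites use instead of a fixed list of well-known domains. The body of the method is not shown in this diff; the sketch below assumes plain socket-based resolution:

```python
import socket


def can_resolve_all(domains: list[str] | None = None) -> bool:
    """Return True only if every domain resolves with the current DNS settings."""
    test_domains = domains or ["github.com", "google.com", "microsoft.com", "amazon.com"]
    try:
        for domain in test_domains:
            socket.gethostbyname(domain)
        return True
    except socket.gaierror:
        return False


print(can_resolve_all(["github.com", "pypi.org"]))
```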
StreamingCommunity/global_search.py CHANGED
@@ -157,7 +157,7 @@ def global_search(search_terms: str = None, selected_sites: list = None):

      # Display progress information
      console.print(f"\n[bold green]Searching for:[/bold green] [yellow]{search_terms}[/yellow]")
-     console.print(f"[bold green]Searching across:[/bold green] {len(selected_sites)} sites")
+     console.print(f"[bold green]Searching across:[/bold green] {len(selected_sites)} sites \n")

      with Progress() as progress:
          search_task = progress.add_task("[cyan]Searching...", total=len(selected_sites))
@@ -188,7 +188,7 @@ def global_search(search_terms: str = None, selected_sites: list = None):
                  item_dict['source_alias'] = alias
                  all_results[alias].append(item_dict)

-             console.print(f"[green]Found {len(database.media_list)} results from {site_name}")
+             console.print(f"\n[green]Found {len(database.media_list)} results from {site_name}")

          except Exception as e:
              console.print(f"[bold red]Error searching {site_name}:[/bold red] {str(e)}")
StreamingCommunity/run.py CHANGED
@@ -9,6 +9,7 @@ import platform
  import argparse
  import importlib
  import threading, asyncio
+ from urllib.parse import urlparse
  from typing import Callable


@@ -153,6 +154,7 @@ def initialize():
      except:
          console.log("[red]Error with loading github.")

+
  def restart_script():
      """Riavvia lo script con gli stessi argomenti della riga di comando."""
      print("\nRiavvio dello script...\n")
@@ -191,6 +193,11 @@ def force_exit():
      os._exit(0)


+ def _extract_hostname(url_string: str) -> str:
+     """Safely extracts the hostname from a URL string."""
+     return urlparse(url_string).hostname
+
+
  def main(script_id = 0):

      color_map = {
@@ -200,6 +207,13 @@ def main(script_id = 0):
          "torrent": "white"
      }

+     category_map = {
+         1: "anime",
+         2: "film_&_serie",
+         3: "serie",
+         4: "torrent"
+     }
+
      if TELEGRAM_BOT:
          bot = get_bot_instance()
          bot.send_message(f"Avviato script {script_id}", None)
@@ -209,20 +223,11 @@ def main(script_id = 0):
      # Create logger
      log_not = Logger()
      initialize()
-
-     # if not internet_manager.check_dns_provider():
-     #     print()
-     #     console.print("[red]❌ ERROR: DNS configuration is required!")
-     #     console.print("[red]The program cannot function correctly without proper DNS settings.")
-     #     console.print("[yellow]Please configure one of these DNS servers:")
-     #     console.print("[blue]• Cloudflare (1.1.1.1) 'https://developers.cloudflare.com/1.1.1.1/setup/windows/'")
-     #     console.print("[blue]• Quad9 (9.9.9.9) 'https://docs.quad9.net/Setup_Guides/Windows/Windows_10/'")
-     #     console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
-
-     #     time.sleep(2)
-     #     msg.ask("[yellow]Press Enter to continue ...")
-
-     if not internet_manager.check_dns_resolve():
+
+     # Get all site hostname
+     hostname_list = [hostname for site_info in config_manager.configSite.values() if (hostname := _extract_hostname(site_info.get('full_url')))]
+
+     if not internet_manager.check_dns_resolve(hostname_list):
          print()
          console.print("[red]❌ ERROR: DNS configuration is required!")
          console.print("[red]The program cannot function correctly without proper DNS settings.")
@@ -273,6 +278,11 @@ def main(script_id = 0):
          '--global', action='store_true', help='Perform a global search across multiple sites.'
      )

+     # Add category selection argument
+     parser.add_argument(
+         '--category', type=int, help='Select category directly (1: anime, 2: film_&_serie, 3: serie, 4: torrent).'
+     )
+
      # Add arguments for search functions
      parser.add_argument('-s', '--search', default=None, help='Search terms')

@@ -322,35 +332,60 @@ def main(script_id = 0):
      except Exception as e:
          console.print(f"[red]Error mapping module {module_name}: {str(e)}")

-     # Display the category legend
-     legend_text = " | ".join([f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items()])
-     console.print(f"\n[bold green]Category Legend:[/bold green] {legend_text}")
+     if args.category:
+         selected_category = category_map.get(args.category)
+         category_sites = []
+         for key, label in choice_labels.items():
+             if label[1] == selected_category:
+                 category_sites.append((key, label[0]))

-     # Construct prompt with proper color mapping
-     prompt_message = "[green]Insert category [white](" + ", ".join(
-         [f"[{color_map.get(label[1], 'white')}]{key}: {label[0]}[/{color_map.get(label[1], 'white')}]"
-         for key, label in choice_labels.items()]
-     ) + "[white])"
+         if len(category_sites) == 1:
+             category = category_sites[0][0]
+             console.print(f"[green]Selezionato automaticamente: {category_sites[0][1]}[/green]")

-     if TELEGRAM_BOT:
-         category_legend_str = "Categorie: \n" + " | ".join([
-             f"{category.capitalize()}" for category in color_map.keys()
-         ])
+         else:
+             sito_prompt_items = [f"[{color_map.get(selected_category, 'white')}]({k}) {v}[/{color_map.get(selected_category, 'white')}]"
+                                  for k, v in category_sites]
+             sito_prompt_line = ", ".join(sito_prompt_items)
+
+             if TELEGRAM_BOT:
+                 console.print(f"\nInsert site: {sito_prompt_line}")
+                 category = bot.ask(
+                     "select_site",
+                     f"Insert site: {sito_prompt_line}",
+                     None
+                 )
+             else:
+                 category = msg.ask(f"\n[cyan]Insert site: {sito_prompt_line}", choices=[k for k, _ in category_sites], show_choices=False)

-         prompt_message = "Inserisci il sito:\n" + "\n".join(
-             [f"{key}: {label[0]}" for key, label in choice_labels.items()]
+     else:
+         legend_text = " | ".join([f"[{color}]{category.capitalize()}[/{color}]" for category, color in color_map.items()])
+         console.print(f"\n[bold cyan]Category Legend:[/bold cyan] {legend_text}")
+
+         prompt_message = "[cyan]Insert site: " + ", ".join(
+             [f"[{color_map.get(label[1], 'white')}]({key}) {label[0]}[/{color_map.get(label[1], 'white')}]"
+             for key, label in choice_labels.items()]
          )

-         console.print(f"\n{prompt_message}")
+         if TELEGRAM_BOT:
+             category_legend_str = "Categorie: \n" + " | ".join([
+                 f"{category.capitalize()}" for category in color_map.keys()
+             ])

-         category = bot.ask(
-             "select_provider",
-             f"{category_legend_str}\n\n{prompt_message}",
-             None
-         )
+             prompt_message_telegram = "Inserisci il sito:\n" + "\n".join(
+                 [f"{key}: {label[0]}" for key, label in choice_labels.items()]
+             )

-     else:
-         category = msg.ask(prompt_message, choices=list(choice_labels.keys()), default="0", show_choices=False, show_default=False)
+             console.print(f"\n{prompt_message_telegram}")
+
+             category = bot.ask(
+                 "select_provider",
+                 f"{category_legend_str}\n\n{prompt_message_telegram}",
+                 None
+             )
+
+         else:
+             category = msg.ask(prompt_message, choices=list(choice_labels.keys()), default="0", show_choices=False, show_default=False)

      # Run the corresponding function based on user input
      if category in input_to_function:
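Taken together with the new --category flag, the selection flow above maps the integer through category_map and auto-selects the provider when the category contains exactly one entry, otherwise prompting only for sites in that category. A trimmed, self-contained sketch of that flow (the provider table and labels are illustrative):

```python
import argparse

category_map = {1: "anime", 2: "film_&_serie", 3: "serie", 4: "torrent"}
# key -> (site label, category): an illustrative stand-in for choice_labels.
choice_labels = {"0": ("streamingcommunity", "film_&_serie"), "1": ("animeunity", "anime")}

parser = argparse.ArgumentParser()
parser.add_argument("--category", type=int,
                    help="Select category directly (1: anime, 2: film_&_serie, 3: serie, 4: torrent).")
args = parser.parse_args(["--category", "1"])

selected_category = category_map.get(args.category)
category_sites = [(key, label[0]) for key, label in choice_labels.items() if label[1] == selected_category]

if len(category_sites) == 1:
    site_key = category_sites[0][0]  # exactly one provider in the category: select it automatically
else:
    site_key = None                  # several providers: an interactive prompt would be shown instead
print(site_key)
```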