StreamingCommunity-2.2.0-py3-none-any.whl → StreamingCommunity-2.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of StreamingCommunity might be problematic.

Files changed (56)
  1. StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +15 -24
  2. StreamingCommunity/Api/Site/1337xx/site.py +9 -6
  3. StreamingCommunity/Api/Site/1337xx/title.py +2 -2
  4. StreamingCommunity/Api/Site/altadefinizionegratis/costant.py +19 -0
  5. StreamingCommunity/Api/Site/{altadefinizione → altadefinizionegratis}/film.py +2 -2
  6. StreamingCommunity/Api/Site/{altadefinizione → altadefinizionegratis}/site.py +28 -22
  7. StreamingCommunity/Api/Site/animeunity/__init__.py +1 -1
  8. StreamingCommunity/Api/Site/animeunity/costant.py +6 -2
  9. StreamingCommunity/Api/Site/animeunity/film_serie.py +3 -3
  10. StreamingCommunity/Api/Site/animeunity/site.py +29 -21
  11. StreamingCommunity/Api/Site/cb01new/costant.py +6 -2
  12. StreamingCommunity/Api/Site/cb01new/film.py +2 -2
  13. StreamingCommunity/Api/Site/cb01new/site.py +20 -13
  14. StreamingCommunity/Api/Site/ddlstreamitaly/costant.py +6 -2
  15. StreamingCommunity/Api/Site/ddlstreamitaly/series.py +2 -2
  16. StreamingCommunity/Api/Site/ddlstreamitaly/site.py +9 -5
  17. StreamingCommunity/Api/Site/guardaserie/costant.py +6 -2
  18. StreamingCommunity/Api/Site/guardaserie/series.py +2 -3
  19. StreamingCommunity/Api/Site/guardaserie/site.py +10 -6
  20. StreamingCommunity/Api/Site/ilcorsaronero/costant.py +6 -2
  21. StreamingCommunity/Api/Site/ilcorsaronero/site.py +22 -13
  22. StreamingCommunity/Api/Site/ilcorsaronero/title.py +3 -3
  23. StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +1 -1
  24. StreamingCommunity/Api/Site/mostraguarda/costant.py +6 -2
  25. StreamingCommunity/Api/Site/mostraguarda/film.py +2 -2
  26. StreamingCommunity/Api/Site/streamingcommunity/costant.py +7 -3
  27. StreamingCommunity/Api/Site/streamingcommunity/film.py +3 -3
  28. StreamingCommunity/Api/Site/streamingcommunity/series.py +3 -3
  29. StreamingCommunity/Api/Site/streamingcommunity/site.py +30 -26
  30. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +24 -24
  31. StreamingCommunity/Api/Template/Util/get_domain.py +171 -142
  32. StreamingCommunity/Api/Template/site.py +1 -1
  33. StreamingCommunity/Lib/Downloader/HLS/downloader.py +14 -3
  34. StreamingCommunity/Lib/Downloader/HLS/segments.py +36 -22
  35. StreamingCommunity/Lib/Downloader/TOR/downloader.py +3 -3
  36. StreamingCommunity/Lib/M3U8/decryptor.py +1 -0
  37. StreamingCommunity/Lib/M3U8/estimator.py +2 -2
  38. StreamingCommunity/Lib/M3U8/url_fixer.py +6 -0
  39. StreamingCommunity/Lib/TMBD/tmdb.py +1 -1
  40. StreamingCommunity/Upload/version.py +1 -1
  41. StreamingCommunity/Util/_jsonConfig.py +43 -19
  42. StreamingCommunity/Util/ffmpeg_installer.py +31 -14
  43. StreamingCommunity/Util/headers.py +15 -2
  44. StreamingCommunity/Util/logger.py +9 -0
  45. StreamingCommunity/Util/os.py +100 -138
  46. StreamingCommunity/Util/table.py +6 -6
  47. StreamingCommunity/run.py +61 -7
  48. {StreamingCommunity-2.2.0.dist-info → StreamingCommunity-2.4.0.dist-info}/METADATA +116 -35
  49. StreamingCommunity-2.4.0.dist-info/RECORD +92 -0
  50. StreamingCommunity/Api/Site/altadefinizione/costant.py +0 -15
  51. StreamingCommunity-2.2.0.dist-info/RECORD +0 -92
  52. /StreamingCommunity/Api/Site/{altadefinizione → altadefinizionegratis}/__init__.py +0 -0
  53. {StreamingCommunity-2.2.0.dist-info → StreamingCommunity-2.4.0.dist-info}/LICENSE +0 -0
  54. {StreamingCommunity-2.2.0.dist-info → StreamingCommunity-2.4.0.dist-info}/WHEEL +0 -0
  55. {StreamingCommunity-2.2.0.dist-info → StreamingCommunity-2.4.0.dist-info}/entry_points.txt +0 -0
  56. {StreamingCommunity-2.2.0.dist-info → StreamingCommunity-2.4.0.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,8 @@
 # 18.06.24
 
-import sys
-from urllib.parse import urlparse
+import ssl
+import time
+from urllib.parse import urlparse, unquote
 
 
 # External libraries
@@ -15,160 +16,188 @@ from StreamingCommunity.Util.console import console, msg
 from StreamingCommunity.Util._jsonConfig import config_manager
 
 
-def google_search(query):
-    """
-    Perform a Google search and return the first result.
+base_headers = {
+    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
+    'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
+    'dnt': '1',
+    'priority': 'u=0, i',
+    'referer': '',
+    'sec-ch-ua-mobile': '?0',
+    'sec-ch-ua-platform': '"Windows"',
+    'sec-fetch-dest': 'document',
+    'sec-fetch-mode': 'navigate',
+    'sec-fetch-site': 'same-origin',
+    'sec-fetch-user': '?1',
+    'upgrade-insecure-requests': '1',
+    'user-agent': ''
+}
+
+
+def get_tld(url_str):
+    """Extract the TLD (Top-Level Domain) from the URL."""
+    try:
+        url_str = unquote(url_str)
+        parsed = urlparse(url_str)
+        domain = parsed.netloc.lower()
 
-    Args:
-        query (str): The search query to execute on Google.
+        if domain.startswith('www.'):
+            domain = domain[4:]
+        parts = domain.split('.')
 
-    Returns:
-        str: The first URL result from the search, or None if no result is found.
-    """
-    # Perform the search on Google and limit to 1 result
-    search_results = search(query, num_results=1)
-
-    # Extract the first result
-    first_result = next(search_results, None)
-
-    if not first_result:
-        console.print("[red]No results found.[/red]")
+        return parts[-1] if len(parts) >= 2 else None
 
-    return first_result
-
-def get_final_redirect_url(initial_url, max_timeout):
-    """
-    Follow redirects from the initial URL and return the final URL after all redirects.
-
-    Args:
-        initial_url (str): The URL to start with and follow redirects.
-
-    Returns:
-        str: The final URL after all redirects are followed.
-    """
+    except Exception:
+        return None
 
-    # Create a client with redirects enabled
+def get_base_domain(url_str):
+    """Extract base domain without protocol, www and path."""
     try:
-        with httpx.Client(
-            headers={
-                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
-                'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
-                'User-Agent': get_headers()
-            },
-            follow_redirects=True,
-            timeout=max_timeout
-
-        ) as client:
-            response = client.get(initial_url)
-
-            if response.status_code == 403:
-                console.print("[bold red]The owner of this website has banned your IP[/bold red]")
-                raise
-
-            response.raise_for_status()
-
-            # Capture the final URL after all redirects
-            final_url = response.url
+        parsed = urlparse(url_str)
+        domain = parsed.netloc.lower()
+        if domain.startswith('www.'):
+            domain = domain[4:]
+
+        # Check if domain has multiple parts separated by dots
+        parts = domain.split('.')
+        if len(parts) > 2:  # Handle subdomains
+            return '.'.join(parts[:-1])  # Return everything except TLD
 
-            return final_url
+        return parts[0]  # Return base domain
 
-    except Exception as e:
-        console.print(f"\n[cyan]Test url[white]: [red]{initial_url}, [cyan]error[white]: [red]{e}")
+    except Exception:
+        return None
+
+def get_base_url(url_str):
+    """Extract base URL including protocol and domain, removing path and query parameters."""
+    try:
+        parsed = urlparse(url_str)
+        return f"{parsed.scheme}://{parsed.netloc}"
+
+    except Exception:
         return None
 
-def search_domain(site_name: str, base_url: str, get_first: bool = False):
-    """
-    Search for a valid domain for the given site name and base URL.
-
-    Parameters:
-        - site_name (str): The name of the site to search the domain for.
-        - base_url (str): The base URL to construct complete URLs.
-        - get_first (bool): If True, automatically update to the first valid match without user confirmation.
+def validate_url(url, base_url, max_timeout, max_retries=3, sleep=3):
+    """Validate if URL is accessible and matches expected base domain."""
+    console.print(f"\n[cyan]Starting validation for URL[white]: [yellow]{url}")
+
+    # Verify URL structure matches base_url structure
+    base_domain = get_base_domain(base_url)
+    url_domain = get_base_domain(url)
 
-    Returns:
-        tuple: The found domain and the complete URL.
-    """
+    base_headers['referer'] = url
+    base_headers['user-agent'] = get_headers()
+
+    if base_domain != url_domain:
+        console.print(f"[red]Domain structure mismatch: {url_domain} != {base_domain}")
+        return False, None
 
-    # Extract config domain
+    # Count dots to ensure we don't have extra subdomains
+    base_dots = base_url.count('.')
+    url_dots = url.count('.')
+    if url_dots > base_dots + 1:  # Allow for one extra dot for TLD change
+        console.print(f"[red]Too many subdomains in URL")
+        return False, None
+
+    client = httpx.Client(
+        verify=False,
+        headers=base_headers,
+        timeout=max_timeout
+    )
+
+    for retry in range(max_retries):
+        try:
+            time.sleep(sleep)
+
+            # Initial check without redirects
+            response = client.get(url, follow_redirects=False)
+            if response.status_code == 403:
+                console.print(f"[red]Check failed (403) - Attempt {retry + 1}/{max_retries}")
+                continue
+
+            if response.status_code >= 400:
+                console.print(f"[red]Check failed: HTTP {response.status_code}")
+                return False, None
+
+            # Follow redirects and verify final domain
+            final_response = client.get(url, follow_redirects=True)
+            final_domain = get_base_domain(str(final_response.url))
+            console.print(f"[cyan]Redirect url: [red]{final_response.url}")
+
+            if final_domain != base_domain:
+                console.print(f"[red]Final domain mismatch: {final_domain} != {base_domain}")
+                return False, None
+
+            new_tld = get_tld(str(final_response.url))
+            if new_tld != get_tld(url):
+                return True, new_tld
+
+            return True, None
+
+        except (httpx.RequestError, ssl.SSLError) as e:
+            console.print(f"[red]Connection error: {str(e)}")
+            time.sleep(sleep)
+            continue
+
+    return False, None
+
+def search_domain(site_name: str, base_url: str, get_first: bool = False):
+    """Search for valid domain matching site name and base URL."""
     max_timeout = config_manager.get_int("REQUESTS", "timeout")
     domain = str(config_manager.get_dict("SITE", site_name)['domain'])
+
+    # Test initial URL
+    try:
+        is_correct, redirect_tld = validate_url(base_url, base_url, max_timeout)
+
+        if is_correct:
+            tld = redirect_tld or get_tld(base_url)
+            config_manager.config['SITE'][site_name]['domain'] = tld
+            config_manager.write_config()
+            console.print(f"[green]Successfully validated initial URL")
+            return tld, base_url
+
+    except Exception as e:
+        console.print(f"[red]Error testing initial URL: {str(e)}")
 
+    # Google search phase
+    base_domain = get_base_domain(base_url)
+    console.print(f"\n[cyan]Searching for alternate domains for[white]: [yellow]{base_domain}")
+
     try:
-        # Test the current domain
-        with httpx.Client(
-            headers={
-                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
-                'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
-                'User-Agent': get_headers()
-            },
-            follow_redirects=True,
-            timeout=max_timeout
-        ) as client:
-            response_follow = client.get(f"{base_url}.{domain}")
-            response_follow.raise_for_status()
+        search_results = list(search(base_domain, num_results=20, lang="it"))
+        base_urls = set()
 
+        for url in search_results:
+            base_url = get_base_url(url)
+            if base_url:
+                base_urls.add(base_url)
+
+        # Filter URLs based on domain matching and subdomain count
+        filtered_results = [
+            url for url in base_urls
+            if get_base_domain(url) == base_domain
+            and url.count('.') <= base_url.count('.') + 1
+        ]
+
+        for idx, result_url in enumerate(filtered_results, 1):
+            console.print(f"\n[cyan]Checking result {idx}/{len(filtered_results)}[white]: [yellow]{result_url}")
+
+            is_valid, new_tld = validate_url(result_url, base_url, max_timeout)
+            if is_valid:
+                final_tld = new_tld or get_tld(result_url)
+
+                if get_first or msg.ask(
+                    f"\n[cyan]Update site[white] [red]'{site_name}'[cyan] with domain[white] [red]'{final_tld}'",
+                    choices=["y", "n"],
+                    default="y"
+                ).lower() == "y":
+
+                    config_manager.config['SITE'][site_name]['domain'] = final_tld
+                    config_manager.write_config()
+                    return final_tld, f"{base_url}.{final_tld}"
+
     except Exception as e:
-        query = base_url.split("/")[-1]
-
-        # Perform a Google search with multiple results
-        search_results = list(search(query, num_results=10, lang="it"))
-        #console.print(f"\nGoogle search results: {search_results}")
-
-        def normalize_for_comparison(url):
-            """Normalize URL by removing protocol, www, and trailing slashes"""
-            url = url.lower()
-            url = url.replace("https://", "").replace("http://", "")
-            url = url.replace("www.", "")
-            return url.rstrip("/")
-
-        # Normalize the base_url we're looking for
-        target_url = normalize_for_comparison(base_url)
-
-        # Iterate through search results
-        for first_url in search_results:
-            console.print(f"[green]Checking url[white]: [red]{first_url}")
-
-            # Get just the domain part of the search result
-            parsed_result = urlparse(first_url)
-            result_domain = normalize_for_comparison(parsed_result.netloc)
-
-            # Compare with our target URL (without the protocol part)
-            if result_domain.startswith(target_url.split("/")[-1]):
-                try:
-                    final_url = get_final_redirect_url(first_url, max_timeout)
-
-                    if final_url is not None:
-                        def extract_domain(url):
-                            parsed_url = urlparse(url)
-                            domain = parsed_url.netloc
-                            return domain.split(".")[-1]
-
-                        new_domain_extract = extract_domain(str(final_url))
-
-                        if get_first or msg.ask(f"\n[cyan]Do you want to auto update site[white] [red]'{site_name}'[cyan] with domain[white] [red]'{new_domain_extract}'.", choices=["y", "n"], default="y").lower() == "y":
-                            # Update domain in config.json
-                            config_manager.config['SITE'][site_name]['domain'] = new_domain_extract
-                            config_manager.write_config()
-
-                            return new_domain_extract, f"{base_url}.{new_domain_extract}"
-
-                except Exception as redirect_error:
-                    console.print(f"[red]Error following redirect for {first_url}: {redirect_error}")
-                    continue
-
-        # If no matching URL is found return base domain
-        console.print("[bold red]No valid URL found matching the base URL.[/bold red]")
-        return domain, f"{base_url}.{domain}"
-
-    # Handle successful initial domain check
-    parsed_url = urlparse(str(response_follow.url))
-    parse_domain = parsed_url.netloc
-    tld = parse_domain.split('.')[-1]
-
-    if tld is not None:
-        # Update domain in config.json
-        config_manager.config['SITE'][site_name]['domain'] = tld
-        config_manager.write_config()
-
-        # Return config domain
-        return tld, f"{base_url}.{tld}"
+        console.print(f"[red]Error during search: {str(e)}")
+
+    console.print("[bold red]No valid URLs found matching the base URL.")
+    return domain, f"{base_url}.{domain}"
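Note (illustration only, not part of the diff): the domain-recovery flow above hinges on the split performed by the new get_tld and get_base_domain helpers. A minimal standalone sketch of the two, run against hypothetical URLs:

    # Sketch of the helpers added above, self-contained for illustration.
    from urllib.parse import urlparse, unquote

    def get_tld(url_str):
        """Last label of the host: 'to' for www.example.to."""
        domain = urlparse(unquote(url_str)).netloc.lower()
        if domain.startswith('www.'):
            domain = domain[4:]
        parts = domain.split('.')
        return parts[-1] if len(parts) >= 2 else None

    def get_base_domain(url_str):
        """Host without protocol, www and TLD: 'example' for www.example.to."""
        domain = urlparse(url_str).netloc.lower()
        if domain.startswith('www.'):
            domain = domain[4:]
        parts = domain.split('.')
        return '.'.join(parts[:-1]) if len(parts) > 2 else parts[0]

    # Hypothetical URLs:
    print(get_base_domain("https://www.example.to/watch/123"))   # example
    print(get_base_domain("https://www.example.cx/"))            # example
    print(get_tld("https://www.example.cx/"))                    # cx

A site that hops from example.to to example.cx therefore keeps the same base domain while only the TLD changes, which is exactly the case validate_url reports by returning (True, new_tld).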
@@ -74,7 +74,7 @@ def get_select_title(table_show_manager, media_search_manager):
     table_show_manager.clear()
 
     # Handle user's quit command
-    if last_command == "q":
+    if last_command == "q" or last_command == "quit":
        console.print("\n[red]Quit [white]...")
        sys.exit(0)
 
@@ -11,7 +11,7 @@ import httpx
 
 # Internal utilities
 from StreamingCommunity.Util._jsonConfig import config_manager
-from StreamingCommunity.Util.console import console, Panel, Table
+from StreamingCommunity.Util.console import console, Panel
 from StreamingCommunity.Util.color import Colors
 from StreamingCommunity.Util.os import (
     compute_sha1_hash,
@@ -104,14 +104,15 @@ class HttpClient:
             response = httpx.get(
                 url=url,
                 headers=self.headers,
-                timeout=max_timeout
+                timeout=max_timeout,
+                follow_redirects=True
             )
 
             response.raise_for_status()
             return response.text
 
         except Exception as e:
-            logging.info(f"Request to {url} failed with error: {e}")
+            console.print(f"Request to {url} failed with error: {e}")
             return 404
 
     def get_content(self, url):
@@ -283,6 +284,7 @@ class ContentExtractor:
 
         print("")
 
+
 class DownloadTracker:
     def __init__(self, path_manager: PathManager):
         """
@@ -814,6 +816,9 @@ class HLS_Downloader:
         else:
             console.log("[red]Error: m3u8_index is None")
 
+        # Reset
+        self._reset()
+
     def _clean(self, out_path: str) -> None:
         """
         Cleans up temporary files and folders after downloading and processing.
@@ -952,3 +957,9 @@ class HLS_Downloader:
 
         # Clean up temporary files and directories
        self._clean(self.content_joiner.converted_out_path)
+
+    def _reset(self):
+        global list_MissingTs, m3u8_url_fixer
+
+        m3u8_url_fixer.reset_playlist()
+        list_MissingTs = []
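Note (illustration only, not part of the diff): _reset exists because m3u8_url_fixer and list_MissingTs are module-level objects shared between downloads; without clearing them, a second download in the same process would inherit the previous playlist URL and missing-segment list. A minimal sketch of the pattern, with hypothetical names:

    # Sketch: clearing shared module-level state between runs.
    class UrlFixer:
        def __init__(self):
            self.url_playlist = None

        def reset_playlist(self):
            self.url_playlist = None

    url_fixer = UrlFixer()   # module-level instance, mutated in place
    missing_ts = []          # module-level list, rebound on reset

    def reset_state():
        global missing_ts            # needed: the name is rebound, not mutated
        url_fixer.reset_playlist()   # same object, field cleared in place
        missing_ts = []              # fresh list; old references go stale

    reset_state()   # analogous to calling HLS_Downloader._reset() above

Strictly, only list_MissingTs needs the global declaration (it is rebound to a new list); m3u8_url_fixer is merely mutated through reset_playlist.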
@@ -131,6 +131,7 @@ class M3U8_Segments:
         # Convert the content of the response to hexadecimal and then to bytes
         hex_content = binascii.hexlify(response.content).decode('utf-8')
         byte_content = bytes.fromhex(hex_content)
+        logging.info(f"URI: Hex content: {hex_content}, Byte content: {byte_content}")
 
         #console.print(f"[cyan]Find key: [red]{hex_content}")
         return byte_content
@@ -160,6 +161,7 @@ class M3U8_Segments:
 
         iv = m3u8_parser.keys.get('iv')
         method = m3u8_parser.keys.get('method')
+        logging.info(f"M3U8_Decryption - IV: {iv}, method: {method}")
 
         # Create a decryption object with the key and set the method
         self.decryption = M3U8_Decryption(key, iv, method)
@@ -194,23 +196,28 @@ class M3U8_Segments:
         """
         if self.is_index_url:
 
-            # Send a GET request to retrieve the index M3U8 file
-            response = httpx.get(
-                self.url,
-                headers={'User-Agent': get_headers()},
-                timeout=max_timeout
-            )
-            response.raise_for_status()
+            try:
 
-            # Save the M3U8 file to the temporary folder
-            path_m3u8_file = os.path.join(self.tmp_folder, "playlist.m3u8")
-            open(path_m3u8_file, "w+").write(response.text)
+                # Send a GET request to retrieve the index M3U8 file
+                response = httpx.get(
+                    self.url,
+                    headers={'User-Agent': get_headers()},
+                    timeout=max_timeout,
+                    follow_redirects=True
+                )
+                response.raise_for_status()
 
-            # Parse the text from the M3U8 index file
-            self.parse_data(response.text)
+                # Save the M3U8 file to the temporary folder
+                path_m3u8_file = os.path.join(self.tmp_folder, "playlist.m3u8")
+                open(path_m3u8_file, "w+").write(response.text)
 
-        else:
+                # Parse the text from the M3U8 index file
+                self.parse_data(response.text)
 
+            except Exception as e:
+                print(f"Error during M3U8 index request: {e}")
+
+        else:
             # Parser data of content of index pass in input to class
             self.parse_data(self.url)
 
@@ -303,7 +310,9 @@ class M3U8_Segments:
 
             except Exception as e:
                 logging.error(f"Decryption failed for segment {index}: {str(e)}")
-                raise
+                self.interrupt_flag.set()  # Interrupt the download process
+                self.stop_event.set()  # Trigger the stopping event for all threads
+                break  # Stop the current task immediately
 
         # Update progress and queue
         self.class_ts_estimator.update_progress_bar(content_size, duration, progress_bar)
@@ -385,7 +394,7 @@ class M3U8_Segments:
                     buffer[index] = segment_content
 
             except queue.Empty:
-                self.current_timeout = min(self.max_timeout, self.current_timeout * 1.5)
+                self.current_timeout = min(self.max_timeout, self.current_timeout * 1.25)
 
                 if self.stop_event.is_set():
                     break
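Note (illustration only, not part of the diff): the queue.Empty handler above grows the writer's poll timeout by a capped multiplicative factor, and this release softens the factor from 1.5 to 1.25. Assuming a 5-second starting timeout and a 30-second max_timeout:

    # Sketch: capped multiplicative back-off (start and cap are assumed values).
    timeout, max_timeout = 5.0, 30.0
    for attempt in range(1, 11):
        timeout = min(max_timeout, timeout * 1.25)
        print(f"empty poll {attempt}: next wait {timeout:.2f}s")
    # 6.25, 7.81, 9.77, 12.21, 15.26, 19.07, 23.84, 29.80, 30.00, 30.00

With the old 1.5 factor the cap is reached on the fifth empty poll; at 1.25 it takes nine, so the timeout ramps up more gradually before saturating.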
@@ -532,10 +541,14 @@ class M3U8_Segments:
         progress_bar.close()
 
         # Final verification
-        final_completion = (len(self.downloaded_segments) / total_segments) * 100
-        if final_completion < 99.9:  # Less than 99.9% complete
-            missing = set(range(total_segments)) - self.downloaded_segments
-            raise Exception(f"Download incomplete ({final_completion:.1f}%). Missing segments: {sorted(missing)}")
+        try:
+            final_completion = (len(self.downloaded_segments) / total_segments) * 100
+            if final_completion < 99.9:  # Less than 99.9% complete
+                missing = set(range(total_segments)) - self.downloaded_segments
+                raise Exception(f"Download incomplete ({final_completion:.1f}%). Missing segments: {sorted(missing)}")
+
+        except:
+            pass
 
         # Verify output file
         if not os.path.exists(self.tmp_file_path):
@@ -545,15 +558,16 @@ class M3U8_Segments:
         if file_size == 0:
             raise Exception("Output file is empty")
 
-        # Display additional
-        if self.info_nRetry >= len(self.segments) * (1/3.33):
+        # Display additional info when there is missing stream file
+        if self.info_nFailed > 0:
 
             # Get expected time
             ex_hours, ex_minutes, ex_seconds = format_duration(self.expected_real_time_s)
             ex_formatted_duration = f"[yellow]{int(ex_hours)}[red]h [yellow]{int(ex_minutes)}[red]m [yellow]{int(ex_seconds)}[red]s"
             console.print(f"[cyan]Max retry per URL[white]: [green]{self.info_maxRetry}[green] [white]| [cyan]Total retry done[white]: [green]{self.info_nRetry}[green] [white]| [cyan]Missing TS: [red]{self.info_nFailed} [white]| [cyan]Duration: {print_duration_table(self.tmp_file_path, None, True)} [white]| [cyan]Expected duation: {ex_formatted_duration} \n")
 
-            console.print("[yellow]⚠ Warning:[/yellow] Too many retries detected! Consider reducing the number of [cyan]workers[/cyan] in the [magenta]config.json[/magenta] file. This will impact [bold]performance[/bold]. \n")
+            if self.info_nRetry >= len(self.segments) * 0.3:
+                console.print("[yellow]⚠ Warning:[/yellow] Too many retries detected! Consider reducing the number of [cyan]workers[/cyan] in the [magenta]config.json[/magenta] file. This will impact [bold]performance[/bold]. \n")
 
         # Info to return
         return {'type': type, 'nFailed': self.info_nFailed}
@@ -139,9 +139,8 @@ class TOR_downloader:
 
         # Format and print the torrent information
         console.print("\n[bold green]🔗 Dettagli Torrent Aggiunto:[/bold green]")
-        console.print(f"[yellow]Nome:[/yellow] {torrent_info.get('name', torrent_name)}")
+        console.print(f"[yellow]Name:[/yellow] {torrent_info.get('name', torrent_name)}")
         console.print(f"[yellow]Hash:[/yellow] {torrent_info['hash']}")
-        console.print(f"[yellow]Dimensione:[/yellow] {internet_manager.format_file_size(torrent_info.get('size'))}")
         print()
 
         # Save the hash and the path for later use
@@ -288,7 +287,8 @@ class TOR_downloader:
                 raise
 
             # Delete the torrent data
-            #self.qb.delete_permanently(self.qb.torrents()[-1]['hash'])
+            time.sleep(5)
+            self.qb.delete_permanently(self.qb.torrents()[-1]['hash'])
             return True
 
         except Exception as e:
@@ -61,6 +61,7 @@ if crypto_installed:
             bytes: The decrypted content.
         """
         start = time.perf_counter_ns()
+        #logging.info(f"Ciphertext: {ciphertext}")
 
         # Decrypt based on encryption method
         if self.method in {"AES", "AES-128"}:
@@ -199,7 +199,7 @@ class M3U8_Ts_Estimator:
 
         if TQDM_USE_LARGE_BAR:
             speed_data = self.get_average_speed()
-            logging.debug(f"Speed data for progress bar: {speed_data}")
+            #logging.debug(f"Speed data for progress bar: {speed_data}")
 
             if len(speed_data) >= 2:
                 average_internet_speed = speed_data[0]
@@ -223,7 +223,7 @@ class M3U8_Ts_Estimator:
             )
 
             progress_counter.set_postfix_str(progress_str)
-            logging.debug(f"Updated progress bar: {progress_str}")
+            #logging.debug(f"Updated progress bar: {progress_str}")
 
         except Exception as e:
             logging.error(f"Error updating progress bar: {str(e)}")
@@ -49,4 +49,10 @@ class M3U8_UrlFix:
         full_url = urljoin(base_url, url_resource)
 
         return full_url
+
+    def reset_playlist(self) -> None:
+        """
+        Reset the M3U8 playlist URL to its default state (None).
+        """
+        self.url_playlist = None
 
@@ -75,7 +75,7 @@ def get_select_title(table_show_manager, generic_obj):
     table_show_manager.clear()
 
     # Handle user's quit command
-    if last_command == "q":
+    if last_command == "q" or last_command == "quit":
        Console.print("\n[red]Quit [white]...")
        sys.exit(0)
 
@@ -1,5 +1,5 @@
 __title__ = 'StreamingCommunity'
-__version__ = '2.2.0'
+__version__ = '2.4.0'
 __author__ = 'Lovi-0'
 __description__ = 'A command-line program to download film'
 __copyright__ = 'Copyright 2024'