StreamingCommunity 3.2.1-py3-none-any.whl → 3.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic.
- StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +4 -0
- StreamingCommunity/Api/Player/hdplayer.py +2 -2
- StreamingCommunity/Api/Player/mixdrop.py +1 -1
- StreamingCommunity/Api/Player/vixcloud.py +4 -5
- StreamingCommunity/Api/Site/altadefinizione/film.py +2 -2
- StreamingCommunity/Api/Site/altadefinizione/series.py +1 -1
- StreamingCommunity/Api/Site/animeunity/serie.py +1 -1
- StreamingCommunity/Api/Site/animeworld/film.py +1 -1
- StreamingCommunity/Api/Site/animeworld/serie.py +1 -2
- StreamingCommunity/Api/Site/cb01new/film.py +1 -1
- StreamingCommunity/Api/Site/crunchyroll/__init__.py +103 -0
- StreamingCommunity/Api/Site/crunchyroll/film.py +82 -0
- StreamingCommunity/Api/Site/crunchyroll/series.py +186 -0
- StreamingCommunity/Api/Site/crunchyroll/site.py +113 -0
- StreamingCommunity/Api/Site/crunchyroll/util/ScrapeSerie.py +238 -0
- StreamingCommunity/Api/Site/crunchyroll/util/get_license.py +227 -0
- StreamingCommunity/Api/Site/guardaserie/series.py +1 -2
- StreamingCommunity/Api/Site/guardaserie/site.py +1 -2
- StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +9 -8
- StreamingCommunity/Api/Site/mediasetinfinity/__init__.py +96 -0
- StreamingCommunity/Api/Site/mediasetinfinity/film.py +85 -0
- StreamingCommunity/Api/Site/mediasetinfinity/series.py +185 -0
- StreamingCommunity/Api/Site/mediasetinfinity/site.py +112 -0
- StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py +259 -0
- StreamingCommunity/Api/Site/mediasetinfinity/util/fix_mpd.py +64 -0
- StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py +214 -0
- StreamingCommunity/Api/Site/raiplay/film.py +2 -2
- StreamingCommunity/Api/Site/raiplay/series.py +2 -1
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +6 -17
- StreamingCommunity/Api/Site/streamingcommunity/film.py +3 -3
- StreamingCommunity/Api/Site/streamingcommunity/series.py +11 -11
- StreamingCommunity/Api/Site/streamingcommunity/site.py +2 -4
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +3 -6
- StreamingCommunity/Api/Site/streamingwatch/__init__.py +6 -14
- StreamingCommunity/Api/Site/streamingwatch/film.py +3 -3
- StreamingCommunity/Api/Site/streamingwatch/series.py +9 -9
- StreamingCommunity/Api/Site/streamingwatch/site.py +5 -7
- StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +2 -2
- StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py +131 -0
- StreamingCommunity/Lib/Downloader/DASH/decrypt.py +79 -0
- StreamingCommunity/Lib/Downloader/DASH/downloader.py +218 -0
- StreamingCommunity/Lib/Downloader/DASH/parser.py +249 -0
- StreamingCommunity/Lib/Downloader/DASH/segments.py +332 -0
- StreamingCommunity/Lib/Downloader/HLS/downloader.py +10 -30
- StreamingCommunity/Lib/Downloader/HLS/segments.py +146 -263
- StreamingCommunity/Lib/Downloader/MP4/downloader.py +0 -5
- StreamingCommunity/Lib/FFmpeg/capture.py +3 -3
- StreamingCommunity/Lib/FFmpeg/command.py +1 -1
- StreamingCommunity/TelegramHelp/config.json +3 -7
- StreamingCommunity/Upload/version.py +1 -1
- StreamingCommunity/Util/bento4_installer.py +191 -0
- StreamingCommunity/Util/config_json.py +1 -1
- StreamingCommunity/Util/headers.py +0 -3
- StreamingCommunity/Util/os.py +36 -46
- StreamingCommunity/__init__.py +2 -1
- StreamingCommunity/run.py +11 -10
- {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.7.dist-info}/METADATA +7 -9
- streamingcommunity-3.2.7.dist-info/RECORD +111 -0
- StreamingCommunity/Api/Site/1337xx/__init__.py +0 -72
- StreamingCommunity/Api/Site/1337xx/site.py +0 -82
- StreamingCommunity/Api/Site/1337xx/title.py +0 -61
- StreamingCommunity/Lib/Proxies/proxy.py +0 -72
- streamingcommunity-3.2.1.dist-info/RECORD +0 -96
- {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.7.dist-info}/WHEEL +0 -0
- {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.7.dist-info}/entry_points.txt +0 -0
- {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.7.dist-info}/licenses/LICENSE +0 -0
- {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.7.dist-info}/top_level.txt +0 -0
StreamingCommunity/Lib/Downloader/DASH/parser.py (new file)
@@ -0,0 +1,249 @@
+# 25.07.25
+
+from urllib.parse import urljoin
+import xml.etree.ElementTree as ET
+
+
+# External library
+import httpx
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+
+
+# Variable
+console = Console()
+max_timeout = config_manager.get_int('REQUESTS', 'timeout')
+
+
+class MPDParser:
+    @staticmethod
+    def get_best(representations):
+        """
+        Returns the video representation with the highest resolution/bandwidth, or audio with highest bandwidth.
+        """
+        videos = [r for r in representations if r['type'] == 'video']
+        audios = [r for r in representations if r['type'] == 'audio']
+        if videos:
+            return max(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
+        elif audios:
+            return max(audios, key=lambda r: r['bandwidth'])
+        return None
+
+    @staticmethod
+    def get_worst(representations):
+        """
+        Returns the video representation with the lowest resolution/bandwidth, or audio with lowest bandwidth.
+        """
+        videos = [r for r in representations if r['type'] == 'video']
+        audios = [r for r in representations if r['type'] == 'audio']
+        if videos:
+            return min(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
+        elif audios:
+            return min(audios, key=lambda r: r['bandwidth'])
+        return None
+
+    @staticmethod
+    def get_list(representations, type_filter=None):
+        """
+        Returns the list of representations filtered by type ('video', 'audio', etc.).
+        """
+        if type_filter:
+            return [r for r in representations if r['type'] == type_filter]
+        return representations
+
+    def __init__(self, mpd_url):
+        self.mpd_url = mpd_url
+        self.pssh = None
+        self.representations = []
+        self.base_url = mpd_url.rsplit('/', 1)[0] + '/'
+
+    def parse(self, custom_headers):
+        response = httpx.get(self.mpd_url, headers=custom_headers, timeout=max_timeout)
+        response.raise_for_status()
+
+        root = ET.fromstring(response.content)
+
+        # Properly handle default namespace
+        ns = {}
+        if root.tag.startswith('{'):
+            uri = root.tag[1:].split('}')[0]
+            ns['mpd'] = uri
+            ns['cenc'] = 'urn:mpeg:cenc:2013'
+
+        # Extract PSSH dynamically: take the first <cenc:pssh> found
+        for protection in root.findall('.//mpd:ContentProtection', ns):
+            pssh_element = protection.find('cenc:pssh', ns)
+            if pssh_element is not None and pssh_element.text:
+                self.pssh = pssh_element.text
+                break
+
+        if not self.pssh:
+            console.print("[bold red]PSSH not found in MPD![/bold red]")
+
+        # Extract representations
+        for adapt_set in root.findall('.//mpd:AdaptationSet', ns):
+            mime_type = adapt_set.get('mimeType', '')
+            lang = adapt_set.get('lang', '')
+
+            # Find SegmentTemplate at AdaptationSet level (DASH spec allows this)
+            seg_template = adapt_set.find('mpd:SegmentTemplate', ns)
+
+            for rep in adapt_set.findall('mpd:Representation', ns):
+                rep_id = rep.get('id')
+                bandwidth = rep.get('bandwidth')
+                codecs = rep.get('codecs')
+                width = rep.get('width')
+                height = rep.get('height')
+
+                # Try to find SegmentTemplate at Representation level (overrides AdaptationSet)
+                rep_seg_template = rep.find('mpd:SegmentTemplate', ns)
+                seg_tmpl = rep_seg_template if rep_seg_template is not None else seg_template
+                if seg_tmpl is None:
+                    continue
+
+                init = seg_tmpl.get('initialization')
+                media = seg_tmpl.get('media')
+                start_number = int(seg_tmpl.get('startNumber', 1))
+
+                # Use BaseURL from Representation if present, else fallback to self.base_url
+                base_url_elem = rep.find('mpd:BaseURL', ns)
+                base_url = base_url_elem.text if base_url_elem is not None else self.base_url
+
+                # Replace $RepresentationID$ in init/media if present
+                if init and '$RepresentationID$' in init:
+                    init = init.replace('$RepresentationID$', rep_id)
+                if media and '$RepresentationID$' in media:
+                    media = media.replace('$RepresentationID$', rep_id)
+
+                init_url = urljoin(base_url, init) if init else None
+
+                # Calculate segments from timeline
+                segments = []
+                seg_timeline = seg_tmpl.find('mpd:SegmentTimeline', ns)
+                if seg_timeline is not None:
+                    segment_number = start_number
+                    for s in seg_timeline.findall('mpd:S', ns):
+                        repeat = int(s.get('r', 0))
+
+                        # Always append at least one segment
+                        segments.append(segment_number)
+                        segment_number += 1
+                        for _ in range(repeat):
+                            segments.append(segment_number)
+                            segment_number += 1
+
+                if not segments:
+                    segments = list(range(start_number, start_number + 100))
+
+                # Replace $Number$ and $RepresentationID$ in media URL
+                media_urls = []
+                for n in segments:
+                    url = media
+                    if '$Number$' in url:
+                        url = url.replace('$Number$', str(n))
+                    if '$RepresentationID$' in url:
+                        url = url.replace('$RepresentationID$', rep_id)
+                    media_urls.append(urljoin(base_url, url))
+
+                self.representations.append({
+                    'id': rep_id,
+                    'type': mime_type.split('/')[0] if mime_type else (rep.get('mimeType', '').split('/')[0] if rep.get('mimeType') else 'unknown'),
+                    'codec': codecs,
+                    'bandwidth': int(bandwidth) if bandwidth else 0,
+                    'width': int(width) if width else 0,
+                    'height': int(height) if height else 0,
+                    'language': lang,
+                    'init_url': init_url,
+                    'segment_urls': media_urls
+                })
+
+    def get_resolutions(self):
+        """Return list of video representations with their resolutions."""
+        return [
+            rep for rep in self.representations
+            if rep['type'] == 'video'
+        ]
+
+    def get_audios(self):
+        """Return list of audio representations."""
+        return [
+            rep for rep in self.representations
+            if rep['type'] == 'audio'
+        ]
+
+    def get_best_video(self):
+        """Return the best video representation (highest resolution, then bandwidth)."""
+        videos = self.get_resolutions()
+        if not videos:
+            return None
+
+        # Sort by (height, width, bandwidth)
+        return max(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
+
+    def get_best_audio(self):
+        """Return the best audio representation (highest bandwidth)."""
+        audios = self.get_audios()
+        if not audios:
+            return None
+        return max(audios, key=lambda r: r['bandwidth'])
+
+    def select_video(self, force_resolution="Best"):
+        """
+        Select a video representation based on the requested resolution.
+        Returns: (selected_video, list_available_resolution, filter_custom_resolution, downloadable_video)
+        """
+        video_reps = self.get_resolutions()
+        list_available_resolution = [
+            f"{rep['width']}x{rep['height']}" for rep in video_reps
+        ]
+        force_resolution_l = (force_resolution or "Best").lower()
+
+        if force_resolution_l == "best":
+            selected_video = self.get_best_video()
+            filter_custom_resolution = "Best"
+
+        elif force_resolution_l == "worst":
+            selected_video = MPDParser.get_worst(video_reps)
+            filter_custom_resolution = "Worst"
+
+        else:
+            selected_video = self.get_best_video()
+            filter_custom_resolution = "Best"
+
+        downloadable_video = f"{selected_video['width']}x{selected_video['height']}" if selected_video else "N/A"
+        return selected_video, list_available_resolution, filter_custom_resolution, downloadable_video
+
+    def select_audio(self, preferred_audio_langs=None):
+        """
+        Select an audio representation based on preferred languages.
+        Returns: (selected_audio, list_available_audio_langs, filter_custom_audio, downloadable_audio)
+        """
+        audio_reps = self.get_audios()
+        list_available_audio_langs = [
+            rep['language'] or "None" for rep in audio_reps
+        ]
+
+        selected_audio = None
+        filter_custom_audio = "First"
+
+        if preferred_audio_langs:
+
+            # Search for the first available language in order of preference
+            for lang in preferred_audio_langs:
+                for rep in audio_reps:
+                    if (rep['language'] or "None").lower() == lang.lower():
+                        selected_audio = rep
+                        filter_custom_audio = lang
+                        break
+                if selected_audio:
+                    break
+            if not selected_audio:
+                selected_audio = self.get_best_audio()
+        else:
+            selected_audio = self.get_best_audio()
+
+        downloadable_audio = selected_audio['language'] or "None" if selected_audio else "N/A"
+        return selected_audio, list_available_audio_langs, filter_custom_audio, downloadable_audio
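Note: the hunk above adds the new MPD manifest parser used by the DASH downloader. As a rough, hypothetical usage sketch (not part of the diff), based only on the methods shown above, the class appears to be driven like this; the manifest URL and headers are placeholders:

    parser = MPDParser("https://example.org/stream/manifest.mpd")      # hypothetical URL
    parser.parse(custom_headers={"User-Agent": "Mozilla/5.0"})          # fetch and parse the manifest
    video, res_list, res_filter, chosen_res = parser.select_video(force_resolution="Best")
    audio, lang_list, lang_filter, chosen_lang = parser.select_audio(preferred_audio_langs=["ita", "eng"])
    print(chosen_res, chosen_lang, parser.pssh)                         # pssh is presumably passed on for license/CDM handling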
StreamingCommunity/Lib/Downloader/DASH/segments.py (new file)
@@ -0,0 +1,332 @@
+# 25.07.25
+
+import os
+import asyncio
+
+
+# External libraries
+import httpx
+from tqdm import tqdm
+
+
+# Internal utilities
+from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Lib.M3U8.estimator import M3U8_Ts_Estimator
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.color import Colors
+
+
+# Config
+REQUEST_MAX_RETRY = config_manager.get_int('REQUESTS', 'max_retry')
+DEFAULT_VIDEO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_video_workers')
+DEFAULT_AUDIO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_audio_workers')
+SEGMENT_MAX_TIMEOUT = config_manager.get_int("M3U8_DOWNLOAD", "segment_timeout")
+
+
+class MPD_Segments:
+    def __init__(self, tmp_folder: str, representation: dict, pssh: str = None):
+        """
+        Initialize MPD_Segments with temp folder, representation, and optional pssh.
+        """
+        self.tmp_folder = tmp_folder
+        self.selected_representation = representation
+        self.pssh = pssh
+        self.download_interrupted = False
+        self.info_nFailed = 0
+
+    def get_concat_path(self, output_dir: str = None):
+        """
+        Get the path for the concatenated output file.
+        """
+        rep_id = self.selected_representation['id']
+        return os.path.join(output_dir or self.tmp_folder, f"{rep_id}_encrypted.m4s")
+
+    def download_streams(self, output_dir: str = None):
+        """
+        Synchronous wrapper for download_segments, compatible with legacy calls.
+        """
+        concat_path = self.get_concat_path(output_dir)
+
+        # Run async download in sync mode
+        try:
+            asyncio.run(self.download_segments(output_dir=output_dir))
+
+        except KeyboardInterrupt:
+            self.download_interrupted = True
+            print("\n[red]Download interrupted by user (Ctrl+C).")
+
+        return {
+            "concat_path": concat_path,
+            "representation_id": self.selected_representation['id'],
+            "pssh": self.pssh
+        }
+
+    async def download_segments(self, output_dir: str = None, concurrent_downloads: int = 8, description: str = "DASH"):
+        """
+        Download and concatenate all segments (including init) asynchronously and in order.
+        """
+        rep = self.selected_representation
+        rep_id = rep['id']
+        segment_urls = rep['segment_urls']
+        init_url = rep.get('init_url')
+
+        os.makedirs(output_dir or self.tmp_folder, exist_ok=True)
+        concat_path = os.path.join(output_dir or self.tmp_folder, f"{rep_id}_encrypted.m4s")
+
+        # Determine stream type (video/audio) for progress bar
+        stream_type = rep.get('type', description)
+        progress_bar = tqdm(
+            total=len(segment_urls) + 1,
+            desc=f"Downloading {rep_id}",
+            bar_format=self._get_bar_format(stream_type),
+            mininterval=0.6,
+            maxinterval=1.0
+        )
+
+        # Define semaphore for concurrent downloads
+        semaphore = asyncio.Semaphore(concurrent_downloads)
+
+        # Initialize estimator
+        estimator = M3U8_Ts_Estimator(total_segments=len(segment_urls) + 1)
+
+        results = [None] * len(segment_urls)
+        self.downloaded_segments = set()
+        self.info_nFailed = 0
+        self.download_interrupted = False
+        self.info_nRetry = 0
+
+        try:
+            async with httpx.AsyncClient(timeout=SEGMENT_MAX_TIMEOUT) as client:
+                # Download init segment
+                await self._download_init_segment(client, init_url, concat_path, estimator, progress_bar)
+
+                # Download all segments (first batch)
+                await self._download_segments_batch(
+                    client, segment_urls, results, semaphore, REQUEST_MAX_RETRY, estimator, progress_bar
+                )
+
+                # Retry failed segments
+                await self._retry_failed_segments(
+                    client, segment_urls, results, semaphore, REQUEST_MAX_RETRY, estimator, progress_bar
+                )
+
+                # Write all results to file
+                self._write_results_to_file(concat_path, results)
+
+        except KeyboardInterrupt:
+            self.download_interrupted = True
+            print("\n[red]Download interrupted by user (Ctrl+C).")
+
+        finally:
+            self._cleanup_resources(None, progress_bar)
+
+        self._verify_download_completion()
+        return self._generate_results(stream_type)
+
+    async def _download_init_segment(self, client, init_url, concat_path, estimator, progress_bar):
+        """
+        Download the init segment and update progress/estimator.
+        """
+        if not init_url:
+            with open(concat_path, 'wb') as outfile:
+                pass
+            return
+
+        try:
+            headers = {'User-Agent': get_userAgent()}
+            response = await client.get(init_url, headers=headers)
+
+            with open(concat_path, 'wb') as outfile:
+                if response.status_code == 200:
+                    outfile.write(response.content)
+                    # Update estimator with init segment size
+                    estimator.add_ts_file(len(response.content))
+
+            progress_bar.update(1)
+
+            # Update progress bar with estimated info
+            estimator.update_progress_bar(len(response.content), progress_bar)
+
+        except Exception as e:
+            progress_bar.close()
+            raise RuntimeError(f"Error downloading init segment: {e}")
+
+    async def _download_segments_batch(self, client, segment_urls, results, semaphore, max_retry, estimator, progress_bar):
+        """
+        Download a batch of segments and update results.
+        """
+        async def download_single(url, idx):
+            async with semaphore:
+                headers = {'User-Agent': get_userAgent()}
+                for attempt in range(max_retry):
+                    try:
+                        resp = await client.get(url, headers=headers)
+                        if resp.status_code == 200:
+                            return idx, resp.content, attempt
+                        else:
+                            await asyncio.sleep(1.1 * (2 ** attempt))
+                    except Exception:
+                        await asyncio.sleep(1.1 * (2 ** attempt))
+                return idx, b'', max_retry
+
+        # Initial download attempt
+        tasks = [download_single(url, i) for i, url in enumerate(segment_urls)]
+
+        for coro in asyncio.as_completed(tasks):
+            try:
+                idx, data, nretry = await coro
+                results[idx] = data
+                if data and len(data) > 0:
+                    self.downloaded_segments.add(idx)
+                else:
+                    self.info_nFailed += 1
+                self.info_nRetry += nretry
+                progress_bar.update(1)
+
+                # Update estimator with segment size
+                estimator.add_ts_file(len(data))
+
+                # Update progress bar with estimated info
+                estimator.update_progress_bar(len(data), progress_bar)
+
+            except KeyboardInterrupt:
+                self.download_interrupted = True
+                print("\n[red]Download interrupted by user (Ctrl+C).")
+                break
+
+    async def _retry_failed_segments(self, client, segment_urls, results, semaphore, max_retry, estimator, progress_bar):
+        """
+        Retry failed segments up to 5 times.
+        """
+        max_global_retries = 5
+        global_retry_count = 0
+
+        while self.info_nFailed > 0 and global_retry_count < max_global_retries and not self.download_interrupted:
+            failed_indices = [i for i, data in enumerate(results) if not data or len(data) == 0]
+            if not failed_indices:
+                break
+
+            print(f"[yellow]Retrying {len(failed_indices)} failed segments (attempt {global_retry_count+1}/{max_global_retries})...")
+            async def download_single(url, idx):
+                async with semaphore:
+                    headers = {'User-Agent': get_userAgent()}
+
+                    for attempt in range(max_retry):
+                        try:
+                            resp = await client.get(url, headers=headers)
+
+                            if resp.status_code == 200:
+                                return idx, resp.content, attempt
+                            else:
+                                await asyncio.sleep(1.1 * (2 ** attempt))
+
+                        except Exception:
+                            await asyncio.sleep(1.1 * (2 ** attempt))
+                    return idx, b'', max_retry
+
+            retry_tasks = [download_single(segment_urls[i], i) for i in failed_indices]
+
+            # Reset nFailed for this round
+            nFailed_this_round = 0
+            for coro in asyncio.as_completed(retry_tasks):
+                try:
+                    idx, data, nretry = await coro
+
+                    if data and len(data) > 0:
+                        results[idx] = data
+                        self.downloaded_segments.add(idx)
+                    else:
+                        nFailed_this_round += 1
+
+                    self.info_nRetry += nretry
+                    progress_bar.update(0)  # No progress bar increment, already counted
+                    estimator.add_ts_file(len(data))
+                    estimator.update_progress_bar(len(data), progress_bar)
+
+                except KeyboardInterrupt:
+                    self.download_interrupted = True
+                    print("\n[red]Download interrupted by user (Ctrl+C).")
+                    break
+            self.info_nFailed = nFailed_this_round
+            global_retry_count += 1
+
+    def _write_results_to_file(self, concat_path, results):
+        """
+        Write all downloaded segments to the output file.
+        """
+        with open(concat_path, 'ab') as outfile:
+            for data in results:
+                if data:
+                    outfile.write(data)
+
+    def _get_bar_format(self, description: str) -> str:
+        """
+        Generate platform-appropriate progress bar format.
+        """
+        return (
+            f"{Colors.YELLOW}[MPD] ({Colors.CYAN}{description}{Colors.WHITE}): "
+            f"{Colors.RED}{{percentage:.2f}}% "
+            f"{Colors.MAGENTA}{{bar}} "
+            f"{Colors.YELLOW}{{elapsed}}{Colors.WHITE} < {Colors.CYAN}{{remaining}}{Colors.WHITE}{{postfix}}{Colors.WHITE}"
+        )
+
+    def _get_worker_count(self, stream_type: str) -> int:
+        """
+        Calculate optimal parallel workers based on stream type and infrastructure.
+        """
+        base_workers = {
+            'video': DEFAULT_VIDEO_WORKERS,
+            'audio': DEFAULT_AUDIO_WORKERS
+        }.get(stream_type.lower(), 1)
+        return base_workers
+
+    def _generate_results(self, stream_type: str) -> dict:
+        """
+        Package final download results.
+        """
+        return {
+            'type': stream_type,
+            'nFailed': getattr(self, 'info_nFailed', 0),
+            'stopped': getattr(self, 'download_interrupted', False)
+        }
+
+    def _verify_download_completion(self) -> None:
+        """
+        Validate final download integrity.
+        """
+        total = len(self.selected_representation['segment_urls'])
+        completed = getattr(self, 'downloaded_segments', set())
+
+        # If interrupted, skip raising error
+        if self.download_interrupted:
+            return
+
+        if total == 0:
+            return
+
+        if len(completed) / total < 0.999:
+            missing = sorted(set(range(total)) - completed)
+            raise RuntimeError(f"Download incomplete ({len(completed)/total:.1%}). Missing segments: {missing}")
+
+    def _cleanup_resources(self, writer_thread, progress_bar: tqdm) -> None:
+        """
+        Ensure resource cleanup and final reporting.
+        """
+        progress_bar.close()
+        if getattr(self, 'info_nFailed', 0) > 0:
+            self._display_error_summary()
+
+        self.buffer = {}
+        self.expected_index = 0
+
+    def _display_error_summary(self) -> None:
+        """
+        Generate final error report.
+        """
+        print(f"\n[cyan]Retry Summary: "
+              f"[white]Max retries: [green]{getattr(self, 'info_maxRetry', 0)} "
+              f"[white]Total retries: [green]{getattr(self, 'info_nRetry', 0)} "
+              f"[white]Failed segments: [red]{getattr(self, 'info_nFailed', 0)}")
+
+        if getattr(self, 'info_nRetry', 0) > len(self.selected_representation['segment_urls']) * 0.3:
+            print("[yellow]Warning: High retry count detected. Consider reducing worker count in config.")
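Note: the hunk above adds the segment downloader that consumes a representation selected by MPDParser. A hypothetical sketch (not part of the diff) of how the two new classes appear to fit together, using placeholder paths and the `parser`/`video` objects from the previous sketch:

    downloader = MPD_Segments(
        tmp_folder="tmp/dash",            # placeholder temp folder
        representation=video,             # dict returned by MPDParser.select_video()
        pssh=parser.pssh                  # forwarded for later decryption
    )
    result = downloader.download_streams()          # sync wrapper around the async download
    print(result["concat_path"], result["pssh"])    # concat_path points at the '<id>_encrypted.m4s' output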
StreamingCommunity/Lib/Downloader/HLS/downloader.py
@@ -17,7 +17,7 @@ from rich.panel import Panel
 # Internal utilities
 from StreamingCommunity.Util.config_json import config_manager
 from StreamingCommunity.Util.headers import get_userAgent
-from StreamingCommunity.Util.os import
+from StreamingCommunity.Util.os import os_manager, internet_manager
 from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
 
 
@@ -33,15 +33,12 @@ from .segments import M3U8_Segments
 
 
 # Config
-ENABLE_AUDIO = config_manager.get_bool('M3U8_DOWNLOAD', 'download_audio')
 ENABLE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'download_subtitle')
 DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
 DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
-MERGE_AUDIO = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_audio')
 MERGE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_subs')
 CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
 FILTER_CUSTOM_REOLUTION = str(config_manager.get('M3U8_PARSER', 'force_resolution')).strip().lower()
-GET_ONLY_LINK = config_manager.get_bool('M3U8_PARSER', 'get_only_link')
 RETRY_LIMIT = config_manager.get_int('REQUESTS', 'max_retry')
 MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
 TELEGRAM_BOT = config_manager.get_bool('DEFAULT', 'telegram_bot')
@@ -66,6 +63,7 @@ class HLSClient:
         Response content/text or None if all retries fail
         """
         client = httpx.Client(headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
+
         for attempt in range(RETRY_LIMIT):
             try:
                 response = client.get(url)
@@ -96,11 +94,6 @@ class PathManager:
         Ensures output path is valid and follows expected format.
         Creates a hash-based filename if no path is provided.
         """
-        if not path:
-            root = config_manager.get('OUT_FOLDER', 'root_path')
-            hash_name = compute_sha1_hash(self.m3u8_url) + ".mp4"
-            return os.path.join(root, "undefined", hash_name)
-
         if not path.endswith(".mp4"):
             path += ".mp4"
 
@@ -172,12 +165,11 @@ class M3U8Manager:
             logging.error("Resolution not recognized.")
             self.video_url, self.video_res = self.parser._video.get_best_uri()
 
-
-
-        self.
-
-
-        ]
+        # Audio info
+        self.audio_streams = [
+            s for s in (self.parser._audio.get_all_uris_and_names() or [])
+            if s.get('language') in DOWNLOAD_SPECIFIC_AUDIO
+        ]
 
         self.sub_streams = []
         if ENABLE_SUBTITLE:
@@ -309,8 +301,8 @@ class DownloadManager:
         Downloads all selected streams (video, audio, subtitles).
         """
         return_stopped = False
-
         video_file = os.path.join(self.temp_dir, 'video', '0.ts')
+
         if not os.path.exists(video_file):
             if self.download_video(video_url):
                 if not return_stopped:
@@ -375,7 +367,7 @@ class MergeManager:
             )
 
         else:
-            if
+            if self.audio_streams:
                 audio_tracks = [{
                     'path': os.path.join(self.temp_dir, 'audio', a['language'], '0.ts'),
                     'name': a['language']
@@ -445,19 +437,7 @@ class HLS_Downloader:
             if TELEGRAM_BOT:
                 bot.send_message(f"Contenuto già scaricato!", None)
             return response
-
-        if GET_ONLY_LINK:
-            console.print(f"URL: [bold red]{self.m3u8_url}[/bold red]")
-            return {
-                'path': None,
-                'url': self.m3u8_url,
-                'is_master': getattr(self.m3u8_manager, 'is_master', None),
-                'msg': None,
-                'error': None,
-                'stopped': True
-            }
-
-
+
         self.path_manager.setup_directories()
 
         # Parse M3U8 and determine if it's a master playlist
|