StreamingCommunity-3.3.9-py3-none-any.whl → StreamingCommunity-3.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of StreamingCommunity might be problematic.

Files changed (60)
  1. StreamingCommunity/Api/Player/hdplayer.py +0 -5
  2. StreamingCommunity/Api/Player/mediapolisvod.py +4 -13
  3. StreamingCommunity/Api/Player/supervideo.py +3 -8
  4. StreamingCommunity/Api/Player/sweetpixel.py +1 -9
  5. StreamingCommunity/Api/Player/vixcloud.py +5 -16
  6. StreamingCommunity/Api/Site/altadefinizione/film.py +4 -15
  7. StreamingCommunity/Api/Site/altadefinizione/site.py +2 -7
  8. StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py +2 -7
  9. StreamingCommunity/Api/Site/animeunity/site.py +9 -24
  10. StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +11 -27
  11. StreamingCommunity/Api/Site/animeworld/film.py +4 -2
  12. StreamingCommunity/Api/Site/animeworld/site.py +3 -11
  13. StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py +1 -4
  14. StreamingCommunity/Api/Site/crunchyroll/film.py +4 -5
  15. StreamingCommunity/Api/Site/crunchyroll/series.py +2 -3
  16. StreamingCommunity/Api/Site/crunchyroll/site.py +2 -9
  17. StreamingCommunity/Api/Site/crunchyroll/util/ScrapeSerie.py +5 -27
  18. StreamingCommunity/Api/Site/crunchyroll/util/get_license.py +11 -26
  19. StreamingCommunity/Api/Site/guardaserie/site.py +4 -12
  20. StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +3 -10
  21. StreamingCommunity/Api/Site/mediasetinfinity/film.py +11 -12
  22. StreamingCommunity/Api/Site/mediasetinfinity/series.py +1 -2
  23. StreamingCommunity/Api/Site/mediasetinfinity/site.py +3 -11
  24. StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py +39 -50
  25. StreamingCommunity/Api/Site/mediasetinfinity/util/fix_mpd.py +3 -3
  26. StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py +7 -25
  27. StreamingCommunity/Api/Site/raiplay/film.py +6 -7
  28. StreamingCommunity/Api/Site/raiplay/series.py +0 -2
  29. StreamingCommunity/Api/Site/raiplay/site.py +3 -11
  30. StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py +4 -11
  31. StreamingCommunity/Api/Site/raiplay/util/get_license.py +3 -12
  32. StreamingCommunity/Api/Site/streamingcommunity/film.py +5 -16
  33. StreamingCommunity/Api/Site/streamingcommunity/site.py +3 -22
  34. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +11 -26
  35. StreamingCommunity/Api/Site/streamingwatch/__init__.py +1 -0
  36. StreamingCommunity/Api/Site/streamingwatch/film.py +4 -2
  37. StreamingCommunity/Api/Site/streamingwatch/series.py +1 -1
  38. StreamingCommunity/Api/Site/streamingwatch/site.py +4 -18
  39. StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +0 -3
  40. StreamingCommunity/Api/Template/config_loader.py +0 -7
  41. StreamingCommunity/Lib/Downloader/DASH/decrypt.py +54 -1
  42. StreamingCommunity/Lib/Downloader/DASH/downloader.py +131 -54
  43. StreamingCommunity/Lib/Downloader/DASH/parser.py +2 -3
  44. StreamingCommunity/Lib/Downloader/DASH/segments.py +66 -54
  45. StreamingCommunity/Lib/Downloader/HLS/downloader.py +31 -50
  46. StreamingCommunity/Lib/Downloader/HLS/segments.py +23 -28
  47. StreamingCommunity/Lib/FFmpeg/capture.py +37 -5
  48. StreamingCommunity/Lib/FFmpeg/command.py +32 -90
  49. StreamingCommunity/Lib/TMBD/tmdb.py +2 -4
  50. StreamingCommunity/TelegramHelp/config.json +0 -1
  51. StreamingCommunity/Upload/version.py +1 -1
  52. StreamingCommunity/Util/config_json.py +28 -21
  53. StreamingCommunity/Util/http_client.py +28 -0
  54. StreamingCommunity/Util/os.py +16 -6
  55. {streamingcommunity-3.3.9.dist-info → streamingcommunity-3.4.0.dist-info}/METADATA +1 -3
  56. {streamingcommunity-3.3.9.dist-info → streamingcommunity-3.4.0.dist-info}/RECORD +60 -60
  57. {streamingcommunity-3.3.9.dist-info → streamingcommunity-3.4.0.dist-info}/WHEEL +0 -0
  58. {streamingcommunity-3.3.9.dist-info → streamingcommunity-3.4.0.dist-info}/entry_points.txt +0 -0
  59. {streamingcommunity-3.3.9.dist-info → streamingcommunity-3.4.0.dist-info}/licenses/LICENSE +0 -0
  60. {streamingcommunity-3.3.9.dist-info → streamingcommunity-3.4.0.dist-info}/top_level.txt +0 -0
StreamingCommunity/Lib/Downloader/DASH/downloader.py
@@ -1,7 +1,6 @@
 # 25.07.25
 
 import os
-import time
 import shutil
 
 
@@ -13,7 +12,7 @@ from rich.table import Table
 
 # Internal utilities
 from StreamingCommunity.Util.config_json import config_manager
-from StreamingCommunity.Util.os import os_manager, internet_manager
+from StreamingCommunity.Util.os import os_manager, internet_manager, get_wvd_path
 from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Util.headers import get_userAgent
 
@@ -32,11 +31,11 @@ from ...FFmpeg import print_duration_table, join_audios, join_video, join_subtitle
 # Config
 DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
 DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
-ENABLE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'download_subtitle')
 MERGE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_subs')
 FILTER_CUSTOM_REOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
 CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
 RETRY_LIMIT = config_manager.get_int('REQUESTS', 'max_retry')
+EXTENSION_OUTPUT = config_manager.get("M3U8_CONVERSION", "extension")
 
 
 # Variable
@@ -44,18 +43,17 @@ console = Console()
 
 
 class DASH_Downloader:
-    def __init__(self, cdm_device, license_url, mpd_url, mpd_sub_list: list = None, output_path: str = None):
+    def __init__(self, license_url, mpd_url, mpd_sub_list: list = None, output_path: str = None):
         """
         Initialize the DASH Downloader with necessary parameters.
 
         Parameters:
-            - cdm_device (str): Path to the CDM device for decryption.
             - license_url (str): URL to obtain the license for decryption.
            - mpd_url (str): URL of the MPD manifest file.
            - mpd_sub_list (list): List of subtitle dicts with keys: 'language', 'url', 'format'.
            - output_path (str): Path to save the final output file.
         """
-        self.cdm_device = cdm_device
+        self.cdm_device = get_wvd_path()
         self.license_url = license_url
         self.mpd_url = mpd_url
         self.mpd_sub_list = mpd_sub_list or []
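
Note: DASH_Downloader now resolves the Widevine device file itself via get_wvd_path() (imported from StreamingCommunity.Util.os in the hunk above), so call sites simply drop the old cdm_device argument. A minimal usage sketch of the new signature; the URLs and output path below are hypothetical placeholders:

    from StreamingCommunity.Lib.Downloader.DASH.downloader import DASH_Downloader

    downloader = DASH_Downloader(
        license_url="https://example.com/license",   # hypothetical license endpoint
        mpd_url="https://example.com/stream.mpd",    # hypothetical manifest URL
        mpd_sub_list=[{"language": "en", "url": "https://example.com/en.vtt", "format": "vtt"}],
        output_path="output.mp4",
    )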
@@ -196,49 +194,28 @@
         Download subtitle files based on configuration with retry mechanism.
         Returns True if successful or if no subtitles to download, False on critical error.
         """
-        if not ENABLE_SUBTITLE or not self.selected_subs:
-            return True
-
-        headers = {'User-Agent': get_userAgent()}
-        client = create_client(headers=headers)
+        client = create_client(headers={'User-Agent': get_userAgent()})
 
         for sub in self.selected_subs:
-            language = sub.get('language', 'unknown')
-            url = sub.get('url')
-            fmt = sub.get('format', 'vtt')
-
-            if not url:
-                console.print(f"[yellow]Warning: No URL for subtitle {language}[/yellow]")
-                continue
-
-            # Retry mechanism for downloading subtitles
-            success = False
-            for attempt in range(RETRY_LIMIT):
-                try:
-                    # Download subtitle
-                    response = client.get(url)
-                    response.raise_for_status()
-
-                    # Save subtitle file
-                    sub_filename = f"{language}.{fmt}"
-                    sub_path = os.path.join(self.subs_dir, sub_filename)
-
-                    with open(sub_path, 'wb') as f:
-                        f.write(response.content)
-
-                    success = True
-                    break
+            try:
+                language = sub.get('language', 'unknown')
+                fmt = sub.get('format', 'vtt')
+
+                # Download subtitle
+                response = client.get(sub.get('url'))
+                response.raise_for_status()
+
+                # Save subtitle file and make request
+                sub_filename = f"{language}.{fmt}"
+                sub_path = os.path.join(self.subs_dir, sub_filename)
+
+                with open(sub_path, 'wb') as f:
+                    f.write(response.content)
 
-                except Exception as e:
-                    if attempt < RETRY_LIMIT - 1:
-                        console.print(f"[yellow]Attempt {attempt + 1}/{RETRY_LIMIT} failed for subtitle {language}: {e}. Retrying...[/yellow]")
-                        time.sleep(1.5 ** attempt)
-                    else:
-                        console.print(f"[yellow]Warning: Failed to download subtitle {language} after {RETRY_LIMIT} attempts: {e}[/yellow]")
+            except Exception as e:
+                console.print(f"[red]Error downloading subtitle {language}: {e}[/red]")
+                return False
 
-            if not success:
-                continue
-
         return True
 
     def download_and_decrypt(self, custom_headers=None, custom_payload=None):
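
Note: the ENABLE_SUBTITLE gate and the per-file retry loop are gone; download_subtitles() now makes a single attempt per entry and returns False on the first failure instead of warning and continuing. Each entry of self.selected_subs keeps the shape documented in the constructor docstring; the values here are hypothetical:

    sub = {
        'language': 'en',                          # used to name the saved file
        'url': 'https://example.com/subs/en.vtt',  # fetched with one plain GET
        'format': 'vtt',                           # file extension on disk
    }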
@@ -256,7 +233,6 @@
 
         # Fetch keys immediately after obtaining PSSH
         if not self.parser.pssh:
-            console.print("[red]No PSSH found: segments are not encrypted, skipping decryption.")
             self.download_segments(clear=True)
             return True
 
@@ -316,7 +292,7 @@
         # Decrypt video
         decrypted_path = os.path.join(self.decrypted_dir, "video.mp4")
         result_path = decrypt_with_mp4decrypt(
-            encrypted_path, KID, KEY, output_path=decrypted_path
+            "Video", encrypted_path, KID, KEY, output_path=decrypted_path
        )
 
         if not result_path:
@@ -365,7 +341,7 @@
         # Decrypt audio
         decrypted_path = os.path.join(self.decrypted_dir, "audio.mp4")
         result_path = decrypt_with_mp4decrypt(
-            encrypted_path, KID, KEY, output_path=decrypted_path
+            f"Audio {audio_language}", encrypted_path, KID, KEY, output_path=decrypted_path
        )
 
         if not result_path:
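
Note: both call sites now pass a leading label to decrypt_with_mp4decrypt ("Video" here, f"Audio {audio_language}" above), presumably consumed by the expanded decrypt.py (+54 -1 in the file list) for progress output. A hedged sketch of the updated call; the KID/KEY hex strings are hypothetical:

    result_path = decrypt_with_mp4decrypt(
        "Video",                              # new leading label identifying the track
        encrypted_path,
        "0123456789abcdef0123456789abcdef",   # KID (hypothetical)
        "fedcba9876543210fedcba9876543210",   # KEY (hypothetical)
        output_path=decrypted_path,
    )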
@@ -381,9 +357,110 @@
         return True
 
     def download_segments(self, clear=False):
-        # Download segments and concatenate them
-        # clear=True: no decryption needed
-        pass
+        """
+        Download video/audio segments without decryption (for clear content).
+
+        Parameters:
+            clear (bool): If True, content is not encrypted and doesn't need decryption
+        """
+        if not clear:
+            console.print("[yellow]Warning: download_segments called with clear=False[/yellow]")
+            return False
+
+        video_segments_count = 0
+
+        # Download subtitles
+        self.download_subtitles()
+
+        # Download video
+        video_rep = self.get_representation_by_type("video")
+        if video_rep:
+            encrypted_path = os.path.join(self.encrypted_dir, f"{video_rep['id']}_encrypted.m4s")
+
+            # If m4s file doesn't exist, start downloading
+            if not os.path.exists(encrypted_path):
+                video_downloader = MPD_Segments(
+                    tmp_folder=self.encrypted_dir,
+                    representation=video_rep,
+                    pssh=self.parser.pssh
+                )
+
+                try:
+                    result = video_downloader.download_streams(description="Video")
+
+                    # Store the video segment count for limiting audio
+                    video_segments_count = video_downloader.get_segments_count()
+
+                    # Check for interruption or failure
+                    if result.get("stopped"):
+                        self.stopped = True
+                        self.error = "Download interrupted"
+                        return False
+
+                    if result.get("nFailed", 0) > 0:
+                        self.error = f"Failed segments: {result['nFailed']}"
+                        return False
+
+                except Exception as ex:
+                    self.error = str(ex)
+                    console.print(f"[red]Error downloading video: {ex}[/red]")
+                    return False
+
+            # NO DECRYPTION: just copy/move to decrypted folder
+            decrypted_path = os.path.join(self.decrypted_dir, "video.mp4")
+            if os.path.exists(encrypted_path) and not os.path.exists(decrypted_path):
+                shutil.copy2(encrypted_path, decrypted_path)
+
+        else:
+            self.error = "No video found"
+            console.print(f"[red]{self.error}[/red]")
+            return False
+
+        # Download audio with segment limiting
+        audio_rep = self.get_representation_by_type("audio")
+        if audio_rep:
+            encrypted_path = os.path.join(self.encrypted_dir, f"{audio_rep['id']}_encrypted.m4s")
+
+            # If m4s file doesn't exist, start downloading
+            if not os.path.exists(encrypted_path):
+                audio_language = audio_rep.get('language', 'Unknown')
+
+                audio_downloader = MPD_Segments(
+                    tmp_folder=self.encrypted_dir,
+                    representation=audio_rep,
+                    pssh=self.parser.pssh,
+                    limit_segments=video_segments_count if video_segments_count > 0 else None
+                )
+
+                try:
+                    result = audio_downloader.download_streams(description=f"Audio {audio_language}")
+
+                    # Check for interruption or failure
+                    if result.get("stopped"):
+                        self.stopped = True
+                        self.error = "Download interrupted"
+                        return False
+
+                    if result.get("nFailed", 0) > 0:
+                        self.error = f"Failed segments: {result['nFailed']}"
+                        return False
+
+                except Exception as ex:
+                    self.error = str(ex)
+                    console.print(f"[red]Error downloading audio: {ex}[/red]")
+                    return False
+
+            # NO DECRYPTION: just copy/move to decrypted folder
+            decrypted_path = os.path.join(self.decrypted_dir, "audio.mp4")
+            if os.path.exists(encrypted_path) and not os.path.exists(decrypted_path):
+                shutil.copy2(encrypted_path, decrypted_path)
+
+        else:
+            self.error = "No audio found"
+            console.print(f"[red]{self.error}[/red]")
+            return False
+
+        return True
 
     def finalize_output(self):
         """
@@ -415,8 +492,8 @@
             console.print("[red]Video file missing, cannot export[/red]")
             return None
 
-        # Merge subtitles if enabled and available
-        if MERGE_SUBTITLE and ENABLE_SUBTITLE and self.selected_subs:
+        # Merge subtitles if available
+        if MERGE_SUBTITLE and self.selected_subs:
 
             # Check which subtitle files actually exist
             existing_sub_tracks = []
@@ -455,7 +532,7 @@
 
         # Handle failed sync case
         if use_shortest:
-            new_filename = output_file.replace(".mp4", "_failed_sync.mp4")
+            new_filename = output_file.replace(EXTENSION_OUTPUT, f"_failed_sync{EXTENSION_OUTPUT}")
             if os.path.exists(output_file):
                 os.rename(output_file, new_filename)
             output_file = new_filename
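
Note: the _failed_sync rename now keys off EXTENSION_OUTPUT, the new M3U8_CONVERSION 'extension' value read at the top of the file, instead of a hard-coded ".mp4". An illustration of the string logic with a hypothetical value:

    EXTENSION_OUTPUT = ".mkv"    # hypothetical config value
    output_file = "movie.mkv"
    output_file.replace(EXTENSION_OUTPUT, f"_failed_sync{EXTENSION_OUTPUT}")
    # -> 'movie_failed_sync.mkv'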
StreamingCommunity/Lib/Downloader/DASH/parser.py
@@ -1,6 +1,7 @@
 # 25.07.25
 
 import re
+import logging
 from urllib.parse import urljoin
 import xml.etree.ElementTree as ET
 from typing import List, Dict, Optional, Tuple, Any
@@ -456,6 +457,7 @@ class MPDParser:
            )
 
            response.raise_for_status()
+           logging.info(f"Successfully fetched MPD: {response.content}")
            self.root = ET.fromstring(response.content)
            break
 
@@ -480,9 +482,6 @@
                self.pssh = pssh_element.text
                break
 
-        if not self.pssh:
-            console.print("[bold red]PSSH not found in MPD![/bold red]")
-
     def _parse_representations(self) -> None:
         """Parse all representations from the MPD"""
         base_url = self._get_initial_base_url()
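
Note: MPDParser no longer treats a missing PSSH as an error; combined with the download_and_decrypt hunk earlier, an MPD without a PSSH now silently takes the clear-content path. The resulting control flow, simplified from the two hunks:

    if not self.parser.pssh:
        # No Widevine PSSH in the manifest: plain download, no key fetch, no mp4decrypt
        self.download_segments(clear=True)
        return True
    # Otherwise: obtain KID/KEY for the PSSH and decrypt the downloaded tracks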
StreamingCommunity/Lib/Downloader/DASH/segments.py
@@ -24,6 +24,7 @@ DEFAULT_VIDEO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_video_workers')
 DEFAULT_AUDIO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_audio_workers')
 SEGMENT_MAX_TIMEOUT = config_manager.get_int("M3U8_DOWNLOAD", "segment_timeout")
 LIMIT_SEGMENT = config_manager.get_int('M3U8_DOWNLOAD', 'limit_segment')
+ENABLE_RETRY = config_manager.get_bool('M3U8_DOWNLOAD', 'enable_retry')
 
 
 # Variable
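
Note: enable_retry is a new M3U8_DOWNLOAD key read through config_manager alongside the existing keys in this hunk. A sketch of how the corresponding JSON entry might look; the key names come from the get_* calls above, while the values are hypothetical:

    {
        "M3U8_DOWNLOAD": {
            "segment_timeout": 10,
            "limit_segment": 0,
            "enable_retry": true
        }
    }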
@@ -50,7 +51,8 @@ class MPD_Segments:
             self.limit_segments = LIMIT_SEGMENT if LIMIT_SEGMENT > 0 else None
         else:
             self.limit_segments = limit_segments
-
+
+        self.enable_retry = ENABLE_RETRY
         self.download_interrupted = False
         self.info_nFailed = 0
 
@@ -63,8 +65,8 @@
         self._last_progress_update = 0
         self._progress_update_interval = 0.1
 
-        # Segment tracking
-        self.segment_files = {}
+        # Segment tracking - store only metadata, not content
+        self.segment_status = {}  # {idx: {'downloaded': bool, 'size': int}}
         self.segments_lock = asyncio.Lock()
 
     def get_concat_path(self, output_dir: str = None):
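
Note: MPD_Segments no longer keeps per-segment payloads or even file paths in memory; segment_status records only bookkeeping, and each temp file is found again from its index. A simplified sketch of the pattern, outside the class:

    import asyncio
    import os

    segment_status: dict = {}        # {idx: {'downloaded': bool, 'size': int}}
    downloaded_segments: set = set()
    lock = asyncio.Lock()

    def temp_path(temp_dir: str, idx: int) -> str:
        # Deterministic names let concatenation and cleanup find files without a dict
        return os.path.join(temp_dir, f"seg_{idx:06d}.tmp")

    async def record(idx: int, ok: bool, size: int) -> None:
        async with lock:
            segment_status[idx] = {'downloaded': ok, 'size': size}
            if ok:
                downloaded_segments.add(idx)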
@@ -114,7 +116,7 @@
 
     async def download_segments(self, output_dir: str = None, concurrent_downloads: int = None, description: str = "DASH"):
         """
-        Download and concatenate all segments (including init) asynchronously and in order.
+        Download segments to temporary files, then concatenate them in order.
 
         Parameters:
         - output_dir (str): Output directory for segments
@@ -150,7 +152,7 @@
         # Initialize estimator
         estimator = M3U8_Ts_Estimator(total_segments=len(segment_urls) + 1)
 
-        self.segment_files = {}
+        self.segment_status = {}
         self.downloaded_segments = set()
         self.info_nFailed = 0
         self.download_interrupted = False
@@ -166,18 +168,19 @@
             # Download init segment
             await self._download_init_segment(client, init_url, concat_path, estimator, progress_bar)
 
-            # Download all segments (first batch) - writes to temp files
+            # Download all segments to temp files
             await self._download_segments_batch(
                 client, segment_urls, temp_dir, semaphore, REQUEST_MAX_RETRY, estimator, progress_bar
             )
 
-            # Retry failed segments
-            await self._retry_failed_segments(
-                client, segment_urls, temp_dir, semaphore, REQUEST_MAX_RETRY, estimator, progress_bar
-            )
+            # Retry failed segments only if enabled
+            if self.enable_retry:
+                await self._retry_failed_segments(
+                    client, segment_urls, temp_dir, semaphore, REQUEST_MAX_RETRY, estimator, progress_bar
+                )
 
-            # Concatenate all segment files in order
-            await self._concatenate_segments(concat_path, len(segment_urls))
+            # Concatenate all segments IN ORDER
+            await self._concatenate_segments_in_order(temp_dir, concat_path, len(segment_urls))
 
         except KeyboardInterrupt:
             self.download_interrupted = True
@@ -225,29 +228,33 @@
 
     async def _download_segments_batch(self, client, segment_urls, temp_dir, semaphore, max_retry, estimator, progress_bar):
         """
-        Download a batch of segments and write them to temp files immediately.
+        Download segments to temporary files - write immediately to disk, not memory.
         """
         async def download_single(url, idx):
             async with semaphore:
                 headers = {'User-Agent': get_userAgent()}
+                temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
 
                 for attempt in range(max_retry):
                     if self.download_interrupted:
-                        return idx, False, attempt
+                        return idx, False, attempt, 0
 
                     try:
                         timeout = min(SEGMENT_MAX_TIMEOUT, 10 + attempt * 3)
                         resp = await client.get(url, headers=headers, follow_redirects=True, timeout=timeout)
 
-                        # Write to temp file immediately
+                        # Write directly to temp file
                         if resp.status_code == 200:
-                            temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
+                            content_size = len(resp.content)
+                            with open(temp_file, 'wb') as f:
+                                f.write(resp.content)
+
+                            # Update status
                             async with self.segments_lock:
-                                with open(temp_file, 'wb') as f:
-                                    f.write(resp.content)
-                                self.segment_files[idx] = temp_file
+                                self.segment_status[idx] = {'downloaded': True, 'size': content_size}
+                                self.downloaded_segments.add(idx)
 
-                            return idx, True, attempt, len(resp.content)
+                            return idx, True, attempt, content_size
                         else:
                             if attempt < 2:
                                 sleep_time = 0.5 + attempt * 0.5
@@ -258,19 +265,21 @@
                     except Exception:
                         sleep_time = min(2.0, 1.1 * (2 ** attempt))
                         await asyncio.sleep(sleep_time)
+
+                # Mark as failed
+                async with self.segments_lock:
+                    self.segment_status[idx] = {'downloaded': False, 'size': 0}
 
                 return idx, False, max_retry, 0
 
-        # Initial download attempt
+        # Download all segments concurrently
         tasks = [download_single(url, i) for i, url in enumerate(segment_urls)]
 
        for coro in asyncio.as_completed(tasks):
            try:
                idx, success, nretry, size = await coro
 
-                if success:
-                    self.downloaded_segments.add(idx)
-                else:
+                if not success:
                    self.info_nFailed += 1
 
                if nretry > self.info_maxRetry:
@@ -283,7 +292,7 @@
 
            except KeyboardInterrupt:
                self.download_interrupted = True
-                print("\n[red]Download interrupted by user (Ctrl+C).")
+                console.print("\n[red]Download interrupted by user (Ctrl+C).")
                break
 
     async def _retry_failed_segments(self, client, segment_urls, temp_dir, semaphore, max_retry, estimator, progress_bar):
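
Design note: in both the batch path above and the retry path below, the response body is written to disk before segments_lock is taken, so the lock now guards only the two bookkeeping structures rather than file I/O, shortening the critical section. The ordering, as it appears in the hunks:

    content_size = len(resp.content)
    with open(temp_file, 'wb') as f:       # disk write happens outside the lock
        f.write(resp.content)
    async with self.segments_lock:         # lock held only for metadata updates
        self.segment_status[idx] = {'downloaded': True, 'size': content_size}
        self.downloaded_segments.add(idx)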
@@ -301,6 +310,7 @@
         async def download_single(url, idx):
             async with semaphore:
                 headers = {'User-Agent': get_userAgent()}
+                temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
 
                 for attempt in range(max_retry):
                     if self.download_interrupted:
@@ -310,15 +320,17 @@
                         timeout = min(SEGMENT_MAX_TIMEOUT, 15 + attempt * 5)
                         resp = await client.get(url, headers=headers, timeout=timeout)
 
-                        # Write to temp file immediately
+                        # Write directly to temp file
                         if resp.status_code == 200:
-                            temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
+                            content_size = len(resp.content)
+                            with open(temp_file, 'wb') as f:
+                                f.write(resp.content)
+
                             async with self.segments_lock:
-                                with open(temp_file, 'wb') as f:
-                                    f.write(resp.content)
-                                self.segment_files[idx] = temp_file
+                                self.segment_status[idx] = {'downloaded': True, 'size': content_size}
+                                self.downloaded_segments.add(idx)
 
-                            return idx, True, attempt, len(resp.content)
+                            return idx, True, attempt, content_size
                         else:
                             await asyncio.sleep(1.5 * (2 ** attempt))
 
@@ -334,9 +346,7 @@
            try:
                idx, success, nretry, size = await coro
 
-                if success:
-                    self.downloaded_segments.add(idx)
-                else:
+                if not success:
                    nFailed_this_round += 1
 
                if nretry > self.info_maxRetry:
@@ -355,20 +365,24 @@
             self.info_nFailed = nFailed_this_round
             global_retry_count += 1
 
-    async def _concatenate_segments(self, concat_path, total_segments):
+    async def _concatenate_segments_in_order(self, temp_dir, concat_path, total_segments):
         """
-        Concatenate all segment files in order to the final output file.
-        Skip missing segments and continue with available ones.
+        Concatenate all segment files IN ORDER to the final output file.
         """
-        successful_segments = 0
         with open(concat_path, 'ab') as outfile:
             for idx in range(total_segments):
-                if idx in self.segment_files:
-                    temp_file = self.segment_files[idx]
-                    if os.path.exists(temp_file):
-                        with open(temp_file, 'rb') as infile:
-                            outfile.write(infile.read())
-                            successful_segments += 1
+                temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
+
+                # Only concatenate successfully downloaded segments
+                if idx in self.downloaded_segments and os.path.exists(temp_file):
+                    with open(temp_file, 'rb') as infile:
+
+                        # Read and write in chunks to avoid memory issues
+                        while True:
+                            chunk = infile.read(8192)  # 8KB chunks
+                            if not chunk:
+                                break
+                            outfile.write(chunk)
 
     def _get_bar_format(self, description: str) -> str:
         """
@@ -435,7 +449,8 @@
         # Delete temp segment files
         if temp_dir and os.path.exists(temp_dir):
             try:
-                for temp_file in self.segment_files.values():
+                for idx in range(len(self.selected_representation.get('segment_urls', []))):
+                    temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
                     if os.path.exists(temp_file):
                         os.remove(temp_file)
                 os.rmdir(temp_dir)
@@ -447,7 +462,7 @@
             self._display_error_summary()
 
         # Clear memory
-        self.segment_files = {}
+        self.segment_status = {}
 
     def _display_error_summary(self) -> None:
         """
@@ -455,11 +470,8 @@
         """
         total_segments = len(self.selected_representation.get('segment_urls', []))
         failed_indices = [i for i in range(total_segments) if i not in self.downloaded_segments]
-        successful_segments = len(self.downloaded_segments)
-
-        console.print(f"[green]Download Summary: "
-                      f"[cyan]Successful: [red]{successful_segments}/{total_segments} "
-                      f"[cyan]Max retries: [red]{getattr(self, 'info_maxRetry', 0)} "
-                      f"[cyan]Total retries: [red]{getattr(self, 'info_nRetry', 0)} "
-                      f"[cyan]Failed segments: [red]{getattr(self, 'info_nFailed', 0)} "
-                      f"[cyan]Failed indices: [red]{failed_indices} \n")
+
+        console.print(f" [cyan]Max retries: [red]{getattr(self, 'info_maxRetry', 0)} [white]| "
+                      f"[cyan]Total retries: [red]{getattr(self, 'info_nRetry', 0)} [white]| "
+                      f"[cyan]Failed segments: [red]{getattr(self, 'info_nFailed', 0)} [white]| "
+                      f"[cyan]Failed indices: [red]{failed_indices}")