StreamingCommunity 3.0.1__py3-none-any.whl → 3.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic. Click here for more details.
- StreamingCommunity/Api/Player/hdplayer.py +65 -0
- StreamingCommunity/Api/Player/mixdrop.py +145 -0
- StreamingCommunity/Api/Site/1337xx/site.py +1 -1
- StreamingCommunity/Api/Site/altadefinizione/site.py +1 -1
- StreamingCommunity/Api/Site/animeunity/site.py +2 -1
- StreamingCommunity/Api/Site/animeworld/site.py +1 -1
- StreamingCommunity/Api/Site/ddlstreamitaly/site.py +1 -1
- StreamingCommunity/Api/Site/guardaserie/site.py +1 -1
- StreamingCommunity/Api/Site/raiplay/site.py +2 -2
- StreamingCommunity/Api/Site/streamingcommunity/series.py +2 -2
- StreamingCommunity/Api/Site/streamingcommunity/site.py +1 -1
- StreamingCommunity/Api/Site/streamingwatch/__init__.py +95 -0
- StreamingCommunity/Api/Site/streamingwatch/film.py +61 -0
- StreamingCommunity/Api/Site/streamingwatch/series.py +160 -0
- StreamingCommunity/Api/Site/streamingwatch/site.py +111 -0
- StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +118 -0
- StreamingCommunity/Lib/Proxies/proxy.py +232 -0
- StreamingCommunity/Upload/version.py +1 -1
- {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.2.dist-info}/METADATA +16 -2
- {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.2.dist-info}/RECORD +24 -17
- {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.2.dist-info}/WHEEL +1 -1
- StreamingCommunity/Api/Player/maxstream.py +0 -140
- {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.2.dist-info}/entry_points.txt +0 -0
- {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.2.dist-info}/licenses/LICENSE +0 -0
- {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# 29.04.25
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# External library
|
|
7
|
+
import httpx
|
|
8
|
+
from bs4 import BeautifulSoup
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
# Internal utilities
|
|
12
|
+
from StreamingCommunity.Util.headers import get_headers
|
|
13
|
+
from StreamingCommunity.Util.config_json import config_manager
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# Variable
|
|
17
|
+
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class VideoSource:
    """Resolve a direct m3u8 stream URL from a page that embeds an HDPlayer iframe."""

    def __init__(self, proxy=None):
        # Single shared HTTP client for both page fetches; it is closed at the
        # end of get_m3u8_url, so each VideoSource instance is single-use.
        self.client = httpx.Client(headers=get_headers(), timeout=MAX_TIMEOUT, proxy=proxy)

    def extractLinkHdPlayer(self, response):
        """Extract iframe source from the page."""
        page = BeautifulSoup(response.content, 'html.parser')
        frames = page.find_all("iframe")
        # Only the first iframe is considered; its lazy-load attribute holds the URL.
        return frames[0].get('data-lazy-src') if frames else None

    def get_m3u8_url(self, page_url):
        """
        Extract m3u8 URL from hdPlayer page.

        Returns the stream URL string, or None when any step fails.
        """
        try:
            landing = self.client.get(page_url)

            # Locate the embedded HDPlayer iframe on the landing page.
            player_url = self.extractLinkHdPlayer(landing)
            if not player_url:
                return None

            # Fetch the player page itself.
            player_page = self.client.get(player_url)
            if player_page.status_code != 200:
                return None

            parsed = BeautifulSoup(player_page.text, 'html.parser')

            # The stream URL lives in an inline JS player config:
            #   sources: [{ file: "<url>" ...
            for tag in parsed.find_all("script"):
                found = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', tag.text)
                if found:
                    return found.group(1)

            return None

        except Exception as e:
            print(f"Error in HDPlayer: {str(e)}")
            return None

        finally:
            # Always release the connection pool, success or failure.
            self.client.close()
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# 05.07.24
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
import logging
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# External libraries
|
|
8
|
+
import httpx
|
|
9
|
+
import jsbeautifier
|
|
10
|
+
from bs4 import BeautifulSoup
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Internal utilities
|
|
14
|
+
from StreamingCommunity.Util.config_json import config_manager
|
|
15
|
+
from StreamingCommunity.Util.headers import get_userAgent
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# Variable
|
|
19
|
+
MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class VideoSource:
    """
    Resolve a MixDrop video delivery URL starting from a stayonline.pro link.

    Flow (see get_playlist): landing page -> stayonline redirect URL ->
    linkId -> final MixDrop URL -> embedded player page -> delivery URL
    extracted from beautified obfuscated JavaScript.
    """

    # Service endpoints used throughout the resolution flow.
    STAYONLINE_BASE_URL = "https://stayonline.pro"
    MIXDROP_BASE_URL = "https://mixdrop.sb"

    def __init__(self, url: str):
        # url: the initial page containing a stayonline link.
        self.url = url
        # Populated by get_redirect_url; get_link_id requires it to be set first.
        self.redirect_url: str | None = None
        self._init_headers()

    def _init_headers(self) -> None:
        """Initialize the base headers used for requests."""
        self.headers = {
            'origin': self.STAYONLINE_BASE_URL,
            'user-agent': get_userAgent(),
        }

    def _get_mixdrop_headers(self) -> dict:
        """Get headers specifically for MixDrop requests."""
        return {
            'referer': 'https://mixdrop.club/',
            'user-agent': get_userAgent()
        }

    def get_redirect_url(self) -> str:
        """
        Extract the stayonline redirect URL from the initial page.

        Side effect: stores the URL in self.redirect_url.

        Raises:
            ValueError: if no anchor containing 'stayonline' is found.
            httpx.HTTPStatusError: if the page request fails.
        """
        try:
            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # First anchor whose href mentions 'stayonline' wins.
            for link in soup.find_all('a'):
                href = link.get('href')
                if href and 'stayonline' in href:
                    self.redirect_url = href
                    logging.info(f"Redirect URL: {self.redirect_url}")
                    return self.redirect_url

            raise ValueError("Stayonline URL not found")

        except Exception as e:
            # Log then re-raise so callers see the original failure.
            logging.error(f"Error getting redirect URL: {e}")
            raise

    def get_link_id(self) -> str:
        """
        Extract the link ID from the redirect page.

        Requires get_redirect_url to have been called successfully first.

        Raises:
            ValueError: if redirect_url is unset or no linkId is found.
        """
        if not self.redirect_url:
            raise ValueError("Redirect URL not set. Call get_redirect_url first.")

        try:
            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # The page defines: var linkId = "<id>"; inside an inline script.
            for script in soup.find_all('script'):
                match = re.search(r'var\s+linkId\s*=\s*"([^"]+)"', script.text)
                if match:
                    return match.group(1)

            raise ValueError("LinkId not found")

        except Exception as e:
            logging.error(f"Error getting link ID: {e}")
            raise

    def get_final_url(self, link_id: str) -> str:
        """
        Get the final URL using the link ID.

        Side effect: adds a 'referer' header to self.headers — the ajax
        endpoint requires it to match the /l/<id>/ page.
        """
        try:
            self.headers['referer'] = f'{self.STAYONLINE_BASE_URL}/l/{link_id}/'
            data = {'id': link_id, 'ref': ''}

            response = httpx.post(f'{self.STAYONLINE_BASE_URL}/ajax/linkView.php', headers=self.headers, data=data, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            # Expected JSON shape: {"data": {"value": "<final url>"}}
            return response.json()['data']['value']

        except Exception as e:
            logging.error(f"Error getting final URL: {e}")
            raise

    def _extract_video_id(self, final_url: str) -> str:
        """
        Extract video ID from the final URL.

        Assumes the URL shape https://host/<x>/<video_id>/... so the ID is
        the 5th slash-separated component (index 4).
        """
        parts = final_url.split('/')
        if len(parts) < 5:
            raise ValueError("Invalid final URL format")
        return parts[4]

    def _extract_delivery_url(self, script_text: str) -> str:
        """
        Extract delivery URL from beautified JavaScript.

        The MixDrop player script is packed; jsbeautifier unpacks it so the
        'MDCore.wurl = "..."' assignment appears on its own line.

        NOTE: strip('"') runs before strip(';'), so a trailing '";' leaves a
        stray '"' in the result — get_playlist compensates with .replace('"', '').
        """
        beautified = jsbeautifier.beautify(script_text)
        for line in beautified.splitlines():
            if 'MDCore.wurl' in line:
                url = line.split('= ')[1].strip('"').strip(';')
                # wurl is protocol-relative (//host/...), so prepend https:.
                return f"https:{url}"
        raise ValueError("Delivery URL not found in script")

    def get_playlist(self) -> str:
        """
        Execute the entire flow to obtain the final video URL.

        Returns:
            str: The final video delivery URL

        Raises:
            ValueError: if any extraction step fails.
        """
        self.get_redirect_url()
        link_id = self.get_link_id()

        final_url = self.get_final_url(link_id)
        video_id = self._extract_video_id(final_url)

        # Fetch the MixDrop embed page for this video.
        response = httpx.get(
            f'{self.MIXDROP_BASE_URL}/e/{video_id}',
            headers=self._get_mixdrop_headers(),
            timeout=MAX_TIMEOUT
        )
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        # The packed player script is the one containing an eval() call.
        script_text = next(
            (script.text for script in soup.find_all('script')
            if "eval" in str(script.text)),
            None
        )

        if not script_text:
            raise ValueError("Required script not found")

        # replace() removes the stray quote left by _extract_delivery_url.
        return self._extract_delivery_url(script_text).replace('"', '')
|
|
@@ -52,7 +52,7 @@ def title_search(query: str) -> int:
|
|
|
52
52
|
response.raise_for_status()
|
|
53
53
|
|
|
54
54
|
except Exception as e:
|
|
55
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
55
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
56
56
|
return 0
|
|
57
57
|
|
|
58
58
|
# Create soup and find table
|
|
@@ -55,7 +55,7 @@ def title_search(query: str) -> int:
|
|
|
55
55
|
response.raise_for_status()
|
|
56
56
|
|
|
57
57
|
except Exception as e:
|
|
58
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
58
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
59
59
|
if site_constant.TELEGRAM_BOT:
|
|
60
60
|
bot.send_message(f"ERRORE\n\nErrore nella richiesta di ricerca:\n\n{e}", None)
|
|
61
61
|
return 0
|
|
@@ -119,7 +119,8 @@ def title_search(query: str) -> int:
|
|
|
119
119
|
process_results(response1.json()['records'], seen_titles, media_search_manager, choices)
|
|
120
120
|
|
|
121
121
|
except Exception as e:
|
|
122
|
-
console.print(f"Site: {site_constant.SITE_NAME},
|
|
122
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
123
|
+
return 0
|
|
123
124
|
|
|
124
125
|
# Second API call - archivio
|
|
125
126
|
try:
|
|
@@ -78,7 +78,7 @@ def title_search(query: str) -> int:
|
|
|
78
78
|
)
|
|
79
79
|
|
|
80
80
|
except Exception as e:
|
|
81
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
81
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
82
82
|
return 0
|
|
83
83
|
|
|
84
84
|
# Create soup istance
|
|
@@ -53,7 +53,7 @@ def title_search(query: str) -> int:
|
|
|
53
53
|
response.raise_for_status()
|
|
54
54
|
|
|
55
55
|
except Exception as e:
|
|
56
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
56
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
57
57
|
return 0
|
|
58
58
|
|
|
59
59
|
# Create soup and find table
|
|
@@ -54,7 +54,7 @@ def title_search(query: str) -> int:
|
|
|
54
54
|
response.raise_for_status()
|
|
55
55
|
|
|
56
56
|
except Exception as e:
|
|
57
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
57
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
58
58
|
return 0
|
|
59
59
|
|
|
60
60
|
# Create soup and find table
|
|
@@ -26,7 +26,7 @@ console = Console()
|
|
|
26
26
|
media_search_manager = MediaManager()
|
|
27
27
|
table_show_manager = TVShowManager()
|
|
28
28
|
max_timeout = config_manager.get_int("REQUESTS", "timeout")
|
|
29
|
-
MAX_THREADS =
|
|
29
|
+
MAX_THREADS = 12
|
|
30
30
|
|
|
31
31
|
|
|
32
32
|
def determine_media_type(title):
|
|
@@ -134,7 +134,7 @@ def title_search(query: str) -> int:
|
|
|
134
134
|
response.raise_for_status()
|
|
135
135
|
|
|
136
136
|
except Exception as e:
|
|
137
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
137
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
138
138
|
return 0
|
|
139
139
|
|
|
140
140
|
# Limit to only 15 results for performance
|
|
@@ -155,7 +155,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
|
|
|
155
155
|
|
|
156
156
|
# Init class
|
|
157
157
|
video_source = VideoSource(site_constant.FULL_URL, True, select_season.id)
|
|
158
|
-
scrape_serie = GetSerieInfo(site_constant.FULL_URL, select_season.id, select_season.
|
|
158
|
+
scrape_serie = GetSerieInfo(site_constant.FULL_URL, select_season.id, select_season.slug)
|
|
159
159
|
|
|
160
160
|
# Collect information about season
|
|
161
161
|
scrape_serie.getNumberSeason()
|
|
@@ -219,4 +219,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
|
|
|
219
219
|
# Get script_id
|
|
220
220
|
script_id = TelegramSession.get_session()
|
|
221
221
|
if script_id != "unknown":
|
|
222
|
-
TelegramSession.deleteScriptId(script_id)
|
|
222
|
+
TelegramSession.deleteScriptId(script_id)
|
|
@@ -55,7 +55,7 @@ def title_search(query: str) -> int:
|
|
|
55
55
|
response.raise_for_status()
|
|
56
56
|
|
|
57
57
|
except Exception as e:
|
|
58
|
-
console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
58
|
+
console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
|
|
59
59
|
if site_constant.TELEGRAM_BOT:
|
|
60
60
|
bot.send_message(f"ERRORE\n\nErrore nella richiesta di ricerca:\n\n{e}", None)
|
|
61
61
|
return 0
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
# 29.04.25
|
|
2
|
+
|
|
3
|
+
# External library
|
|
4
|
+
from rich.console import Console
|
|
5
|
+
from rich.prompt import Prompt
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# Internal utilities
|
|
9
|
+
from StreamingCommunity.Api.Template import get_select_title
|
|
10
|
+
from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
|
|
11
|
+
from StreamingCommunity.Api.Template.config_loader import site_constant
|
|
12
|
+
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Logic class
|
|
16
|
+
from .site import title_search, table_show_manager, media_search_manager
|
|
17
|
+
from .film import download_film
|
|
18
|
+
from .series import download_series
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Variable
|
|
22
|
+
indice = 8
|
|
23
|
+
_useFor = "film_serie"
|
|
24
|
+
_priority = 10 # !!! MOLTO LENTO
|
|
25
|
+
_engineDownload = "hls"
|
|
26
|
+
|
|
27
|
+
msg = Prompt()
|
|
28
|
+
console = Console()
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def get_user_input(string_to_search: str = None):
    """
    Asks the user to input a search term.
    Handles both Telegram bot input and direct input.

    Parameters:
        string_to_search (str, optional): Pre-supplied search term. When
            provided it is returned (stripped) without prompting.

    Returns:
        str: The search term to use.
    """
    # Fix: the original ignored its argument and always prompted, making the
    # parameter dead code. Honor a pre-supplied term when given.
    if string_to_search is not None:
        return string_to_search.strip()

    return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
|
|
38
|
+
|
|
39
|
+
def process_search_result(select_title, selections=None, proxy=None):
    """
    Handles the search result and initiates the download for either a film or series.

    Parameters:
        select_title (MediaItem): The selected media item
        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
            {'season': season_selection, 'episode': episode_selection}
        proxy (optional): Proxy forwarded to the downloader
    """
    # Films take the short path.
    if select_title.type != 'tv':
        download_film(select_title, proxy)
        return

    # For series, pull optional pre-made selections (None when absent).
    picks = selections or {}
    download_series(select_title, picks.get('season'), picks.get('episode'), proxy)
|
|
60
|
+
|
|
61
|
+
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
    """
    Main function of the application for search.

    Parameters:
        string_to_search (str, optional): String to search for
        get_onlyDatabase (bool, optional): If True, return only the database object
        direct_item (dict, optional): Direct item to process (bypass search)
        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
            {'season': season_selection, 'episode': episode_selection}
    """
    # A direct item bypasses searching entirely.
    if direct_item:
        process_search_result(MediaItem(**direct_item), selections) # DONT SUPPORT PROXY FOR NOW
        return

    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()

    # Probe for a usable proxy before querying the site (this site is slow).
    finder = ProxyFinder(url=f"{site_constant.FULL_URL}/serie/euphoria/")
    proxy, response_serie, _ = finder.find_fast_proxy()
    result_count = title_search(string_to_search, [proxy, response_serie])

    # If only the database is needed, return the manager
    if get_onlyDatabase:
        return media_search_manager

    if result_count <= 0:
        # If no results are found, ask again
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
        search()
        return

    chosen = get_select_title(table_show_manager, media_search_manager)
    process_search_result(chosen, selections, proxy)
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# 29.04.25
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# External library
|
|
7
|
+
from rich.console import Console
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Internal utilities
|
|
11
|
+
from StreamingCommunity.Util.os import os_manager
|
|
12
|
+
from StreamingCommunity.Util.message import start_message
|
|
13
|
+
from StreamingCommunity.Lib.Downloader import HLS_Downloader
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# Logic class
|
|
17
|
+
from StreamingCommunity.Api.Template.config_loader import site_constant
|
|
18
|
+
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Player
|
|
22
|
+
from StreamingCommunity.Api.Player.hdplayer import VideoSource
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# Variable
|
|
26
|
+
console = Console()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def download_film(select_title: MediaItem, proxy) -> str:
    """
    Downloads a film resolved through the HDPlayer video source.

    Parameters:
        - select_title (MediaItem): The film selected from search results
          (its .url is the page to scrape, .name the output title).
        - proxy: Proxy passed through to the VideoSource HTTP client.

    Return:
        - str: output path
    """
    start_message()
    console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")

    # Get master playlists
    video_source = VideoSource(proxy)
    master_playlist = video_source.get_m3u8_url(select_title.url)

    # Define the filename and path for the downloaded film
    # (the film gets its own folder named after the sanitized title).
    title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))

    # Download the film using the m3u8 playlist, and output filename
    r_proc = HLS_Downloader(
        m3u8_url=master_playlist,
        output_path=os.path.join(mp4_path, title_name)
    ).start()

    if r_proc['error'] is not None:
        # Best-effort cleanup of the partial file. Narrowed from a bare
        # `except:` (which also swallowed KeyboardInterrupt/SystemExit) to
        # file-system errors only.
        try:
            os.remove(r_proc['path'])
        except OSError:
            pass

    return r_proc['path']
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
# 29.04.25
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import Tuple
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# External library
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
from rich.prompt import Prompt
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
# Internal utilities
|
|
13
|
+
from StreamingCommunity.Util.message import start_message
|
|
14
|
+
from StreamingCommunity.Lib.Downloader import HLS_Downloader
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# Logic class
|
|
18
|
+
from .util.ScrapeSerie import GetSerieInfo
|
|
19
|
+
from StreamingCommunity.Api.Template.Util import (
|
|
20
|
+
manage_selection,
|
|
21
|
+
map_episode_title,
|
|
22
|
+
validate_selection,
|
|
23
|
+
validate_episode_selection,
|
|
24
|
+
display_episodes_list
|
|
25
|
+
)
|
|
26
|
+
from StreamingCommunity.Api.Template.config_loader import site_constant
|
|
27
|
+
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# Player
|
|
31
|
+
from StreamingCommunity.Api.Player.hdplayer import VideoSource
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# Variable
|
|
35
|
+
msg = Prompt()
|
|
36
|
+
console = Console()
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo, proxy=None) -> Tuple[str,bool]:
    """
    Downloads a specific episode from a specified season.

    Parameters:
        - index_season_selected (int): Season number
        - index_episode_selected (int): Episode index (1-based)
        - scrape_serie (GetSerieInfo): Scraper object with series information
        - proxy (optional): Proxy passed through to the VideoSource HTTP client

    Returns:
        - str: Path to downloaded file
        - bool: Whether download was stopped
    """
    start_message()

    # Get episode information (selectEpisode expects a 0-based episode index)
    obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected-1)
    console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [bold magenta]{obj_episode.name}[/bold magenta] ([cyan]S{index_season_selected}E{index_episode_selected}[/cyan]) \n")

    # Define filename and path for the downloaded video
    mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
    mp4_path = os.path.join(site_constant.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")

    # Retrieve scws and if available master playlist
    video_source = VideoSource(proxy)
    master_playlist = video_source.get_m3u8_url(obj_episode.url)

    # Download the episode
    r_proc = HLS_Downloader(
        m3u8_url=master_playlist,
        output_path=os.path.join(mp4_path, mp4_name)
    ).start()

    if r_proc['error'] is not None:
        # Best-effort cleanup of the partial file. Narrowed from a bare
        # `except:` (which also swallowed KeyboardInterrupt/SystemExit) to
        # file-system errors only.
        try:
            os.remove(r_proc['path'])
        except OSError:
            pass

    return r_proc['path'], r_proc['stopped']
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None, proxy = None) -> None:
    """
    Handle downloading episodes for a specific season.

    Parameters:
        - index_season_selected (int): Season number
        - scrape_serie (GetSerieInfo): Scraper object with series information
        - download_all (bool): Whether to download all episodes
        - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
        - proxy (optional): Proxy forwarded to each episode download
    """
    # Get episodes for the selected season
    season_episodes = scrape_serie.getEpisodeSeasons(index_season_selected)
    total_episodes = len(season_episodes)

    if download_all:
        # Walk every episode in order; a stopped download aborts the season.
        for episode_number in range(1, total_episodes + 1):
            _, stopped = download_video(index_season_selected, episode_number, scrape_serie, proxy)
            if stopped:
                break

        console.print(f"\n[red]End downloaded [yellow]season: [red]{index_season_selected}.")
        return

    # Resolve the episode selection: pre-supplied or interactive.
    if episode_selection is not None:
        last_command = episode_selection
        console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
    else:
        last_command = display_episodes_list(season_episodes)

    # Parse and validate the selection against the episode count.
    chosen_episodes = manage_selection(last_command, total_episodes)
    chosen_episodes = validate_episode_selection(chosen_episodes, total_episodes)

    # Download selected episodes if not stopped
    for episode_number in chosen_episodes:
        _, stopped = download_video(index_season_selected, episode_number, scrape_serie, proxy)
        if stopped:
            break
|
|
120
|
+
|
|
121
|
+
def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None, proxy = None) -> None:
    """
    Handle downloading a complete series.

    Parameters:
        - select_season (MediaItem): Series metadata from search
        - season_selection (str, optional): Pre-defined season selection that bypasses manual input
        - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
        - proxy (optional): Proxy forwarded to scraping and downloads
    """
    scrape_serie = GetSerieInfo(select_season.url, proxy)

    # Get total number of seasons
    seasons_count = scrape_serie.getNumberSeason()
    console.print(f"\n[green]Seasons found: [red]{seasons_count}")

    # Resolve the season selection: pre-supplied or interactive.
    if season_selection is not None:
        index_season_selected = season_selection
        console.print(f"\n[cyan]Using provided season selection: [yellow]{season_selection}")
    else:
        index_season_selected = msg.ask(
            "\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
            "[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
        )

    # Validate the selection
    list_season_select = manage_selection(index_season_selected, seasons_count)
    list_season_select = validate_selection(list_season_select, seasons_count)

    # Multiple seasons (or '*') download everything; a single season lets the
    # user pick specific episodes.
    grab_everything = len(list_season_select) > 1 or index_season_selected == "*"
    for i_season in list_season_select:
        if grab_everything:
            download_episode(i_season, scrape_serie, download_all=True, proxy=proxy)
        else:
            download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection, proxy=proxy)
|