StreamingCommunity 3.2.0__py3-none-any.whl → 3.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic.
- StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +4 -0
- StreamingCommunity/Api/Player/hdplayer.py +2 -2
- StreamingCommunity/Api/Player/mixdrop.py +1 -1
- StreamingCommunity/Api/Player/vixcloud.py +4 -5
- StreamingCommunity/Api/Site/animeunity/__init__.py +2 -2
- StreamingCommunity/Api/Site/crunchyroll/__init__.py +103 -0
- StreamingCommunity/Api/Site/crunchyroll/film.py +83 -0
- StreamingCommunity/Api/Site/crunchyroll/series.py +182 -0
- StreamingCommunity/Api/Site/crunchyroll/site.py +113 -0
- StreamingCommunity/Api/Site/crunchyroll/util/ScrapeSerie.py +218 -0
- StreamingCommunity/Api/Site/crunchyroll/util/get_license.py +227 -0
- StreamingCommunity/Api/Site/guardaserie/site.py +1 -2
- StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +9 -8
- StreamingCommunity/Api/Site/mediasetinfinity/__init__.py +96 -0
- StreamingCommunity/Api/Site/mediasetinfinity/film.py +76 -0
- StreamingCommunity/Api/Site/mediasetinfinity/series.py +177 -0
- StreamingCommunity/Api/Site/mediasetinfinity/site.py +112 -0
- StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py +259 -0
- StreamingCommunity/Api/Site/mediasetinfinity/util/fix_mpd.py +64 -0
- StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py +217 -0
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +6 -17
- StreamingCommunity/Api/Site/streamingcommunity/film.py +2 -2
- StreamingCommunity/Api/Site/streamingcommunity/series.py +9 -9
- StreamingCommunity/Api/Site/streamingcommunity/site.py +3 -4
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +3 -6
- StreamingCommunity/Api/Site/streamingwatch/__init__.py +6 -14
- StreamingCommunity/Api/Site/streamingwatch/film.py +2 -2
- StreamingCommunity/Api/Site/streamingwatch/series.py +9 -9
- StreamingCommunity/Api/Site/streamingwatch/site.py +5 -7
- StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +2 -2
- StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py +131 -0
- StreamingCommunity/Lib/Downloader/DASH/decrypt.py +79 -0
- StreamingCommunity/Lib/Downloader/DASH/downloader.py +220 -0
- StreamingCommunity/Lib/Downloader/DASH/parser.py +249 -0
- StreamingCommunity/Lib/Downloader/DASH/segments.py +332 -0
- StreamingCommunity/Lib/Downloader/HLS/downloader.py +1 -14
- StreamingCommunity/Lib/Downloader/HLS/segments.py +3 -3
- StreamingCommunity/Lib/Downloader/MP4/downloader.py +0 -5
- StreamingCommunity/Lib/FFmpeg/capture.py +3 -3
- StreamingCommunity/Lib/FFmpeg/command.py +1 -1
- StreamingCommunity/TelegramHelp/config.json +3 -5
- StreamingCommunity/Upload/version.py +2 -2
- StreamingCommunity/Util/os.py +21 -0
- StreamingCommunity/run.py +1 -1
- {streamingcommunity-3.2.0.dist-info → streamingcommunity-3.2.5.dist-info}/METADATA +4 -2
- {streamingcommunity-3.2.0.dist-info → streamingcommunity-3.2.5.dist-info}/RECORD +50 -36
- StreamingCommunity/Api/Site/1337xx/__init__.py +0 -72
- StreamingCommunity/Api/Site/1337xx/site.py +0 -82
- StreamingCommunity/Api/Site/1337xx/title.py +0 -61
- StreamingCommunity/Lib/Proxies/proxy.py +0 -72
- {streamingcommunity-3.2.0.dist-info → streamingcommunity-3.2.5.dist-info}/WHEEL +0 -0
- {streamingcommunity-3.2.0.dist-info → streamingcommunity-3.2.5.dist-info}/entry_points.txt +0 -0
- {streamingcommunity-3.2.0.dist-info → streamingcommunity-3.2.5.dist-info}/licenses/LICENSE +0 -0
- {streamingcommunity-3.2.0.dist-info → streamingcommunity-3.2.5.dist-info}/top_level.txt +0 -0
StreamingCommunity/Api/Site/crunchyroll/util/ScrapeSerie.py
@@ -0,0 +1,218 @@
+# 16.03.25
+
+import logging
+
+
+# External libraries
+from curl_cffi import requests
+
+
+# Internal utilities
+from StreamingCommunity.Util.headers import get_headers
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager
+from .get_license import get_auth_token, generate_device_id
+
+
+# Variable
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+def get_series_seasons(series_id, headers, params):
+    """
+    Fetches the seasons for a given series ID from Crunchyroll.
+    """
+    url = f'https://www.crunchyroll.com/content/v2/cms/series/{series_id}/seasons'
+    response = requests.get(
+        url,
+        params=params,
+        headers=headers,
+        impersonate="chrome110"
+    )
+    return response
+
+
+def get_season_episodes(season_id, headers, params):
+    """
+    Fetches the episodes for a given season ID from Crunchyroll.
+    """
+    url = f'https://www.crunchyroll.com/content/v2/cms/seasons/{season_id}/episodes'
+    response = requests.get(
+        url,
+        params=params,
+        headers=headers,
+        impersonate="chrome110"
+    )
+    return response
+
+
+class GetSerieInfo:
+    def __init__(self, series_id):
+        """
+        Initialize the GetSerieInfo class for scraping TV series information using Crunchyroll API.
+
+        Args:
+            - series_id (str): The Crunchyroll series ID.
+        """
+        self.series_id = series_id
+        self.seasons_manager = SeasonManager()
+        self.headers = get_headers()
+        self.headers['authorization'] = f"Bearer {get_auth_token(generate_device_id()).access_token}"
+        self.params = {
+            'force_locale': '',
+            'preferred_audio_language': 'it-IT',
+            'locale': 'it-IT',
+        }
+        self.series_name = None
+        self._episodes_cache = {}
+
+    def collect_season(self) -> None:
+        """
+        Retrieve all seasons using Crunchyroll API, but NOT episodes.
+        """
+        response = get_series_seasons(self.series_id, self.headers, self.params)
+
+        if response.status_code != 200:
+            logging.error(f"Failed to fetch seasons for series {self.series_id}")
+            return
+
+        # Get the JSON response
+        data = response.json()
+        seasons = data.get("data", [])
+
+        # Set series name from first season if available
+        if seasons:
+            self.series_name = seasons[0].get("series_title") or seasons[0].get("title")
+
+        for season in seasons:
+            season_num = season.get("season_number", 0)
+            season_name = season.get("title", f"Season {season_num}")
+
+            self.seasons_manager.add_season({
+                'number': season_num,
+                'name': season_name,
+                'id': season.get('id')
+            })
+
+    def _fetch_episodes_for_season(self, season_number: int):
+        """
+        Fetch and cache episodes for a specific season number.
+        """
+        season = self.seasons_manager.get_season_by_number(season_number)
+
+        if not season or getattr(season, 'id', None) is None:
+            logging.error(f"Season {season_number} not found or missing id.")
+            return []
+
+        season_id = season.id
+        ep_response = get_season_episodes(season_id, self.headers, self.params)
+        if ep_response.status_code != 200:
+            logging.error(f"Failed to fetch episodes for season {season_id}")
+            return []
+
+        ep_data = ep_response.json()
+        episodes = ep_data.get("data", [])
+        episode_list = []
+
+        for ep in episodes:
+            ep_num = ep.get("episode_number")
+            ep_title = ep.get("title", f"Episodio {ep_num}")
+            ep_id = ep.get("id")
+            ep_url = f"https://www.crunchyroll.com/watch/{ep_id}"
+
+            episode_list.append({
+                'number': ep_num,
+                'name': ep_title,
+                'url': ep_url,
+                'duration': int(ep.get('duration_ms', 0) / 60000),
+            })
+
+        self._episodes_cache[season_number] = episode_list
+        return episode_list
+
+    def _get_episode_audio_locales_and_urls(self, episode_id):
+        """
+        Fetch available audio locales and their URLs for a given episode ID.
+        Returns: (audio_locales, urls_by_locale)
+        """
+        url = f'https://www.crunchyroll.com/content/v2/cms/objects/{episode_id}'
+        headers = self.headers.copy()
+        params = {
+            'ratings': 'true',
+            'locale': 'it-IT',
+        }
+        response = requests.get(
+            url,
+            params=params,
+            headers=headers,
+            impersonate="chrome110"
+        )
+
+        if response.status_code != 200:
+            logging.warning(f"Failed to fetch audio locales for episode {episode_id}")
+            return [], {}
+
+        data = response.json()
+        try:
+            versions = data["data"][0]['episode_metadata'].get("versions", [])
+
+            audio_locales = []
+            urls_by_locale = {}
+
+            for v in versions:
+                locale = v.get("audio_locale")
+                guid = v.get("guid")
+
+                if locale and guid:
+                    audio_locales.append(locale)
+                    urls_by_locale[locale] = f"https://www.crunchyroll.com/it/watch/{guid}"
+                    #print(f"Locale: {locale}, URL: {urls_by_locale[locale]}")
+
+            return audio_locales, urls_by_locale
+
+        except Exception as e:
+            logging.error(f"Error parsing audio locales for episode {episode_id}: {e}")
+            return [], {}
+
+    # ------------- FOR GUI -------------
+    def getNumberSeason(self) -> int:
+        """
+        Get the total number of seasons available for the series.
+        """
+        if not self.seasons_manager.seasons:
+            self.collect_season()
+
+        return len(self.seasons_manager.seasons)
+
+    def getEpisodeSeasons(self, season_number: int) -> list:
+        """
+        Get all episodes for a specific season (fetches only when needed).
+        """
+        if not self.seasons_manager.seasons:
+            self.collect_season()
+        if season_number not in self._episodes_cache:
+            episodes = self._fetch_episodes_for_season(season_number)
+        else:
+            episodes = self._episodes_cache[season_number]
+        return episodes
+
+    def selectEpisode(self, season_number: int, episode_index: int) -> dict:
+        """
+        Get information for a specific episode in a specific season.
+        """
+        episodes = self.getEpisodeSeasons(season_number)
+        if not episodes or episode_index < 0 or episode_index >= len(episodes):
+            logging.error(f"Episode index {episode_index} is out of range for season {season_number}")
+            return None
+
+        episode = episodes[episode_index]
+        episode_id = episode.get("url", "").split("/")[-1] if "url" in episode else None
+
+        # Update only the episode URL if available in it-IT or en-US
+        _, urls_by_locale = self._get_episode_audio_locales_and_urls(episode_id)
+        new_url = urls_by_locale.get("it-IT") or urls_by_locale.get("en-US")
+
+        if new_url:
+            episode["url"] = new_url
+
+        return episode
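
A minimal usage sketch of the new GetSerieInfo class above (not part of the diff): it assumes a valid Crunchyroll series ID and a working 'etp_rt' value under SITE_LOGIN → crunchyroll in config.json; the series ID shown is a placeholder.

    # Hypothetical driver for the class defined in ScrapeSerie.py above.
    info = GetSerieInfo("GXXXXXXXX")              # placeholder series ID

    n_seasons = info.getNumberSeason()            # triggers collect_season() on first use
    print(f"{info.series_name}: {n_seasons} seasons")

    episodes = info.getEpisodeSeasons(1)          # fetched lazily, cached per season
    for ep in episodes:
        print(ep['number'], ep['name'], f"{ep['duration']} min")

    first = info.selectEpisode(1, 0)              # swaps in an it-IT/en-US watch URL when available
    print(first['url'])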
StreamingCommunity/Api/Site/crunchyroll/util/get_license.py
@@ -0,0 +1,227 @@
+# 28.07.25
+
+import uuid
+from dataclasses import dataclass, field
+from typing import Optional, Dict, Any
+
+
+# External library
+from curl_cffi.requests import Session
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.headers import get_userAgent, get_headers
+
+
+# Variable
+device_id = None
+auth_basic = 'bm9haWhkZXZtXzZpeWcwYThsMHE6'
+etp_rt = config_manager.get_dict("SITE_LOGIN", "crunchyroll")['etp_rt']
+x_cr_tab_id = config_manager.get_dict("SITE_LOGIN", "crunchyroll")['x_cr_tab_id']
+
+
+@dataclass
+class Token:
+    access_token: Optional[str] = None
+    refresh_token: Optional[str] = None
+    expires_in: Optional[int] = None
+    token_type: Optional[str] = None
+    scope: Optional[str] = None
+    country: Optional[str] = None
+    account_id: Optional[str] = None
+    profile_id: Optional[str] = None
+    extra: Dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class Account:
+    account_id: Optional[str] = None
+    external_id: Optional[str] = None
+    email: Optional[str] = None
+    extra: Dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class Profile:
+    profile_id: Optional[str] = None
+    email: Optional[str] = None
+    profile_name: Optional[str] = None
+    extra: Dict[str, Any] = field(default_factory=dict)
+
+
+
+def generate_device_id():
+    global device_id
+
+    if device_id is not None:
+        return device_id
+
+    device_id = str(uuid.uuid4())
+    return device_id
+
+
+def get_auth_token(device_id):
+    with Session(impersonate="chrome110") as session:
+        cookies = {
+            'etp_rt': etp_rt,
+        }
+        response = session.post(
+            'https://www.crunchyroll.com/auth/v1/token',
+            headers={
+                'authorization': f'Basic {auth_basic}',
+                'user-agent': get_userAgent(),
+            },
+            data={
+                'device_id': device_id,
+                'device_type': 'Chrome on Windows',
+                'grant_type': 'etp_rt_cookie',
+            },
+            cookies=cookies
+        )
+        if response.status_code == 400:
+            print("Error 400: Please enter a correct 'etp_rt' value in config.json. You can find the value in the request headers.")
+
+        # Get the JSON response
+        data = response.json()
+        known = {
+            'access_token', 'refresh_token', 'expires_in', 'token_type', 'scope',
+            'country', 'account_id', 'profile_id'
+        }
+        extra = {k: v for k, v in data.items() if k not in known}
+        return Token(
+            access_token=data.get('access_token'),
+            refresh_token=data.get('refresh_token'),
+            expires_in=data.get('expires_in'),
+            token_type=data.get('token_type'),
+            scope=data.get('scope'),
+            country=data.get('country'),
+            account_id=data.get('account_id'),
+            profile_id=data.get('profile_id'),
+            extra=extra
+        )
+
+
+def get_account(token: Token, device_id):
+    with Session(impersonate="chrome110") as session:
+        country = (token.country or "IT")
+        cookies = {
+            'device_id': device_id,
+            'c_locale': f'{country.lower()}-{country.upper()}',
+        }
+        response = session.get(
+            'https://www.crunchyroll.com/accounts/v1/me',
+            headers={
+                'authorization': f'Bearer {token.access_token}',
+                'user-agent': get_userAgent(),
+            },
+            cookies=cookies
+        )
+        response.raise_for_status()
+
+        # Get the JSON response
+        data = response.json()
+        known = {
+            'account_id', 'external_id', 'email'
+        }
+        extra = {k: v for k, v in data.items() if k not in known}
+        return Account(
+            account_id=data.get('account_id'),
+            external_id=data.get('external_id'),
+            email=data.get('email'),
+            extra=extra
+        )
+
+
+def get_profiles(token: Token, device_id):
+    with Session(impersonate="chrome110") as session:
+        country = token.country
+        cookies = {
+            'device_id': device_id,
+            'c_locale': f'{country.lower()}-{country.upper()}',
+        }
+        response = session.get(
+            'https://www.crunchyroll.com/accounts/v1/me/multiprofile',
+            headers={
+                'authorization': f'Bearer {token.access_token}',
+                'user-agent': get_userAgent(),
+            },
+            cookies=cookies
+        )
+        response.raise_for_status()
+
+        # Get the JSON response
+        data = response.json()
+        profiles = []
+        for p in data.get('profiles', []):
+            known = {
+                'profile_id', 'email', 'profile_name'
+            }
+            extra = {k: v for k, v in p.items() if k not in known}
+            profiles.append(Profile(
+                profile_id=p.get('profile_id'),
+                email=p.get('email'),
+                profile_name=p.get('profile_name'),
+                extra=extra
+            ))
+        return profiles
+
+
+def cr_login_session(device_id: str, email: str, password: str):
+    """
+    Performs a login request to the Crunchyroll SSO using curl_cffi.requests.
+    """
+    cookies = {
+        'device_id': device_id,
+    }
+    data = (
+        f'{{"email":"{email}","password":"{password}","eventSettings":{{}}}}'
+    )
+    with Session(impersonate="chrome110") as session:
+        response = session.post(
+            'https://sso.crunchyroll.com/api/login',
+            cookies=cookies,
+            headers=get_headers(),
+            data=data
+        )
+        response.raise_for_status()
+        return response
+
+
+def get_playback_session(token: Token, device_id: str, url_id: str):
+    """
+    Creates a session to fetch playback data from Crunchyroll.
+    """
+    cookies = {
+        'device_id': device_id,
+        'etp_rt': etp_rt
+    }
+    headers = {
+        'authorization': f'Bearer {token.access_token}',
+        'user-agent': get_userAgent(),
+        'x-cr-tab-id': x_cr_tab_id
+    }
+
+    with Session(impersonate="chrome110") as session:
+        response = session.get(
+            f'https://www.crunchyroll.com/playback/v3/{url_id}/web/chrome/play',
+            cookies=cookies,
+            headers=headers
+        )
+
+        if (response.status_code == 403):
+            raise Exception("Playback is Rejected: The current subscription does not have access to this content")
+
+        if (response.status_code == 420):
+            raise Exception("TOO_MANY_ACTIVE_STREAMS. Wait a few minutes and try again.")
+
+        response.raise_for_status()
+
+        # Get the JSON response
+        data = response.json()
+
+        if data.get('error') == 'Playback is Rejected':
+            raise Exception("Playback is Rejected: Premium required")
+
+        url = data.get('url')
+        return url, headers
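
The helpers above chain into a single token/playback flow. A sketch under the assumption that config.json already contains valid 'etp_rt' and 'x_cr_tab_id' values; the watch ID is a placeholder:

    # Hypothetical end-to-end flow using only the functions defined in get_license.py above.
    device = generate_device_id()                      # cached in the module-level device_id
    token = get_auth_token(device)                     # exchanges the etp_rt cookie for a Bearer token

    account = get_account(token, device)
    profiles = get_profiles(token, device)
    print(account.email, [p.profile_name for p in profiles])

    # Returns the playback URL plus the headers needed for the follow-up license request.
    mpd_url, playback_headers = get_playback_session(token, device, "GXXXXXXXX")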
StreamingCommunity/Api/Site/guardaserie/site.py
@@ -59,9 +59,8 @@ def title_search(query: str) -> int:

     # Create soup and find table
     soup = BeautifulSoup(response.text, "html.parser")
-    table_content = soup.find('div', class_="recent-posts")

-    for serie_div in
+    for serie_div in soup.find_all('div', class_='entry'):
         try:

             title = serie_div.find('a').get("title")
StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py
@@ -40,26 +40,27 @@ class GetSerieInfo:
         Retrieves the number of seasons of a TV series.

         Returns:
-            int: Number of seasons of the TV series.
+            int: Number of seasons of the TV series. Returns -1 if parsing fails.
         """
         try:
-
             # Make an HTTP request to the series URL
             response = httpx.get(self.url, headers=self.headers, timeout=max_timeout, follow_redirects=True)
             response.raise_for_status()

+            # Find the seasons container
             soup = BeautifulSoup(response.text, "html.parser")
-
             table_content = soup.find('div', class_="tt_season")
             seasons_number = len(table_content.find_all("li"))
-
+
+            # Try to get the title, with fallback
+            title_element = soup.find("h1", class_="entry-title")
+            self.tv_name = title_element.get_text(strip=True) if title_element else "Unknown Title"

             return seasons_number

         except Exception as e:
-            logging.error(f"Error parsing HTML page: {e}")
-
-            return -1
+            logging.error(f"Error parsing HTML page: {str(e)}")
+            return -1

     def get_episode_number(self, n_season: int) -> List[Dict[str, str]]:
         """
@@ -88,7 +89,7 @@ class GetSerieInfo:
         for episode_div in episode_content:
             index = episode_div.find("a").get("data-num")
             link = episode_div.find("a").get("data-link")
-            name = episode_div.find("a").get("data-
+            name = episode_div.find("a").get("data-num")

             obj_episode = {
                 'number': index,
StreamingCommunity/Api/Site/mediasetinfinity/__init__.py
@@ -0,0 +1,96 @@
+# 21.05.24
+
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Api.Template import get_select_title
+from StreamingCommunity.Api.Template.config_loader import site_constant
+from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+
+
+# Logic class
+from .site import title_search, table_show_manager, media_search_manager
+from .series import download_series
+from .film import download_film
+
+
+# Variable
+indice = 3
+_useFor = "Film_&_Serie"
+_priority = 0
+_engineDownload = "hls"
+_deprecate = False
+
+msg = Prompt()
+console = Console()
+
+
+def get_user_input(string_to_search: str = None):
+    """
+    Asks the user to input a search term.
+    """
+    if string_to_search is None:
+        string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
+
+    return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
+
+def process_search_result(select_title, selections=None):
+    """
+    Handles the search result and initiates the download for either a film or series.
+
+    Parameters:
+        select_title (MediaItem): The selected media item
+        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
+                                     {'season': season_selection, 'episode': episode_selection}
+    """
+    if select_title.type == 'tv':
+        season_selection = None
+        episode_selection = None
+
+        if selections:
+            season_selection = selections.get('season')
+            episode_selection = selections.get('episode')
+
+        download_series(select_title, season_selection, episode_selection)
+
+    else:
+        download_film(select_title)
+
+def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
+    """
+    Main function of the application for search.
+
+    Parameters:
+        string_to_search (str, optional): String to search for
+        get_onlyDatabase (bool, optional): If True, return only the database object
+        direct_item (dict, optional): Direct item to process (bypass search)
+        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
+                                     {'season': season_selection, 'episode': episode_selection}
+    """
+    if direct_item:
+        select_title = MediaItem(**direct_item)
+        process_search_result(select_title, selections)
+        return
+
+    if string_to_search is None:
+        string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
+
+    # Search on database
+    len_database = title_search(string_to_search)
+
+    # If only the database is needed, return the manager
+    if get_onlyDatabase:
+        return media_search_manager
+
+    if len_database > 0:
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
+        process_search_result(select_title, selections)
+
+    else:
+        # If no results are found, ask again
+        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
+        search()
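
As with the other site modules, search() above can also be driven non-interactively. A sketch only: the field names inside direct_item are illustrative and assume the same MediaItem shape used by the other providers, and the URL is a placeholder:

    # Hypothetical non-interactive invocation of the mediasetinfinity module.
    item = {"name": "Example Title", "type": "tv", "url": "https://example.invalid/title"}
    selections = {"season": "1", "episode": "1-3"}     # keys match selections.get('season'/'episode')

    search(direct_item=item, selections=selections)    # bypasses the prompt, goes straight to download_series

    db = search("example", get_onlyDatabase=True)      # returns media_search_manager without downloading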
StreamingCommunity/Api/Site/mediasetinfinity/film.py
@@ -0,0 +1,76 @@
+# 21.05.24
+
+import os
+from typing import Tuple
+
+
+# External library
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.os import os_manager, get_wvd_path
+from StreamingCommunity.Util.message import start_message
+from StreamingCommunity.Util.headers import get_headers
+from StreamingCommunity.Lib.Downloader.DASH.downloader import DASH_Download
+
+
+# Logic class
+from StreamingCommunity.Api.Template.config_loader import site_constant
+from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+
+
+# Player
+from .util.fix_mpd import get_manifest
+from .util.get_license import get_bearer_token, get_playback_url, get_tracking_info, generate_license_url
+
+
+# Variable
+console = Console()
+
+
+def download_film(select_title: MediaItem) -> Tuple[str, bool]:
+    """
+    Downloads a film using the provided film ID, title name, and domain.
+
+    Parameters:
+        - select_title (MediaItem): The selected media item.
+
+    Return:
+        - str: output path if successful, otherwise None
+    """
+    start_message()
+    console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
+
+    # Define the filename and path for the downloaded film
+    title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
+    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
+
+    # Generate mpd and license URLs
+    bearer = get_bearer_token()
+    playback_json = get_playback_url(bearer, select_title.url.split('_')[-1])
+    tracking_info = get_tracking_info(bearer, playback_json)[0]
+
+    license_url = generate_license_url(bearer, tracking_info)
+    mpd_url = get_manifest(tracking_info['video_src'])
+
+    # Download the episode
+    r_proc = DASH_Download(
+        cdm_device=get_wvd_path(),
+        license_url=license_url,
+        mpd_url=mpd_url,
+        output_path=mp4_path,
+    )
+    r_proc.parse_manifest(custom_headers=get_headers())
+
+    if r_proc.download_and_decrypt():
+        r_proc.finalize_output()
+
+    # Get final output path and status
+    status = r_proc.get_status()
+
+    if status['error'] is not None and status['path']:
+        try: os.remove(status['path'])
+        except Exception: pass
+
+    return status['path'], status['stopped']
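
download_film above is also the first consumer of the new DASH pipeline (Lib/Downloader/DASH/downloader.py, added in this release). A minimal sketch of that four-step pattern in isolation, assuming a configured Widevine .wvd device file and already-known license/MPD URLs (the URLs below are placeholders):

    # Hypothetical standalone use of the DASH_Download flow shown in film.py.
    dl = DASH_Download(
        cdm_device=get_wvd_path(),                        # path to the .wvd CDM device file
        license_url="https://example.invalid/license",    # placeholder
        mpd_url="https://example.invalid/manifest.mpd",   # placeholder
        output_path="/tmp/example_output",
    )
    dl.parse_manifest(custom_headers=get_headers())

    if dl.download_and_decrypt():
        dl.finalize_output()

    status = dl.get_status()                              # dict with at least 'path', 'error', 'stopped'
    print(status['path'], status['stopped'])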