StreamingCommunity 1.9.5__py3-none-any.whl → 1.9.90__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic. Click here for more details.
- StreamingCommunity/Api/Player/Helper/Vixcloud/js_parser.py +143 -0
- StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +145 -0
- StreamingCommunity/Api/Player/ddl.py +89 -0
- StreamingCommunity/Api/Player/maxstream.py +151 -0
- StreamingCommunity/Api/Player/supervideo.py +194 -0
- StreamingCommunity/Api/Player/vixcloud.py +273 -0
- StreamingCommunity/Api/Site/1337xx/__init__.py +51 -0
- StreamingCommunity/Api/Site/1337xx/costant.py +15 -0
- StreamingCommunity/Api/Site/1337xx/site.py +86 -0
- StreamingCommunity/Api/Site/1337xx/title.py +66 -0
- StreamingCommunity/Api/Site/altadefinizione/__init__.py +51 -0
- StreamingCommunity/Api/Site/altadefinizione/costant.py +15 -0
- StreamingCommunity/Api/Site/altadefinizione/film.py +74 -0
- StreamingCommunity/Api/Site/altadefinizione/site.py +89 -0
- StreamingCommunity/Api/Site/animeunity/__init__.py +51 -0
- StreamingCommunity/Api/Site/animeunity/costant.py +15 -0
- StreamingCommunity/Api/Site/animeunity/film_serie.py +135 -0
- StreamingCommunity/Api/Site/animeunity/site.py +167 -0
- StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +97 -0
- StreamingCommunity/Api/Site/cb01new/__init__.py +52 -0
- StreamingCommunity/Api/Site/cb01new/costant.py +15 -0
- StreamingCommunity/Api/Site/cb01new/film.py +73 -0
- StreamingCommunity/Api/Site/cb01new/site.py +76 -0
- StreamingCommunity/Api/Site/ddlstreamitaly/__init__.py +58 -0
- StreamingCommunity/Api/Site/ddlstreamitaly/costant.py +16 -0
- StreamingCommunity/Api/Site/ddlstreamitaly/series.py +146 -0
- StreamingCommunity/Api/Site/ddlstreamitaly/site.py +95 -0
- StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +85 -0
- StreamingCommunity/Api/Site/guardaserie/__init__.py +53 -0
- StreamingCommunity/Api/Site/guardaserie/costant.py +15 -0
- StreamingCommunity/Api/Site/guardaserie/series.py +199 -0
- StreamingCommunity/Api/Site/guardaserie/site.py +86 -0
- StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +110 -0
- StreamingCommunity/Api/Site/ilcorsaronero/__init__.py +52 -0
- StreamingCommunity/Api/Site/ilcorsaronero/costant.py +15 -0
- StreamingCommunity/Api/Site/ilcorsaronero/site.py +63 -0
- StreamingCommunity/Api/Site/ilcorsaronero/title.py +46 -0
- StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +141 -0
- StreamingCommunity/Api/Site/mostraguarda/__init__.py +49 -0
- StreamingCommunity/Api/Site/mostraguarda/costant.py +15 -0
- StreamingCommunity/Api/Site/mostraguarda/film.py +99 -0
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +56 -0
- StreamingCommunity/Api/Site/streamingcommunity/costant.py +15 -0
- StreamingCommunity/Api/Site/streamingcommunity/film.py +75 -0
- StreamingCommunity/Api/Site/streamingcommunity/series.py +206 -0
- StreamingCommunity/Api/Site/streamingcommunity/site.py +137 -0
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +123 -0
- StreamingCommunity/Api/Template/Class/SearchType.py +101 -0
- StreamingCommunity/Api/Template/Util/__init__.py +5 -0
- StreamingCommunity/Api/Template/Util/get_domain.py +173 -0
- StreamingCommunity/Api/Template/Util/manage_ep.py +179 -0
- StreamingCommunity/Api/Template/Util/recall_search.py +37 -0
- StreamingCommunity/Api/Template/__init__.py +3 -0
- StreamingCommunity/Api/Template/site.py +87 -0
- StreamingCommunity/Lib/Downloader/HLS/downloader.py +946 -0
- StreamingCommunity/Lib/Downloader/HLS/proxyes.py +110 -0
- StreamingCommunity/Lib/Downloader/HLS/segments.py +561 -0
- StreamingCommunity/Lib/Downloader/MP4/downloader.py +155 -0
- StreamingCommunity/Lib/Downloader/TOR/downloader.py +296 -0
- StreamingCommunity/Lib/Downloader/__init__.py +5 -0
- StreamingCommunity/Lib/FFmpeg/__init__.py +4 -0
- StreamingCommunity/Lib/FFmpeg/capture.py +170 -0
- StreamingCommunity/Lib/FFmpeg/command.py +296 -0
- StreamingCommunity/Lib/FFmpeg/util.py +249 -0
- StreamingCommunity/Lib/M3U8/__init__.py +6 -0
- StreamingCommunity/Lib/M3U8/decryptor.py +164 -0
- StreamingCommunity/Lib/M3U8/estimator.py +176 -0
- StreamingCommunity/Lib/M3U8/parser.py +666 -0
- StreamingCommunity/Lib/M3U8/url_fixer.py +52 -0
- StreamingCommunity/Lib/TMBD/__init__.py +2 -0
- StreamingCommunity/Lib/TMBD/obj_tmbd.py +39 -0
- StreamingCommunity/Lib/TMBD/tmdb.py +346 -0
- StreamingCommunity/Upload/update.py +68 -0
- StreamingCommunity/Upload/version.py +5 -0
- StreamingCommunity/Util/_jsonConfig.py +204 -0
- StreamingCommunity/Util/call_stack.py +42 -0
- StreamingCommunity/Util/color.py +20 -0
- StreamingCommunity/Util/console.py +12 -0
- StreamingCommunity/Util/ffmpeg_installer.py +311 -0
- StreamingCommunity/Util/headers.py +147 -0
- StreamingCommunity/Util/logger.py +53 -0
- StreamingCommunity/Util/message.py +64 -0
- StreamingCommunity/Util/os.py +554 -0
- StreamingCommunity/Util/table.py +229 -0
- StreamingCommunity/__init__.py +0 -0
- StreamingCommunity/run.py +2 -11
- {StreamingCommunity-1.9.5.dist-info → StreamingCommunity-1.9.90.dist-info}/METADATA +10 -27
- StreamingCommunity-1.9.90.dist-info/RECORD +92 -0
- {StreamingCommunity-1.9.5.dist-info → StreamingCommunity-1.9.90.dist-info}/WHEEL +1 -1
- {StreamingCommunity-1.9.5.dist-info → StreamingCommunity-1.9.90.dist-info}/entry_points.txt +0 -1
- StreamingCommunity-1.9.5.dist-info/RECORD +0 -7
- {StreamingCommunity-1.9.5.dist-info → StreamingCommunity-1.9.90.dist-info}/LICENSE +0 -0
- {StreamingCommunity-1.9.5.dist-info → StreamingCommunity-1.9.90.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
# 26.05.24
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
import logging
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# External libraries
|
|
8
|
+
import httpx
|
|
9
|
+
import jsbeautifier
|
|
10
|
+
from bs4 import BeautifulSoup
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Internal utilities
|
|
14
|
+
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
15
|
+
from StreamingCommunity.Util.headers import get_headers
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# Variable
|
|
19
|
+
max_timeout = config_manager.get_int("REQUESTS", "timeout")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class VideoSource:
    # Regex that captures the master-playlist URL inside the beautified player script.
    _FILE_RE = re.compile(r'sources:\s*\[\{\s*file:\s*"([^"]+)"')

    def __init__(self, url: str) -> None:
        """
        Initializes the VideoSource object with default values.

        Attributes:
            - url (str): The URL of the video source.
        """
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
            'User-Agent': get_headers()
        }
        self.client = httpx.Client()
        self.url = url

    def make_request(self, url: str) -> str:
        """
        Make an HTTP GET request to the provided URL.

        Parameters:
            - url (str): The URL to make the request to.

        Returns:
            - str: The response content if successful, None otherwise.
        """
        try:
            response = self.client.get(
                url=url,
                headers=self.headers,
                follow_redirects=True,
                timeout=max_timeout
            )
            response.raise_for_status()
            return response.text

        except Exception as e:
            logging.error(f"Request failed: {e}")
            return None

    def parse_html(self, html_content: str) -> BeautifulSoup:
        """
        Parse the provided HTML content using BeautifulSoup.

        Parameters:
            - html_content (str): The HTML content to parse.

        Returns:
            - BeautifulSoup: Parsed HTML content if successful, None otherwise.
        """
        try:
            soup = BeautifulSoup(html_content, "html.parser")
            return soup

        except Exception as e:
            logging.error(f"Failed to parse HTML content: {e}")
            return None

    def get_iframe(self, soup):
        """
        Extracts the source URL of the second iframe in the provided BeautifulSoup object.

        Parameters:
            - soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML.

        Returns:
            - str: The source URL of the second iframe, or None if not found.
        """
        iframes = soup.find_all("iframe")
        if iframes and len(iframes) > 1:
            return iframes[1].get("src")

        return None

    def find_content(self, url):
        """
        Makes a request to the specified URL and parses the HTML content.

        Parameters:
            - url (str): The URL to fetch content from.

        Returns:
            - BeautifulSoup: A BeautifulSoup object representing the parsed HTML content, or None if the request fails.
        """
        content = self.make_request(url)
        if content:
            return self.parse_html(content)

        return None

    def get_result_node_js(self, soup):
        """
        Extracts and beautifies the obfuscated player script from the parsed page.

        Parameters:
            - soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML content.

        Returns:
            - str: The beautified script text, or None if no "eval" script is present.
        """
        for script in soup.find_all("script"):
            # The player script is packed with eval(); beautifying unpacks it
            if "eval" in str(script):
                return jsbeautifier.beautify(script.text)

        return None

    def _extract_file_url(self, data_js):
        """
        Extract the master playlist URL from a beautified player script.

        Parameters:
            - data_js (str | None): The beautified script text, possibly None.

        Returns:
            - str: The playlist URL, or None when the script or the pattern is missing.
        """
        # BUGFIX: the original called re.search on a possibly-None value in the
        # iframe branch, raising TypeError that was silently swallowed.
        if data_js is None:
            return None

        match = self._FILE_RE.search(data_js)
        return match.group(1) if match else None

    def get_playlist(self) -> str:
        """
        Download a video from the provided URL.

        Returns:
            str: The URL of the master playlist if successful, None otherwise.
        """
        try:
            html_content = self.make_request(self.url)
            if not html_content:
                logging.error("Failed to fetch HTML content.")
                return None

            soup = self.parse_html(html_content)
            if not soup:
                logging.error("Failed to parse HTML content.")
                return None

            # Find master playlist directly on the page
            playlist_url = self._extract_file_url(self.get_result_node_js(soup))
            if playlist_url is not None:
                return playlist_url

            # Fall back to the supervideo iframe path
            iframe_src = self.get_iframe(soup)
            if not iframe_src:
                logging.error("No iframe found.")
                return None

            down_page_soup = self.find_content(iframe_src)
            if not down_page_soup:
                logging.error("Failed to fetch down page content.")
                return None

            pattern = r'data-link="(//supervideo[^"]+)"'
            match = re.search(pattern, str(down_page_soup))
            if not match:
                logging.error("No player available for download.")
                return None

            # data-link is protocol-relative; prepend the scheme
            supervideo_url = "https:" + match.group(1)
            supervideo_soup = self.find_content(supervideo_url)
            if not supervideo_soup:
                logging.error("Failed to fetch supervideo content.")
                return None

            # Find master playlist in the supervideo player script
            playlist_url = self._extract_file_url(self.get_result_node_js(supervideo_soup))
            if playlist_url is None:
                logging.error("No master playlist found in supervideo script.")
            return playlist_url

        except Exception as e:
            logging.error(f"An error occurred: {e}")
            return None
|
|
194
|
+
|
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
# 01.03.24
|
|
2
|
+
|
|
3
|
+
import sys
|
|
4
|
+
import logging
|
|
5
|
+
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# External libraries
|
|
9
|
+
import httpx
|
|
10
|
+
from bs4 import BeautifulSoup
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Internal utilities
|
|
14
|
+
from StreamingCommunity.Util.headers import get_headers
|
|
15
|
+
from StreamingCommunity.Util.console import console
|
|
16
|
+
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
17
|
+
from .Helper.Vixcloud.util import WindowVideo, WindowParameter, StreamsCollection
|
|
18
|
+
from .Helper.Vixcloud.js_parser import JavaScriptParser
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Variable
|
|
22
|
+
max_timeout = config_manager.get_int("REQUESTS", "timeout")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class VideoSource:
    def __init__(self, site_name: str, is_series: bool):
        """
        Initialize video source for streaming site.

        Args:
            site_name (str): Name of streaming site
            is_series (bool): Flag for series or movie content
        """
        self.headers = {'user-agent': get_headers()}
        self.base_name = site_name
        self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
        self.is_series = is_series

    def setup(self, media_id: int):
        """
        Configure media-specific context.

        Args:
            media_id (int): Unique identifier for media item
        """
        self.media_id = media_id

    def get_iframe(self, episode_id: int) -> None:
        """
        Retrieve iframe source for specified episode and store it in self.iframe_src.

        Args:
            episode_id (int): Unique identifier for episode

        Raises:
            Exception: re-raised after logging when the request or parsing fails.
        """
        params = {}

        if self.is_series:
            params = {
                'episode_id': episode_id,
                'next_episode': '1'
            }

        try:
            # Make a request to get iframe source
            # (consistency fix: send the same headers every other request in this class uses)
            response = httpx.get(
                url=f"https://{self.base_name}.{self.domain}/iframe/{self.media_id}",
                headers=self.headers,
                params=params,
                timeout=max_timeout
            )
            response.raise_for_status()

            # Parse response with BeautifulSoup to get iframe source
            soup = BeautifulSoup(response.text, "html.parser")
            self.iframe_src = soup.find("iframe").get("src")

        except Exception as e:
            logging.error(f"Error getting iframe source: {e}")
            raise

    def parse_script(self, script_text: str) -> None:
        """
        Convert raw script to structured video metadata.

        Args:
            script_text (str): Raw JavaScript/HTML script content

        Raises:
            Exception: re-raised after logging when the script cannot be parsed.
        """
        try:
            converter = JavaScriptParser.parse(js_string=str(script_text))

            # Create window video, streams and parameter objects
            self.canPlayFHD = bool(converter.get('canPlayFHD'))
            self.window_video = WindowVideo(converter.get('video'))
            self.window_streams = StreamsCollection(converter.get('streams'))
            self.window_parameter = WindowParameter(converter.get('masterPlaylist'))

        except Exception as e:
            logging.error(f"Error parsing script: {e}")
            raise

    def get_content(self) -> None:
        """
        Fetch and process video content from iframe source.

        Workflow:
        - Validate iframe source
        - Retrieve content
        - Parse embedded script
        """
        try:
            if self.iframe_src is not None:

                # Make a request to get content
                try:
                    response = httpx.get(
                        url=self.iframe_src,
                        headers=self.headers,
                        timeout=max_timeout
                    )
                    response.raise_for_status()

                except Exception as e:
                    # BUGFIX: use a non-zero exit status on failure (was 0 = success)
                    # and fix the typo in the message ("contente").
                    logging.error(f"Failed to get vixcloud content with error: {e}")
                    sys.exit(1)

                # Parse response with BeautifulSoup to get content
                soup = BeautifulSoup(response.text, "html.parser")
                script = soup.find("body").find("script").text

                # Parse script to get video information
                self.parse_script(script_text=script)

        except Exception as e:
            logging.error(f"Error getting content: {e}")
            raise

    def get_playlist(self) -> str:
        """
        Generate authenticated playlist URL.

        Returns:
            str: Fully constructed playlist URL with authentication parameters
        """
        # Initialize parameters dictionary
        params = {}

        # Add 'h' parameter if FHD (1080p) playback is available
        if self.canPlayFHD:
            params['h'] = 1

        # Parse the original URL
        parsed_url = urlparse(self.window_parameter.url)
        query_params = parse_qs(parsed_url.query)

        # Check specifically for 'b=1' in the query parameters and preserve it
        if 'b' in query_params and query_params['b'] == ['1']:
            params['b'] = 1

        # Add authentication parameters (token and expiration)
        params.update({
            "token": self.window_parameter.token,
            "expires": self.window_parameter.expires
        })

        # Build the updated query string
        query_string = urlencode(params)

        # Construct the new URL with updated query parameters
        return urlunparse(parsed_url._replace(query=query_string))

    def get_mp4(self, url_to_download: str, scws_id: str) -> list:
        """
        Generate download links for the specified resolutions from StreamingCommunity.

        Args:
            url_to_download (str): URL of the video page.
            scws_id (str): SCWS ID of the title.

        Returns:
            On success a dict {'url': ..., 'quality': ...}; an empty list on failure.
        """
        headers = {
            'referer': url_to_download,
            'user-agent': get_headers(),
        }

        # API request to get video details
        video_api_url = f'https://{self.base_name}.{self.domain}/api/video/{scws_id}'
        response = httpx.get(video_api_url, headers=headers, timeout=max_timeout)

        if response.status_code == 200:
            response_json = response.json()

            video_tracks = response_json.get('video_tracks', [])

            # BUGFIX: the original indexed video_tracks[-1] unconditionally,
            # raising IndexError when the API returns no tracks.
            if not video_tracks:
                logging.error("No video tracks returned by the video API.")
                return []

            # The last listed track is used (presumably the best rendition — confirm with API docs)
            track = video_tracks[-1]
            console.print(f"[cyan]Available resolutions: [red]{[str(track['quality']) for track in video_tracks]}")

            # Request download link generation for the selected track
            download_response = httpx.post(
                url=f'https://{self.base_name}.{self.domain}/api/download/generate_link?scws_id={track["video_id"]}&rendition={track["quality"]}',
                headers={
                    'referer': url_to_download,
                    'user-agent': get_headers(),
                    'x-xsrf-token': config_manager.get("SITE", self.base_name)['extra']['x-xsrf-token']
                },
                cookies={
                    'streamingcommunity_session': config_manager.get("SITE", self.base_name)['extra']['streamingcommunity_session']
                }
            )

            if download_response.status_code == 200:
                return {'url': download_response.text, 'quality': track["quality"]}

            else:
                logging.error(f"Failed to generate link for resolution {track['quality']} (HTTP {download_response.status_code}).")

        else:
            logging.error(f"Error fetching video API URL (HTTP {response.status_code}).")

        return []
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
class VideoSourceAnime(VideoSource):
    def __init__(self, site_name: str):
        """
        Initialize anime-specific video source.

        Args:
            site_name (str): Name of anime streaming site

        Extends base VideoSource with anime-specific initialization
        """
        self.headers = {'user-agent': get_headers()}
        self.base_name = site_name
        self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
        self.src_mp4 = None

    def get_embed(self, episode_id: int):
        """
        Retrieve embed URL and extract video source.

        Args:
            episode_id (int): Unique identifier for episode

        Returns:
            str: Parsed script content, or None on failure.
        """
        try:
            response = httpx.get(
                url=f"https://www.{self.base_name}.{self.domain}/embed-url/{episode_id}",
                headers=self.headers,
                timeout=max_timeout
            )
            response.raise_for_status()

            # Extract and clean embed URL
            embed_url = response.text.strip()
            self.iframe_src = embed_url

            # Fetch video content using embed URL
            # (consistency fix: the original omitted headers and timeout here,
            # unlike every other request in this module)
            video_response = httpx.get(
                embed_url,
                headers=self.headers,
                timeout=max_timeout
            )
            video_response.raise_for_status()

            # Parse response with BeautifulSoup to get content of the script
            soup = BeautifulSoup(video_response.text, "html.parser")
            script = soup.find("body").find("script").text

            # Second script holds the direct mp4 assignment; take the RHS of "="
            self.src_mp4 = soup.find("body").find_all("script")[1].text.split(" = ")[1].replace("'", "")

            return script

        except Exception as e:
            logging.error(f"Error fetching embed URL: {e}")
            return None
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# 02.07.24
|
|
2
|
+
|
|
3
|
+
from urllib.parse import quote_plus
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# Internal utilities
|
|
7
|
+
from StreamingCommunity.Util.console import console, msg
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Logic class
|
|
11
|
+
from .site import title_search, run_get_select_title, media_search_manager
|
|
12
|
+
from .title import download_title
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Variable
indice = 8                      # Position of this provider in the global site index
_useFor = "film_serie"          # Content categories this provider serves
_deprecate = False              # Set True to exclude this provider from selection
_priority = 2                   # Ordering weight among providers
_engineDownload = "tor"         # Downloads for this site go through the torrent engine
from .costant import SITE_NAME
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def search(string_to_search: str = None, get_onylDatabase: bool = False):
    """
    Main function of the application for film and series.

    Prompts for a query when none is given, searches the site, and either
    returns the populated media manager or lets the user pick and download
    a title. Retries from scratch when nothing matches.
    """
    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert word to search in [red]{SITE_NAME}").strip()

    # Query the site's database
    len_database = title_search(quote_plus(string_to_search))

    # Caller only wants the raw search results
    if get_onylDatabase:
        return media_search_manager

    if len_database > 0:
        # Let the user pick a result, then download it
        download_title(run_get_select_title())
    else:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")

        # Retry with a fresh prompt
        search()
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# 09.06.24

import os


# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager


# Site name is derived from this package's directory name
SITE_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
# Base folder where downloaded media is stored
ROOT_PATH = config_manager.get('DEFAULT', 'root_path')
# Domain (TLD) currently configured for this site
DOMAIN_NOW = config_manager.get_dict('SITE', SITE_NAME)['domain']

# Sub-folder names for series and movies inside ROOT_PATH
SERIES_FOLDER = config_manager.get('DEFAULT', 'serie_folder_name')
MOVIE_FOLDER = config_manager.get('DEFAULT', 'movie_folder_name')
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
# 02.07.24
|
|
2
|
+
|
|
3
|
+
# External libraries
|
|
4
|
+
import httpx
|
|
5
|
+
from bs4 import BeautifulSoup
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# Internal utilities
|
|
9
|
+
from StreamingCommunity.Util.console import console
|
|
10
|
+
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
11
|
+
from StreamingCommunity.Util.headers import get_headers
|
|
12
|
+
from StreamingCommunity.Util.table import TVShowManager
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Logic class
|
|
16
|
+
from StreamingCommunity.Api.Template import get_select_title
|
|
17
|
+
from StreamingCommunity.Api.Template.Util import search_domain
|
|
18
|
+
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Variable
from .costant import SITE_NAME
media_search_manager = MediaManager()   # Accumulates results of the latest search
table_show_manager = TVShowManager()    # Renders the interactive selection table
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def title_search(word_to_search: str) -> int:
    """
    Search for titles based on a search query.

    Parameters:
        - word_to_search (str): The (URL-quoted) title to search for.

    Returns:
        - int: The number of titles found; 0 when the search request fails.
    """
    media_search_manager.clear()
    table_show_manager.clear()

    # Find new domain if prev dont work
    max_timeout = config_manager.get_int("REQUESTS", "timeout")
    domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")

    # Construct the full site URL and load the search page
    try:
        response = httpx.get(
            url=f"https://{SITE_NAME}.{domain_to_use}/search/{word_to_search}/1/",
            headers={'user-agent': get_headers()},
            follow_redirects=True,
            timeout=max_timeout
        )
        response.raise_for_status()

    except Exception as e:
        console.print(f"Site: {SITE_NAME}, request search error: {e}")

        # BUGFIX: the original fell through here and used an undefined
        # `response`, raising NameError; report zero results instead.
        return 0

    # Create soup and find table
    soup = BeautifulSoup(response.text, "html.parser")

    # Scrape div film in table on single page
    for tr in soup.find_all('tr'):
        try:
            title_info = {
                'name': tr.find_all("a")[1].get_text(strip=True),
                'url': tr.find_all("a")[1].get("href"),
                'seader': tr.find_all("td")[-5].get_text(strip=True),
                'leacher': tr.find_all("td")[-4].get_text(strip=True),
                'date': tr.find_all("td")[-3].get_text(strip=True).replace("'", ""),
                'size': tr.find_all("td")[-2].get_text(strip=True)
            }

            media_search_manager.add_media(title_info)

        # Non-result rows (headers, ads) lack these cells; skip them
        except Exception:
            continue

    # Return the number of titles found
    return media_search_manager.get_length()
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def run_get_select_title():
    """
    Display a selection of titles and prompt the user to choose one.

    Returns:
        The entry the user selected from the rendered results table.
    """
    return get_select_title(table_show_manager, media_search_manager)
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# 02.07.24
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
# External libraries
|
|
7
|
+
import httpx
|
|
8
|
+
from bs4 import BeautifulSoup
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
# Internal utilities
|
|
12
|
+
from StreamingCommunity.Util.console import console
|
|
13
|
+
from StreamingCommunity.Util.os import os_manager
|
|
14
|
+
from StreamingCommunity.Util.message import start_message
|
|
15
|
+
from StreamingCommunity.Util.headers import get_headers
|
|
16
|
+
from StreamingCommunity.Lib.Downloader import TOR_downloader
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# Logic class
|
|
20
|
+
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# Config
|
|
24
|
+
from .costant import ROOT_PATH, DOMAIN_NOW, SITE_NAME, MOVIE_FOLDER
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def download_title(select_title: MediaItem):
    """
    Downloads a media item and saves it as an MP4 file.

    Parameters:
        - select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
    """
    start_message()
    console.print(f"[yellow]Download: [red]{select_title.name} \n")
    print()

    # Define output path
    title_name = os_manager.get_sanitize_file(select_title.name)
    mp4_path = os_manager.get_sanitize_path(
        os.path.join(ROOT_PATH, MOVIE_FOLDER, title_name.replace(".mp4", ""))
    )

    # Create output folder
    os_manager.create_path(mp4_path)

    # Make request to page with magnet
    full_site_name = f"{SITE_NAME}.{DOMAIN_NOW}"
    response = httpx.get(
        url="https://" + full_site_name + select_title.url,
        headers={
            'user-agent': get_headers()
        },
        follow_redirects=True
    )

    # Create soup and find the download anchor
    soup = BeautifulSoup(response.text, "html.parser")
    download_anchor = soup.find("a", class_="torrentdown1")

    # BUGFIX: the original chained .get("href") on the find() result, which
    # raises AttributeError when the page has no download link (e.g. error page).
    if download_anchor is None:
        console.print("[red]No download link found on the page.")
        return

    final_url = download_anchor.get("href")

    # Tor manager
    manager = TOR_downloader()
    manager.add_magnet_link(final_url)
    manager.start_download()
    manager.move_downloaded_files(mp4_path)
|