StreamingCommunity 2.3.0__py3-none-any.whl → 2.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic. Click here for more details.
- StreamingCommunity/run.py +61 -7
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/METADATA +88 -18
- StreamingCommunity-2.5.0.dist-info/RECORD +8 -0
- StreamingCommunity/Api/Player/Helper/Vixcloud/js_parser.py +0 -143
- StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +0 -136
- StreamingCommunity/Api/Player/ddl.py +0 -89
- StreamingCommunity/Api/Player/maxstream.py +0 -151
- StreamingCommunity/Api/Player/supervideo.py +0 -194
- StreamingCommunity/Api/Player/vixcloud.py +0 -273
- StreamingCommunity/Api/Site/1337xx/__init__.py +0 -51
- StreamingCommunity/Api/Site/1337xx/costant.py +0 -15
- StreamingCommunity/Api/Site/1337xx/site.py +0 -89
- StreamingCommunity/Api/Site/1337xx/title.py +0 -66
- StreamingCommunity/Api/Site/altadefinizione/__init__.py +0 -51
- StreamingCommunity/Api/Site/altadefinizione/costant.py +0 -19
- StreamingCommunity/Api/Site/altadefinizione/film.py +0 -74
- StreamingCommunity/Api/Site/altadefinizione/site.py +0 -95
- StreamingCommunity/Api/Site/animeunity/__init__.py +0 -51
- StreamingCommunity/Api/Site/animeunity/costant.py +0 -19
- StreamingCommunity/Api/Site/animeunity/film_serie.py +0 -135
- StreamingCommunity/Api/Site/animeunity/site.py +0 -175
- StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +0 -97
- StreamingCommunity/Api/Site/cb01new/__init__.py +0 -52
- StreamingCommunity/Api/Site/cb01new/costant.py +0 -19
- StreamingCommunity/Api/Site/cb01new/film.py +0 -73
- StreamingCommunity/Api/Site/cb01new/site.py +0 -83
- StreamingCommunity/Api/Site/ddlstreamitaly/__init__.py +0 -56
- StreamingCommunity/Api/Site/ddlstreamitaly/costant.py +0 -20
- StreamingCommunity/Api/Site/ddlstreamitaly/series.py +0 -146
- StreamingCommunity/Api/Site/ddlstreamitaly/site.py +0 -99
- StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +0 -85
- StreamingCommunity/Api/Site/guardaserie/__init__.py +0 -51
- StreamingCommunity/Api/Site/guardaserie/costant.py +0 -19
- StreamingCommunity/Api/Site/guardaserie/series.py +0 -198
- StreamingCommunity/Api/Site/guardaserie/site.py +0 -90
- StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +0 -110
- StreamingCommunity/Api/Site/ilcorsaronero/__init__.py +0 -52
- StreamingCommunity/Api/Site/ilcorsaronero/costant.py +0 -19
- StreamingCommunity/Api/Site/ilcorsaronero/site.py +0 -72
- StreamingCommunity/Api/Site/ilcorsaronero/title.py +0 -46
- StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +0 -149
- StreamingCommunity/Api/Site/mostraguarda/__init__.py +0 -49
- StreamingCommunity/Api/Site/mostraguarda/costant.py +0 -19
- StreamingCommunity/Api/Site/mostraguarda/film.py +0 -101
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +0 -56
- StreamingCommunity/Api/Site/streamingcommunity/costant.py +0 -19
- StreamingCommunity/Api/Site/streamingcommunity/film.py +0 -75
- StreamingCommunity/Api/Site/streamingcommunity/series.py +0 -206
- StreamingCommunity/Api/Site/streamingcommunity/site.py +0 -139
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +0 -123
- StreamingCommunity/Api/Template/Class/SearchType.py +0 -101
- StreamingCommunity/Api/Template/Util/__init__.py +0 -5
- StreamingCommunity/Api/Template/Util/get_domain.py +0 -137
- StreamingCommunity/Api/Template/Util/manage_ep.py +0 -179
- StreamingCommunity/Api/Template/Util/recall_search.py +0 -37
- StreamingCommunity/Api/Template/__init__.py +0 -3
- StreamingCommunity/Api/Template/site.py +0 -87
- StreamingCommunity/Lib/Downloader/HLS/downloader.py +0 -955
- StreamingCommunity/Lib/Downloader/HLS/proxyes.py +0 -110
- StreamingCommunity/Lib/Downloader/HLS/segments.py +0 -564
- StreamingCommunity/Lib/Downloader/MP4/downloader.py +0 -155
- StreamingCommunity/Lib/Downloader/TOR/downloader.py +0 -296
- StreamingCommunity/Lib/Downloader/__init__.py +0 -5
- StreamingCommunity/Lib/FFmpeg/__init__.py +0 -4
- StreamingCommunity/Lib/FFmpeg/capture.py +0 -170
- StreamingCommunity/Lib/FFmpeg/command.py +0 -296
- StreamingCommunity/Lib/FFmpeg/util.py +0 -249
- StreamingCommunity/Lib/M3U8/__init__.py +0 -6
- StreamingCommunity/Lib/M3U8/decryptor.py +0 -164
- StreamingCommunity/Lib/M3U8/estimator.py +0 -229
- StreamingCommunity/Lib/M3U8/parser.py +0 -666
- StreamingCommunity/Lib/M3U8/url_fixer.py +0 -52
- StreamingCommunity/Lib/TMBD/__init__.py +0 -2
- StreamingCommunity/Lib/TMBD/obj_tmbd.py +0 -39
- StreamingCommunity/Lib/TMBD/tmdb.py +0 -346
- StreamingCommunity/Upload/update.py +0 -67
- StreamingCommunity/Upload/version.py +0 -5
- StreamingCommunity/Util/_jsonConfig.py +0 -204
- StreamingCommunity/Util/call_stack.py +0 -42
- StreamingCommunity/Util/color.py +0 -20
- StreamingCommunity/Util/console.py +0 -12
- StreamingCommunity/Util/ffmpeg_installer.py +0 -351
- StreamingCommunity/Util/headers.py +0 -147
- StreamingCommunity/Util/logger.py +0 -53
- StreamingCommunity/Util/message.py +0 -64
- StreamingCommunity/Util/os.py +0 -545
- StreamingCommunity/Util/table.py +0 -229
- StreamingCommunity-2.3.0.dist-info/RECORD +0 -92
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/LICENSE +0 -0
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/WHEEL +0 -0
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/entry_points.txt +0 -0
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/top_level.txt +0 -0
|
# 05.07.24

import re
import logging


# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup


# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager
from StreamingCommunity.Util.headers import get_headers


# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")


class VideoSource:
    """
    Resolves a maxstream-hosted page down to its final .m3u8 playlist URL.

    Flow: initial page -> redirect URL -> maxstream player page -> .m3u8.
    """

    def __init__(self, url: str):
        """
        Sets up the video source with the provided URL.

        Parameters:
            - url (str): The URL of the video.
        """
        self.url = url
        self.redirect_url = None
        self.maxstream_url = None
        self.m3u8_url = None
        self.headers = {'user-agent': get_headers()}

    def get_redirect_url(self):
        """
        Sends a request to the initial URL and extracts the redirect URL.

        Returns:
            str: The redirect URL read from div#iframen1's 'data-src' attribute.

        Raises:
            httpx.RequestError: If the HTTP request fails.
            AttributeError: If div#iframen1 is not present in the page.
        """
        try:
            # Send a GET request to the initial URL
            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
            response.raise_for_status()

            # The player is embedded via a lazy-loaded div, not a plain iframe.
            soup = BeautifulSoup(response.text, "html.parser")
            self.redirect_url = soup.find("div", id="iframen1").get("data-src")
            logging.info(f"Redirect URL: {self.redirect_url}")

            return self.redirect_url

        except httpx.RequestError as e:
            logging.error(f"Error during the initial request: {e}")
            raise

        except AttributeError as e:
            logging.error(f"Error parsing HTML: {e}")
            raise

    def get_maxstream_url(self):
        """
        Sends a request to the redirect URL and extracts the Maxstream URL.

        Returns:
            str: The maxstream player page URL.

        Raises:
            httpx.RequestError: If any HTTP request fails.
            AttributeError: If the expected anchor tag cannot be found.
        """
        try:
            # Send a GET request to the redirect URL
            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
            response.raise_for_status()

            # Extract the Maxstream URL from the HTML
            soup = BeautifulSoup(response.text, "html.parser")
            maxstream_url = soup.find("a")

            if maxstream_url is None:

                # If no anchor tag is found, try the alternative method
                logging.warning("Anchor tag not found. Trying the alternative method.")
                headers = {
                    'origin': 'https://stayonline.pro',
                    'user-agent': get_headers(),
                    'x-requested-with': 'XMLHttpRequest',
                }

                # Make request to stayonline api; the link id is the
                # second-to-last path segment of the redirect URL.
                data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
                # FIX: apply the configured timeout here too (it was missing,
                # unlike every other request in this class).
                response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data, timeout=max_timeout)
                response.raise_for_status()
                uprot_url = response.json()['data']['value']

                # Retry getting maxstream url from the uprot page
                response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "html.parser")
                maxstream_url = soup.find("a").get("href")

            else:
                maxstream_url = maxstream_url.get("href")

            self.maxstream_url = maxstream_url
            logging.info(f"Maxstream URL: {self.maxstream_url}")

            return self.maxstream_url

        except httpx.RequestError as e:
            logging.error(f"Error during the request to the redirect URL: {e}")
            raise

        except AttributeError as e:
            logging.error(f"Error parsing HTML: {e}")
            raise

    def get_m3u8_url(self):
        """
        Sends a request to the Maxstream URL and extracts the .m3u8 file URL.

        Returns:
            str: The .m3u8 playlist URL, or None if no packed script matched.
        """
        try:
            # Send a GET request to the Maxstream URL
            response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # Iterate over all script tags in the HTML
            for script in soup.find_all("script"):
                if "eval(function(p,a,c,k,e,d)" in script.text:

                    # Unpack the obfuscated (p,a,c,k,e,d) script with jsbeautifier
                    data_js = jsbeautifier.beautify(script.text)

                    # Extract the .m3u8 URL from the unpacked script
                    match = re.search(r'sources:\s*\[\{\s*src:\s*"([^"]+)"', data_js)

                    if match:
                        self.m3u8_url = match.group(1)
                        logging.info(f"M3U8 URL: {self.m3u8_url}")
                        break

            return self.m3u8_url

        except Exception as e:
            # FIX: the old message blamed a "Node.js script" that is never run;
            # extraction is pure jsbeautifier + regex.
            logging.error(f"Error extracting the m3u8 URL: {e}")
            raise

    def get_playlist(self):
        """
        Executes the entire flow to obtain the final .m3u8 file URL.

        Returns:
            str: The .m3u8 playlist URL, or None if extraction failed silently.
        """
        self.get_redirect_url()
        self.get_maxstream_url()
        return self.get_m3u8_url()
|
# 26.05.24

import re
import logging


# External libraries
import httpx
import jsbeautifier
from bs4 import BeautifulSoup


# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager
from StreamingCommunity.Util.headers import get_headers


# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")


class VideoSource:
    def __init__(self, url: str) -> None:
        """
        Initializes the VideoSource object with default values.

        Attributes:
            - url (str): The URL of the video source.
        """
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
            'User-Agent': get_headers()
        }
        self.client = httpx.Client()
        self.url = url

    def make_request(self, url: str) -> str:
        """
        Make an HTTP GET request to the provided URL.

        Parameters:
            - url (str): The URL to make the request to.

        Returns:
            - str: The response content if successful, None otherwise.
        """
        try:
            response = self.client.get(
                url=url,
                headers=self.headers,
                follow_redirects=True,
                timeout=max_timeout
            )
            response.raise_for_status()
            return response.text

        except Exception as e:
            logging.error(f"Request failed: {e}")
            return None

    def parse_html(self, html_content: str) -> BeautifulSoup:
        """
        Parse the provided HTML content using BeautifulSoup.

        Parameters:
            - html_content (str): The HTML content to parse.

        Returns:
            - BeautifulSoup: Parsed HTML content if successful, None otherwise.
        """
        try:
            soup = BeautifulSoup(html_content, "html.parser")
            return soup

        except Exception as e:
            logging.error(f"Failed to parse HTML content: {e}")
            return None

    def get_iframe(self, soup):
        """
        Extracts the source URL of the second iframe in the provided BeautifulSoup object.

        Parameters:
            - soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML.

        Returns:
            - str: The source URL of the second iframe, or None if not found.
        """
        iframes = soup.find_all("iframe")
        if iframes and len(iframes) > 1:
            return iframes[1].get("src")

        return None

    def find_content(self, url):
        """
        Makes a request to the specified URL and parses the HTML content.

        Parameters:
            - url (str): The URL to fetch content from.

        Returns:
            - BeautifulSoup: A BeautifulSoup object representing the parsed HTML content, or None if the request fails.
        """
        content = self.make_request(url)
        if content:
            return self.parse_html(content)

        return None

    def get_result_node_js(self, soup):
        """
        Extracts and beautifies the obfuscated player script from the page.

        Parameters:
            - soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML content.

        Returns:
            - str: The beautified script text, or None if no eval-packed script exists.
        """
        for script in soup.find_all("script"):
            if "eval" in str(script):
                return jsbeautifier.beautify(script.text)

        return None

    def get_playlist(self) -> str:
        """
        Download a video from the provided URL.

        Returns:
            str: The URL of the downloaded video if successful, None otherwise.
        """
        try:
            html_content = self.make_request(self.url)
            if not html_content:
                logging.error("Failed to fetch HTML content.")
                return None

            soup = self.parse_html(html_content)
            if not soup:
                logging.error("Failed to parse HTML content.")
                return None

            # Find master playlist directly on the landing page
            data_js = self.get_result_node_js(soup)

            if data_js is not None:
                match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', data_js)

                if match:
                    return match.group(1)

            else:

                # Fall back to following the embedded player iframe
                iframe_src = self.get_iframe(soup)
                if not iframe_src:
                    logging.error("No iframe found.")
                    return None

                down_page_soup = self.find_content(iframe_src)
                if not down_page_soup:
                    logging.error("Failed to fetch down page content.")
                    return None

                pattern = r'data-link="(//supervideo[^"]+)"'
                match = re.search(pattern, str(down_page_soup))
                if not match:
                    logging.error("No player available for download.")
                    return None

                supervideo_url = "https:" + match.group(1)
                supervideo_soup = self.find_content(supervideo_url)
                if not supervideo_soup:
                    logging.error("Failed to fetch supervideo content.")
                    return None

                # Find master playlist on the supervideo page
                data_js = self.get_result_node_js(supervideo_soup)

                # FIX: data_js can be None here; it used to be passed straight
                # into re.search, raising a TypeError that was swallowed by the
                # broad except below with a misleading message.
                if data_js is None:
                    logging.error("No packed script found on supervideo page.")
                    return None

                match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', data_js)

                if match:
                    return match.group(1)

            return None

        except Exception as e:
            logging.error(f"An error occurred: {e}")
            return None
|
# 01.03.24

import sys
import logging
from urllib.parse import urlparse, parse_qs, urlencode, urlunparse


# External libraries
import httpx
from bs4 import BeautifulSoup


# Internal utilities
from StreamingCommunity.Util.headers import get_headers
from StreamingCommunity.Util.console import console
from StreamingCommunity.Util._jsonConfig import config_manager
from .Helper.Vixcloud.util import WindowVideo, WindowParameter, StreamsCollection
from .Helper.Vixcloud.js_parser import JavaScriptParser


# Variable
max_timeout = config_manager.get_int("REQUESTS", "timeout")


class VideoSource:
    def __init__(self, site_name: str, is_series: bool):
        """
        Initialize video source for streaming site.

        Args:
            site_name (str): Name of streaming site
            is_series (bool): Flag for series or movie content
        """
        self.headers = {'user-agent': get_headers()}
        self.base_name = site_name
        self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
        self.is_series = is_series

    def setup(self, media_id: int):
        """
        Configure media-specific context.

        Args:
            media_id (int): Unique identifier for media item
        """
        self.media_id = media_id

    def get_iframe(self, episode_id: int) -> None:
        """
        Retrieve iframe source for specified episode and store it in self.iframe_src.

        Args:
            episode_id (int): Unique identifier for episode
        """
        params = {}

        if self.is_series:
            params = {
                'episode_id': episode_id,
                'next_episode': '1'
            }

        try:
            # Make a request to get iframe source
            response = httpx.get(
                url=f"https://{self.base_name}.{self.domain}/iframe/{self.media_id}",
                params=params,
                timeout=max_timeout
            )
            response.raise_for_status()

            # Parse response with BeautifulSoup to get iframe source
            soup = BeautifulSoup(response.text, "html.parser")
            self.iframe_src = soup.find("iframe").get("src")

        except Exception as e:
            logging.error(f"Error getting iframe source: {e}")
            raise

    def parse_script(self, script_text: str) -> None:
        """
        Convert raw script to structured video metadata
        (sets canPlayFHD, window_video, window_streams, window_parameter).

        Args:
            script_text (str): Raw JavaScript/HTML script content
        """
        try:
            converter = JavaScriptParser.parse(js_string=str(script_text))

            # Create window video, streams and parameter objects
            self.canPlayFHD = bool(converter.get('canPlayFHD'))
            self.window_video = WindowVideo(converter.get('video'))
            self.window_streams = StreamsCollection(converter.get('streams'))
            self.window_parameter = WindowParameter(converter.get('masterPlaylist'))

        except Exception as e:
            logging.error(f"Error parsing script: {e}")
            raise

    def get_content(self) -> None:
        """
        Fetch and process video content from iframe source.

        Workflow:
            - Validate iframe source
            - Retrieve content
            - Parse embedded script
        """
        try:
            if self.iframe_src is not None:

                # Make a request to get content
                try:
                    response = httpx.get(
                        url=self.iframe_src,
                        headers=self.headers,
                        timeout=max_timeout
                    )
                    response.raise_for_status()

                except Exception as e:
                    # FIX: message typo ("contente").
                    # NOTE(review): exits with status 0 even though this is a
                    # failure path — confirm callers before changing the code.
                    logging.error(f"Failed to get vixcloud content with error: {e}")
                    sys.exit(0)

                # Parse response with BeautifulSoup to get content
                soup = BeautifulSoup(response.text, "html.parser")
                script = soup.find("body").find("script").text

                # Parse script to get video information
                self.parse_script(script_text=script)

        except Exception as e:
            logging.error(f"Error getting content: {e}")
            raise

    def get_playlist(self) -> str:
        """
        Generate authenticated playlist URL.

        Returns:
            str: Fully constructed playlist URL with authentication parameters
        """
        # Initialize parameters dictionary
        params = {}

        # Add 'h' parameter if video quality is 1080p
        if self.canPlayFHD:
            params['h'] = 1

        # Parse the original URL
        parsed_url = urlparse(self.window_parameter.url)
        query_params = parse_qs(parsed_url.query)

        # Check specifically for 'b=1' in the query parameters
        if 'b' in query_params and query_params['b'] == ['1']:
            params['b'] = 1

        # Add authentication parameters (token and expiration)
        params.update({
            "token": self.window_parameter.token,
            "expires": self.window_parameter.expires
        })

        # Build the updated query string
        query_string = urlencode(params)

        # Construct the new URL with updated query parameters
        return urlunparse(parsed_url._replace(query=query_string))

    def get_mp4(self, url_to_download: str, scws_id: str) -> list:
        """
        Generate a download link for the best available resolution.

        Args:
            url_to_download (str): URL of the video page.
            scws_id (str): SCWS ID of the title.

        Returns:
            dict: {'url': ..., 'quality': ...} on success.
            list: [] on any failure (legacy inconsistent return type, kept
                  for backward compatibility with existing callers).
        """
        headers = {
            'referer': url_to_download,
            'user-agent': get_headers(),
        }

        # API request to get video details
        video_api_url = f'https://{self.base_name}.{self.domain}/api/video/{scws_id}'
        response = httpx.get(video_api_url, headers=headers, timeout=max_timeout)

        if response.status_code == 200:
            response_json = response.json()

            video_tracks = response_json.get('video_tracks', [])

            # FIX: guard against an empty track list; video_tracks[-1] used to
            # raise an unhandled IndexError here.
            if not video_tracks:
                logging.error("No video tracks available for this title.")
                return []

            # The API lists tracks in ascending quality; take the best one.
            track = video_tracks[-1]
            console.print(f"[cyan]Available resolutions: [red]{[str(track['quality']) for track in video_tracks]}")

            # Request download link generation for the selected track
            download_response = httpx.post(
                url=f'https://{self.base_name}.{self.domain}/api/download/generate_link?scws_id={track["video_id"]}&rendition={track["quality"]}',
                headers={
                    'referer': url_to_download,
                    'user-agent': get_headers(),
                    'x-xsrf-token': config_manager.get("SITE", self.base_name)['extra']['x-xsrf-token']
                },
                cookies={
                    'streamingcommunity_session': config_manager.get("SITE", self.base_name)['extra']['streamingcommunity_session']
                },
                timeout=max_timeout
            )

            if download_response.status_code == 200:
                return {'url': download_response.text, 'quality': track["quality"]}

            else:
                logging.error(f"Failed to generate link for resolution {track['quality']} (HTTP {download_response.status_code}).")

        else:
            logging.error(f"Error fetching video API URL (HTTP {response.status_code}).")
        return []


class VideoSourceAnime(VideoSource):
    def __init__(self, site_name: str):
        """
        Initialize anime-specific video source.

        Args:
            site_name (str): Name of anime streaming site

        Extends base VideoSource with anime-specific initialization
        """
        self.headers = {'user-agent': get_headers()}
        self.base_name = site_name
        self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
        self.src_mp4 = None

    def get_embed(self, episode_id: int):
        """
        Retrieve embed URL and extract video source.

        Args:
            episode_id (int): Unique identifier for episode

        Returns:
            str: Parsed script content, or None on failure.
        """
        try:
            response = httpx.get(
                url=f"https://www.{self.base_name}.{self.domain}/embed-url/{episode_id}",
                headers=self.headers,
                timeout=max_timeout
            )
            response.raise_for_status()

            # Extract and clean embed URL
            embed_url = response.text.strip()
            self.iframe_src = embed_url

            # Fetch video content using embed URL
            # (timeout added for consistency with the other requests)
            video_response = httpx.get(embed_url, timeout=max_timeout)
            video_response.raise_for_status()

            # Parse response with BeautifulSoup to get content of the script.
            # The second script tag holds the direct mp4 assignment
            # ("var ... = '<url>'"); strip quotes to keep just the URL.
            soup = BeautifulSoup(video_response.text, "html.parser")
            script = soup.find("body").find("script").text
            self.src_mp4 = soup.find("body").find_all("script")[1].text.split(" = ")[1].replace("'", "")

            return script

        except Exception as e:
            logging.error(f"Error fetching embed URL: {e}")
            return None
# 02.07.24

from urllib.parse import quote_plus


# Internal utilities
from StreamingCommunity.Util.console import console, msg


# Logic class
from .site import title_search, run_get_select_title, media_search_manager
from .title import download_title

# FIX: this import previously sat in the middle of the module constants;
# consolidated with the other package-local imports per PEP 8.
from .costant import SITE_NAME


# Variable
indice = 8
_useFor = "film_serie"
_deprecate = False
_priority = 2
_engineDownload = "tor"


def search(string_to_search: str = None, get_onylDatabase: bool = False):
    """
    Main function of the application for film and series.

    Parameters:
        - string_to_search (str): Query to look up; prompted interactively when None.
        - get_onylDatabase (bool): When True, return the populated search manager
          instead of proceeding to download. (Parameter name kept misspelled
          for backward compatibility with existing keyword callers.)

    Returns:
        media_search_manager when get_onylDatabase is True; otherwise None.
    """
    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{SITE_NAME}").strip()

    # Search on database
    len_database = title_search(quote_plus(string_to_search))

    # Return list of elements
    if get_onylDatabase:
        return media_search_manager

    if len_database > 0:

        # Select title from list
        select_title = run_get_select_title()

        # Download title
        download_title(select_title)

    else:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")

        # Retry with a fresh interactive prompt
        search()
# 09.06.24

import os


# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager


# The site name is derived from this package's directory name,
# so renaming the folder automatically retargets the config lookups.
SITE_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
ROOT_PATH = config_manager.get('DEFAULT', 'root_path')
DOMAIN_NOW = config_manager.get_dict('SITE', SITE_NAME)['domain']

# Output folder names for downloaded content
SERIES_FOLDER = config_manager.get('DEFAULT', 'serie_folder_name')
MOVIE_FOLDER = config_manager.get('DEFAULT', 'movie_folder_name')