StreamingCommunity 3.0.5-py3-none-any.whl → 3.0.7-py3-none-any.whl
This diff covers publicly available package versions released to a supported registry and reflects the changes between those versions as they appear in the public registry; it is provided for informational purposes only.
Potentially problematic release: this version of StreamingCommunity might be problematic.
- StreamingCommunity/Api/Player/maxstream.py +141 -0
- StreamingCommunity/Api/Player/vixcloud.py +5 -3
- StreamingCommunity/Api/Site/1337xx/__init__.py +4 -4
- StreamingCommunity/Api/Site/altadefinizione/__init__.py +3 -3
- StreamingCommunity/Api/Site/altadefinizione/film.py +15 -35
- StreamingCommunity/Api/Site/animeunity/__init__.py +2 -2
- StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +21 -23
- StreamingCommunity/Api/Site/animeworld/__init__.py +3 -3
- StreamingCommunity/Api/Site/cb01new/__init__.py +72 -0
- StreamingCommunity/Api/Site/cb01new/film.py +62 -0
- StreamingCommunity/Api/Site/cb01new/site.py +78 -0
- StreamingCommunity/Api/Site/guardaserie/__init__.py +3 -3
- StreamingCommunity/Api/Site/raiplay/__init__.py +3 -3
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +87 -39
- StreamingCommunity/Api/Site/streamingcommunity/film.py +2 -2
- StreamingCommunity/Api/Site/streamingcommunity/series.py +4 -4
- StreamingCommunity/Api/Site/streamingcommunity/site.py +9 -6
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +6 -3
- StreamingCommunity/Api/Site/streamingwatch/__init__.py +12 -6
- StreamingCommunity/Api/Site/streamingwatch/site.py +12 -5
- StreamingCommunity/Api/Template/site.py +103 -58
- StreamingCommunity/Lib/Proxies/proxy.py +14 -174
- StreamingCommunity/TelegramHelp/config.json +62 -0
- StreamingCommunity/TelegramHelp/telegram_bot.py +4 -0
- StreamingCommunity/Upload/version.py +1 -1
- StreamingCommunity/Util/config_json.py +7 -2
- StreamingCommunity/run.py +25 -40
- {streamingcommunity-3.0.5.dist-info → streamingcommunity-3.0.7.dist-info}/METADATA +31 -13
- {streamingcommunity-3.0.5.dist-info → streamingcommunity-3.0.7.dist-info}/RECORD +33 -28
- {streamingcommunity-3.0.5.dist-info → streamingcommunity-3.0.7.dist-info}/WHEEL +1 -1
- {streamingcommunity-3.0.5.dist-info → streamingcommunity-3.0.7.dist-info}/entry_points.txt +0 -0
- {streamingcommunity-3.0.5.dist-info → streamingcommunity-3.0.7.dist-info}/licenses/LICENSE +0 -0
- {streamingcommunity-3.0.5.dist-info → streamingcommunity-3.0.7.dist-info}/top_level.txt +0 -0
StreamingCommunity/Api/Player/maxstream.py

@@ -0,0 +1,141 @@
+# 05.07.24
+# NOTE: NOT USED
+
+import re
+import logging
+
+
+# External libraries
+import httpx
+import jsbeautifier
+from bs4 import BeautifulSoup
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.headers import get_userAgent
+
+
+# Variable
+MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
+
+
+class VideoSource:
+    def __init__(self, url: str):
+        """
+        Sets up the video source with the provided URL.
+
+        Parameters:
+            - url (str): The URL of the video.
+        """
+        self.url = url
+        self.redirect_url = None
+        self.maxstream_url = None
+        self.m3u8_url = None
+        self.headers = {'user-agent': get_userAgent()}
+
+    def get_redirect_url(self):
+        """
+        Sends a request to the initial URL and extracts the redirect URL.
+        """
+        try:
+            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
+            response.raise_for_status()
+
+            # Extract the redirect URL from the HTML
+            soup = BeautifulSoup(response.text, "html.parser")
+            self.redirect_url = soup.find("div", id="iframen1").get("data-src")
+            logging.info(f"Redirect URL: {self.redirect_url}")
+
+            return self.redirect_url
+
+        except Exception as e:
+            logging.error(f"Error parsing HTML: {e}")
+            raise
+
+    def get_maxstream_url(self):
+        """
+        Sends a request to the redirect URL and extracts the Maxstream URL.
+        """
+        try:
+            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
+            response.raise_for_status()
+
+            # Extract the Maxstream URL from the HTML
+            soup = BeautifulSoup(response.text, "html.parser")
+            maxstream_url = soup.find("a")
+
+            if maxstream_url is None:
+
+                # If no anchor tag is found, try the alternative method
+                logging.warning("Anchor tag not found. Trying the alternative method.")
+                headers = {
+                    'origin': 'https://stayonline.pro',
+                    'user-agent': get_userAgent(),
+                    'x-requested-with': 'XMLHttpRequest',
+                }
+
+                # Make request to stayonline api
+                data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
+                response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data)
+                response.raise_for_status()
+                uprot_url = response.json()['data']['value']
+
+                # Retry getting maxtstream url
+                response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
+                response.raise_for_status()
+                soup = BeautifulSoup(response.text, "html.parser")
+                maxstream_url = soup.find("a").get("href")
+
+            else:
+                maxstream_url = maxstream_url.get("href")
+
+            self.maxstream_url = maxstream_url
+            logging.info(f"Maxstream URL: {self.maxstream_url}")
+
+            return self.maxstream_url
+
+        except Exception as e:
+            logging.error(f"Error during the request: {e}")
+            raise
+
+    def get_m3u8_url(self):
+        """
+        Sends a request to the Maxstream URL and extracts the .m3u8 file URL.
+        """
+        try:
+            response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
+            response.raise_for_status()
+            soup = BeautifulSoup(response.text, "html.parser")
+
+            # Iterate over all script tags in the HTML
+            for script in soup.find_all("script"):
+                if "eval(function(p,a,c,k,e,d)" in script.text:
+
+                    # Execute the script using
+                    data_js = jsbeautifier.beautify(script.text)
+
+                    # Extract the .m3u8 URL from the script's output
+                    match = re.search(r'sources:\s*\[\{\s*src:\s*"([^"]+)"', data_js)
+
+                    if match:
+                        self.m3u8_url = match.group(1)
+                        logging.info(f"M3U8 URL: {self.m3u8_url}")
+                        break
+
+                    else:
+                        logging.error("Failed to find M3U8 URL: No match found")
+
+            return self.m3u8_url
+
+        except Exception as e:
+            logging.error(f"Error executing the Node.js script: {e}")
+            raise
+
+    def get_playlist(self):
+        """
+        Executes the entire flow to obtain the final .m3u8 file URL.
+        """
+        self.get_redirect_url()
+        self.get_maxstream_url()
+        return self.get_m3u8_url()
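For orientation, the new player resolves the final stream in three chained steps that get_playlist() runs in order. A minimal usage sketch, assuming the import path added in this release and a hypothetical embed URL (despite the "# NOTE: NOT USED" marker, the new cb01new/film.py below does import this class):

from StreamingCommunity.Api.Player.maxstream import VideoSource

source = VideoSource("https://stayonline.pro/l/abc123/")  # hypothetical embed link
playlist_url = source.get_playlist()  # get_redirect_url() -> get_maxstream_url() -> get_m3u8_url()
print(playlist_url)                   # final .m3u8 URL, or None if no sources block matched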
StreamingCommunity/Api/Player/vixcloud.py

@@ -1,6 +1,6 @@
 # 01.03.24
 
-import
+import time
 import logging
 from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
 
@@ -24,7 +24,7 @@ console = Console()
 
 
 class VideoSource:
-    def __init__(self, url: str, is_series: bool, media_id: int = None):
+    def __init__(self, url: str, is_series: bool, media_id: int = None, proxy: str = None):
         """
         Initialize video source for streaming site.
 
@@ -35,6 +35,7 @@ class VideoSource:
         """
         self.headers = {'user-agent': get_userAgent()}
         self.url = url
+        self.proxy = proxy
         self.is_series = is_series
         self.media_id = media_id
         self.iframe_src = None
@@ -55,7 +56,7 @@ class VideoSource:
         }
 
         try:
-            response = httpx.get(f"{self.url}/iframe/{self.media_id}", params=params, timeout=MAX_TIMEOUT)
+            response = httpx.get(f"{self.url}/iframe/{self.media_id}", headers=self.headers, params=params, timeout=MAX_TIMEOUT, proxy=self.proxy)
             response.raise_for_status()
 
             # Parse response with BeautifulSoup to get iframe source
@@ -81,6 +82,7 @@ class VideoSource:
             self.window_video = WindowVideo(converter.get('video'))
             self.window_streams = StreamsCollection(converter.get('streams'))
             self.window_parameter = WindowParameter(converter.get('masterPlaylist'))
+            time.sleep(0.5)
 
         except Exception as e:
             logging.error(f"Error parsing script: {e}")
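The behavioral changes here are an optional proxy forwarded straight to httpx, the iframe request now sending the stored headers, and a half-second pause after parsing the player parameters. A hedged construction sketch with hypothetical values, assuming an httpx version whose API accepts the proxy keyword (as the diff itself relies on):

from StreamingCommunity.Api.Player.vixcloud import VideoSource

video = VideoSource(
    url="https://streamingcommunity.example",  # hypothetical site base URL
    is_series=False,
    media_id=12345,                            # hypothetical media id
    proxy="http://127.0.0.1:8080",             # forwarded as httpx.get(..., proxy=self.proxy)
)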
StreamingCommunity/Api/Site/1337xx/__init__.py

@@ -21,10 +21,10 @@ from .title import download_title
 
 # Variable
 indice = 3
-_useFor = "
+_useFor = "Torrent"
 _priority = 0
-_engineDownload = "
-_deprecate =
+_engineDownload = "Torrent"
+_deprecate = True
 
 console = Console()
 msg = Prompt()
@@ -62,7 +62,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         return media_search_manager
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager, len_database)
         download_title(select_title)
 
     else:
StreamingCommunity/Api/Site/altadefinizione/__init__.py

@@ -24,10 +24,10 @@ from .series import download_series
 
 # Variable
 indice = 2
-_useFor = "
+_useFor = "Film_&_Serie"
 _priority = 0
 _engineDownload = "hls"
-_deprecate =
+_deprecate = False
 
 msg = Prompt()
 console = Console()
@@ -110,7 +110,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         bot = get_bot_instance()
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager, len_database)
         process_search_result(select_title, selections)
 
     else:
StreamingCommunity/Api/Site/altadefinizione/film.py

@@ -1,6 +1,7 @@
 # 16.03.25
 
 import os
+import re
 
 
 # External library
@@ -56,51 +57,30 @@ def download_film(select_title: MediaItem) -> str:
 
     start_message()
     console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
-
-    # Extract mostraguarda
+
+    # Extract mostraguarda URL
     try:
         response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
-        response.
-
-
-        console.print(f"[red]Error fetching the page: {e}")
-
-        if site_constant.TELEGRAM_BOT:
-            bot.send_message(f"ERRORE\n\nErrore durante il recupero della pagina.\n\n{e}", None)
-        return None
+        soup = BeautifulSoup(response.text, 'html.parser')
+        iframes = soup.find_all('iframe')
+        mostraguarda = iframes[0]['src']
 
-
-
-        iframe_tag = soup.find_all("iframe")
-        url_mostraGuarda = iframe_tag[0].get('data-src')
-        if not url_mostraGuarda:
-            console.print("Error: data-src attribute not found in iframe.")
-            if site_constant.TELEGRAM_BOT:
-                bot.send_message(f"ERRORE\n\nErrore: attributo data-src non trovato nell'iframe", None)
+    except Exception as e:
+        console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda")
 
     # Extract supervideo URL
     try:
-        response = httpx.get(
-        response.
+        response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
+        soup = BeautifulSoup(response.text, 'html.parser')
+        pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+'
+        supervideo_match = re.search(pattern, response.text)
+        supervideo_url = 'https:' + supervideo_match.group(0)
 
     except Exception as e:
-        console.print(f"[red]
-        console.print("[yellow]Missing access credentials. This part of the code is still under development.")
-        if site_constant.TELEGRAM_BOT:
-            bot.send_message(f"ERRORE\n\nErrore durante il recupero del link mostra/guarda.\n\n{e}", None)
-            bot.send_message(f"ERRORE\n\nCredenziali di accesso mancanti.\nQuesta parte del codice è ancora in fase di sviluppo.", None)
-        return None
-
-        # Create supervio URL
-        soup = BeautifulSoup(response.text, "html.parser")
-        player_links = soup.find("ul", class_="_player-mirrors")
-        player_items = player_links.find_all("li")
-        supervideo_url = "https:" + player_items[0].get("data-link")
-        if not supervideo_url:
-            return None
+        console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL")
 
     # Init class
-    video_source = VideoSource(
+    video_source = VideoSource(supervideo_url)
     master_playlist = video_source.get_playlist()
 
     # Define the filename and path for the downloaded film
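The scraper now pulls the supervideo link with a regular expression instead of walking the _player-mirrors list. A small sketch of that pattern against hypothetical embed markup:

import re

html = '<iframe src="//supervideo.example/e/abc123XYZ"></iframe>'  # hypothetical markup
match = re.search(r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+', html)
if match:
    supervideo_url = 'https:' + match.group(0)
    print(supervideo_url)  # https://supervideo.example/e/abc123XYZ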
StreamingCommunity/Api/Site/animeunity/__init__.py

@@ -24,7 +24,7 @@ from .serie import download_series
 
 # Variable
 indice = 1
-_useFor = "
+_useFor = "Anime"
 _priority = 0
 _engineDownload = "mp4"
 _deprecate = False
@@ -109,7 +109,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         bot = get_bot_instance()
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
         process_search_result(select_title, selections)
 
     else:
StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py

@@ -43,40 +43,38 @@ class ScrapeSerieAnime:
     def get_count_episodes(self):
         """
         Retrieve total number of episodes for the selected media.
+        This includes partial episodes (like episode 6.5).
 
         Returns:
-            int: Total episode count
+            int: Total episode count including partial episodes
         """
-
-
-
-
-
-
-            )
-            response.raise_for_status()
-
-            # Parse JSON response and return episode count
-            return response.json()["episodes_count"]
-
-        except Exception as e:
-            logging.error(f"Error fetching episode count: {e}")
-            return None
+        if self.episodes_cache is None:
+            self._fetch_all_episodes()
+
+        if self.episodes_cache:
+            return len(self.episodes_cache)
+        return None
 
     def _fetch_all_episodes(self):
         """
         Fetch all episodes data at once and cache it
         """
         try:
-
-
-
-
+            # Get initial episode count
+            response = httpx.get(
+                url=f"{self.url}/info_api/{self.media_id}/",
+                headers=self.headers,
+                timeout=max_timeout
+            )
+            response.raise_for_status()
+            initial_count = response.json()["episodes_count"]
 
-
+            all_episodes = []
             start_range = 1
-
-
+
+            # Fetch episodes in chunks
+            while start_range <= initial_count:
+                end_range = min(start_range + 119, initial_count)
 
                 response = httpx.get(
                     url=f"{self.url}/info_api/{self.media_id}/1",
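The rewritten fetcher first reads episodes_count, then pages through the episode list in blocks of 120 (hence start_range + 119). A sketch of the resulting ranges with a hypothetical count; the loop's advance step is not visible in this hunk and is assumed here:

initial_count = 250  # hypothetical episode count
start_range = 1
while start_range <= initial_count:
    end_range = min(start_range + 119, initial_count)
    print(start_range, end_range)  # 1 120, then 121 240, then 241 250
    start_range = end_range + 1    # assumed advance; not shown in the diff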
StreamingCommunity/Api/Site/animeworld/__init__.py

@@ -18,8 +18,8 @@ from .film import download_film
 
 
 # Variable
-indice =
-_useFor = "
+indice = 6
+_useFor = "Anime"
 _priority = 0
 _engineDownload = "mp4"
 _deprecate = False
@@ -75,7 +75,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         return media_search_manager
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
         process_search_result(select_title, selections)
 
     else:
StreamingCommunity/Api/Site/cb01new/__init__.py

@@ -0,0 +1,72 @@
+# 09.06.24
+
+from urllib.parse import quote_plus
+
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Api.Template import get_select_title
+from StreamingCommunity.Api.Template.config_loader import site_constant
+from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+
+
+# Logic class
+from .site import title_search, media_search_manager, table_show_manager
+from .film import download_film
+
+
+# Variable
+indice = -1
+_useFor = "Film"
+_priority = 0
+_engineDownload = "mp4"
+_deprecate = True
+
+msg = Prompt()
+console = Console()
+
+
+def process_search_result(select_title):
+    """
+    Handles the search result and initiates the download for either a film or series.
+    """
+    # !!! ADD TYPE DONT WORK FOR SERIE
+    download_film(select_title)
+
+def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None):
+    """
+    Main function of the application for search.
+
+    Parameters:
+        string_to_search (str, optional): String to search for
+        get_onylDatabase (bool, optional): If True, return only the database object
+        direct_item (dict, optional): Direct item to process (bypass search)
+    """
+    if direct_item:
+        select_title = MediaItem(**direct_item)
+        process_search_result(select_title)
+        return
+
+    if string_to_search is None:
+        string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{site_constant.SITE_NAME}").strip()
+
+    # Search on database
+    len_database = title_search(quote_plus(string_to_search))
+
+    ## If only the database is needed, return the manager
+    if get_onlyDatabase:
+        return media_search_manager
+
+    if len_database > 0:
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
+        process_search_result(select_title)
+
+    else:
+
+        # If no results are found, ask again
+        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
+        search()
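The new provider exposes the same search() entry point as the other site modules and, per the comment in process_search_result, only handles films. A hedged invocation sketch, assuming the package layout added in this release:

from StreamingCommunity.Api.Site.cb01new import search

search("matrix")                              # interactive selection, then download_film()
db = search("matrix", get_onlyDatabase=True)  # return only the MediaManager, no download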
StreamingCommunity/Api/Site/cb01new/film.py

@@ -0,0 +1,62 @@
+# 03.07.24
+
+import os
+
+
+# External library
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.os import os_manager
+from StreamingCommunity.Util.message import start_message
+from StreamingCommunity.Lib.Downloader import HLS_Downloader
+
+
+# Logic class
+from StreamingCommunity.Api.Template.config_loader import site_constant
+from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
+
+
+# Player
+from StreamingCommunity.Api.Player.maxstream import VideoSource
+
+
+# Variable
+console = Console()
+
+
+def download_film(select_title: MediaItem) -> str:
+    """
+    Downloads a film using the provided obj.
+
+    Parameters:
+        - select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
+
+    Return:
+        - str: output path
+    """
+    start_message()
+    console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
+
+    # Setup api manger
+    video_source = VideoSource(select_title.url)
+
+    # Define output path
+    title_name = os_manager.get_sanitize_file(select_title.name) +".mp4"
+    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
+
+    # Get m3u8 master playlist
+    master_playlist = video_source.get_playlist()
+
+    # Download the film using the m3u8 playlist, and output filename
+    r_proc = HLS_Downloader(
+        m3u8_url=master_playlist,
+        output_path=os.path.join(mp4_path, title_name)
+    ).start()
+
+    if r_proc['error'] is not None:
+        try: os.remove(r_proc['path'])
+        except: pass
+
+    return r_proc['path']
StreamingCommunity/Api/Site/cb01new/site.py

@@ -0,0 +1,78 @@
+# 03.07.24
+
+# External libraries
+import httpx
+from bs4 import BeautifulSoup
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.table import TVShowManager
+
+
+# Logic class
+from StreamingCommunity.Api.Template.config_loader import site_constant
+from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+
+
+# Variable
+console = Console()
+media_search_manager = MediaManager()
+table_show_manager = TVShowManager()
+max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+def title_search(query: str) -> int:
+    """
+    Search for titles based on a search query.
+
+    Parameters:
+        - query (str): The query to search for.
+
+    Returns:
+        - int: The number of titles found.
+    """
+    media_search_manager.clear()
+    table_show_manager.clear()
+
+    search_url = f"{site_constant.FULL_URL}/?s={query}"
+    console.print(f"[cyan]Search url: [yellow]{search_url}")
+
+    try:
+        response = httpx.get(
+            search_url,
+            headers={'user-agent': get_userAgent()},
+            timeout=max_timeout,
+            follow_redirects=True,
+            verify=False
+        )
+        response.raise_for_status()
+
+    except Exception as e:
+        console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
+        return 0
+
+    # Create soup and find table
+    soup = BeautifulSoup(response.text, "html.parser")
+
+    for card in soup.find_all("div", class_=["card", "mp-post", "horizontal"]):
+        try:
+            title_tag = card.find("h3", class_="card-title").find("a")
+            url = title_tag.get("href")
+            title = title_tag.get_text(strip=True)
+
+            title_info = {
+                'name': title,
+                'url': url,
+                'type': 'film'
+            }
+
+            media_search_manager.add_media(title_info)
+
+        except Exception as e:
+            print(f"Error parsing a film entry: {e}")
+
+    # Return the number of titles found
+    return media_search_manager.get_length()
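The result parser looks for card containers and reads the link inside each h3.card-title. A sketch against hypothetical search-result markup mirroring those selectors:

from bs4 import BeautifulSoup

html = '''
<div class="card mp-post horizontal">
  <h3 class="card-title"><a href="https://example.org/film/some-title/">Some Title</a></h3>
</div>
'''  # hypothetical markup
soup = BeautifulSoup(html, "html.parser")
for card in soup.find_all("div", class_=["card", "mp-post", "horizontal"]):
    a = card.find("h3", class_="card-title").find("a")
    print(a.get("href"), a.get_text(strip=True))  # https://example.org/film/some-title/ Some Title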
StreamingCommunity/Api/Site/guardaserie/__init__.py

@@ -20,8 +20,8 @@ from .series import download_series
 
 
 # Variable
-indice =
-_useFor = "
+indice = 4
+_useFor = "Serie"
 _priority = 0
 _engineDownload = "hls"
 _deprecate = False
@@ -75,7 +75,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         return media_search_manager
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
         process_search_result(select_title, selections)
 
     else:
StreamingCommunity/Api/Site/raiplay/__init__.py

@@ -19,8 +19,8 @@ from .film import download_film
 
 
 # Variable
-indice =
-_useFor = "
+indice = 5
+_useFor = "Film_&_Serie"
 _priority = 1 # NOTE: Site search need the use of tmbd obj
 _engineDownload = "hls"
 _deprecate = False
@@ -84,7 +84,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         return media_search_manager
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
         process_search_result(select_title, selections)
 
     else: