StreamingCommunity 1.9.1__py3-none-any.whl → 1.9.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic. Click here for more details.
- StreamingCommunity/run.py +4 -5
- {StreamingCommunity-1.9.1.dist-info → StreamingCommunity-1.9.4.dist-info}/METADATA +37 -7
- StreamingCommunity-1.9.4.dist-info/RECORD +7 -0
- {StreamingCommunity-1.9.1.dist-info → StreamingCommunity-1.9.4.dist-info}/WHEEL +1 -1
- {StreamingCommunity-1.9.1.dist-info → StreamingCommunity-1.9.4.dist-info}/entry_points.txt +1 -0
- StreamingCommunity/Api/Player/Helper/Vixcloud/js_parser.py +0 -143
- StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +0 -166
- StreamingCommunity/Api/Player/ddl.py +0 -89
- StreamingCommunity/Api/Player/maxstream.py +0 -151
- StreamingCommunity/Api/Player/supervideo.py +0 -194
- StreamingCommunity/Api/Player/vixcloud.py +0 -224
- StreamingCommunity/Api/Site/1337xx/__init__.py +0 -50
- StreamingCommunity/Api/Site/1337xx/costant.py +0 -15
- StreamingCommunity/Api/Site/1337xx/site.py +0 -84
- StreamingCommunity/Api/Site/1337xx/title.py +0 -66
- StreamingCommunity/Api/Site/altadefinizione/__init__.py +0 -50
- StreamingCommunity/Api/Site/altadefinizione/costant.py +0 -15
- StreamingCommunity/Api/Site/altadefinizione/film.py +0 -69
- StreamingCommunity/Api/Site/altadefinizione/site.py +0 -86
- StreamingCommunity/Api/Site/animeunity/__init__.py +0 -50
- StreamingCommunity/Api/Site/animeunity/costant.py +0 -15
- StreamingCommunity/Api/Site/animeunity/film_serie.py +0 -130
- StreamingCommunity/Api/Site/animeunity/site.py +0 -165
- StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +0 -97
- StreamingCommunity/Api/Site/bitsearch/__init__.py +0 -51
- StreamingCommunity/Api/Site/bitsearch/costant.py +0 -15
- StreamingCommunity/Api/Site/bitsearch/site.py +0 -84
- StreamingCommunity/Api/Site/bitsearch/title.py +0 -47
- StreamingCommunity/Api/Site/cb01new/__init__.py +0 -51
- StreamingCommunity/Api/Site/cb01new/costant.py +0 -15
- StreamingCommunity/Api/Site/cb01new/film.py +0 -69
- StreamingCommunity/Api/Site/cb01new/site.py +0 -74
- StreamingCommunity/Api/Site/ddlstreamitaly/__init__.py +0 -57
- StreamingCommunity/Api/Site/ddlstreamitaly/costant.py +0 -16
- StreamingCommunity/Api/Site/ddlstreamitaly/series.py +0 -141
- StreamingCommunity/Api/Site/ddlstreamitaly/site.py +0 -93
- StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +0 -85
- StreamingCommunity/Api/Site/guardaserie/__init__.py +0 -52
- StreamingCommunity/Api/Site/guardaserie/costant.py +0 -15
- StreamingCommunity/Api/Site/guardaserie/series.py +0 -195
- StreamingCommunity/Api/Site/guardaserie/site.py +0 -84
- StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +0 -110
- StreamingCommunity/Api/Site/mostraguarda/__init__.py +0 -48
- StreamingCommunity/Api/Site/mostraguarda/costant.py +0 -15
- StreamingCommunity/Api/Site/mostraguarda/film.py +0 -94
- StreamingCommunity/Api/Site/piratebays/__init__.py +0 -50
- StreamingCommunity/Api/Site/piratebays/costant.py +0 -15
- StreamingCommunity/Api/Site/piratebays/site.py +0 -89
- StreamingCommunity/Api/Site/piratebays/title.py +0 -45
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +0 -55
- StreamingCommunity/Api/Site/streamingcommunity/costant.py +0 -15
- StreamingCommunity/Api/Site/streamingcommunity/film.py +0 -70
- StreamingCommunity/Api/Site/streamingcommunity/series.py +0 -205
- StreamingCommunity/Api/Site/streamingcommunity/site.py +0 -126
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +0 -113
- StreamingCommunity/Api/Template/Class/SearchType.py +0 -101
- StreamingCommunity/Api/Template/Util/__init__.py +0 -5
- StreamingCommunity/Api/Template/Util/get_domain.py +0 -137
- StreamingCommunity/Api/Template/Util/manage_ep.py +0 -153
- StreamingCommunity/Api/Template/Util/recall_search.py +0 -37
- StreamingCommunity/Api/Template/__init__.py +0 -3
- StreamingCommunity/Api/Template/site.py +0 -87
- StreamingCommunity/Lib/Downloader/HLS/downloader.py +0 -968
- StreamingCommunity/Lib/Downloader/HLS/proxyes.py +0 -110
- StreamingCommunity/Lib/Downloader/HLS/segments.py +0 -538
- StreamingCommunity/Lib/Downloader/MP4/downloader.py +0 -156
- StreamingCommunity/Lib/Downloader/TOR/downloader.py +0 -222
- StreamingCommunity/Lib/Downloader/__init__.py +0 -5
- StreamingCommunity/Lib/Driver/driver_1.py +0 -76
- StreamingCommunity/Lib/FFmpeg/__init__.py +0 -4
- StreamingCommunity/Lib/FFmpeg/capture.py +0 -170
- StreamingCommunity/Lib/FFmpeg/command.py +0 -292
- StreamingCommunity/Lib/FFmpeg/util.py +0 -242
- StreamingCommunity/Lib/M3U8/__init__.py +0 -6
- StreamingCommunity/Lib/M3U8/decryptor.py +0 -164
- StreamingCommunity/Lib/M3U8/estimator.py +0 -176
- StreamingCommunity/Lib/M3U8/parser.py +0 -666
- StreamingCommunity/Lib/M3U8/url_fixer.py +0 -52
- StreamingCommunity/Lib/TMBD/__init__.py +0 -2
- StreamingCommunity/Lib/TMBD/obj_tmbd.py +0 -39
- StreamingCommunity/Lib/TMBD/tmdb.py +0 -346
- StreamingCommunity/Upload/update.py +0 -68
- StreamingCommunity/Upload/version.py +0 -5
- StreamingCommunity/Util/_jsonConfig.py +0 -204
- StreamingCommunity/Util/call_stack.py +0 -42
- StreamingCommunity/Util/color.py +0 -20
- StreamingCommunity/Util/console.py +0 -12
- StreamingCommunity/Util/ffmpeg_installer.py +0 -275
- StreamingCommunity/Util/headers.py +0 -147
- StreamingCommunity/Util/logger.py +0 -53
- StreamingCommunity/Util/message.py +0 -46
- StreamingCommunity/Util/os.py +0 -514
- StreamingCommunity/Util/table.py +0 -163
- StreamingCommunity-1.9.1.dist-info/RECORD +0 -95
- {StreamingCommunity-1.9.1.dist-info → StreamingCommunity-1.9.4.dist-info}/LICENSE +0 -0
- {StreamingCommunity-1.9.1.dist-info → StreamingCommunity-1.9.4.dist-info}/top_level.txt +0 -0
|
@@ -1,205 +0,0 @@
|
|
|
1
|
-
# 3.12.23
|
|
2
|
-
|
|
3
|
-
import os
|
|
4
|
-
import sys
|
|
5
|
-
import time
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
# Internal utilities
|
|
9
|
-
from StreamingCommunity.Util.console import console, msg
|
|
10
|
-
from StreamingCommunity.Util.message import start_message
|
|
11
|
-
from StreamingCommunity.Util.call_stack import get_call_stack
|
|
12
|
-
from StreamingCommunity.Util.table import TVShowManager
|
|
13
|
-
from StreamingCommunity.Lib.Downloader import HLS_Downloader
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
# Logic class
|
|
17
|
-
from .util.ScrapeSerie import ScrapeSerie
|
|
18
|
-
from StreamingCommunity.Api.Template.Util import manage_selection, map_episode_title, validate_selection, validate_episode_selection, execute_search
|
|
19
|
-
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
# Player
|
|
23
|
-
from StreamingCommunity.Api.Player.vixcloud import VideoSource
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
# Variable
|
|
27
|
-
from .costant import ROOT_PATH, SITE_NAME, SERIES_FOLDER
|
|
28
|
-
table_show_manager = TVShowManager()
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
def download_video(tv_name: str, index_season_selected: int, index_episode_selected: int, scrape_serie: ScrapeSerie, video_source: VideoSource) -> None:
    """
    Download a single episode video.

    Parameters:
        - tv_name (str): Name of the TV series.
        - index_season_selected (int): Index of the selected season (1-based).
        - index_episode_selected (int): Index of the selected episode (1-based).
        - scrape_serie (ScrapeSerie): Scraper holding the collected episode metadata.
        - video_source (VideoSource): Player used to resolve the master playlist.
    """

    start_message()

    # Get info about episode (user indices are 1-based, the list is 0-based)
    obj_episode = scrape_serie.obj_episode_manager.episodes[index_episode_selected - 1]
    console.print(f"[yellow]Download: [red]{index_season_selected}:{index_episode_selected} {obj_episode.name}")
    print()

    # Define filename and path for the downloaded video
    mp4_name = f"{map_episode_title(tv_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
    mp4_path = os.path.join(ROOT_PATH, SITE_NAME, SERIES_FOLDER, tv_name, f"S{index_season_selected}")

    # Retrieve scws and if available master playlist
    video_source.get_iframe(obj_episode.id)
    video_source.get_content()
    master_playlist = video_source.get_playlist()

    # Download the episode
    r_proc = HLS_Downloader(
        m3u8_playlist=master_playlist,
        output_filename=os.path.join(mp4_path, mp4_name)
    ).start()

    if r_proc == 404:
        time.sleep(2)

        # Re-call the search function when the user chooses to return home
        if msg.ask("[green]Do you want to continue [white]([red]y[white])[green] or return at home[white]([red]n[white]) ", choices=['y', 'n'], default='y', show_choices=True) == "n":
            frames = get_call_stack()
            # NOTE(review): frames[-4] assumes a fixed call depth back to the
            # site entry point — confirm against get_call_stack's contract.
            execute_search(frames[-4])

    # Fix: identity check against None instead of `!= None` (PEP 8 idiom)
    if r_proc is not None:
        console.print("[green]Result: ")
        console.print(r_proc)
|
|
75
|
-
|
|
76
|
-
def download_episode(tv_name: str, index_season_selected: int, scrape_serie: ScrapeSerie, video_source: VideoSource, download_all: bool = False) -> None:
    """
    Download episodes of a selected season.

    Parameters:
        - tv_name (str): Name of the TV series.
        - index_season_selected (int): Index of the selected season.
        - download_all (bool): Download all episodes in the season.
    """

    # Reset previously collected episodes, then resolve the real season number
    scrape_serie.obj_episode_manager.clear()
    season_number = scrape_serie.obj_season_manager.seasons[index_season_selected - 1].number

    # Show the banner and fetch the episode list for this season
    start_message()
    scrape_serie.collect_title_season(season_number)
    episodes_count = scrape_serie.obj_episode_manager.get_length()

    if download_all:

        # No prompt: grab every episode in order
        for episode_index in range(1, episodes_count + 1):
            download_video(tv_name, index_season_selected, episode_index, scrape_serie, video_source)
        console.print(f"\n[red]End downloaded [yellow]season: [red]{index_season_selected}.")
        return

    # Interactive path: show the table and let the user pick episodes
    last_command = display_episodes_list(scrape_serie)
    selected_episodes = manage_selection(last_command, episodes_count)

    try:
        selected_episodes = validate_episode_selection(selected_episodes, episodes_count)
    except ValueError as error:
        console.print(f"[red]{str(error)}")
        return

    # Download each episode the user selected
    for episode_index in selected_episodes:
        download_video(tv_name, index_season_selected, episode_index, scrape_serie, video_source)
|
|
117
|
-
|
|
118
|
-
def download_series(select_season: MediaItem, version: str) -> None:
    """
    Download episodes of a TV series based on user selection.

    Parameters:
        - select_season (MediaItem): Selected media item (TV series).
        - version (str): Version of the site, forwarded to the scraper's Inertia headers.
    """

    # Start message and set up video source
    start_message()

    # Init class
    scrape_serie = ScrapeSerie(SITE_NAME)
    video_source = VideoSource(SITE_NAME, True)

    # Setup video source
    scrape_serie.setup(version, select_season.id, select_season.slug)
    video_source.setup(select_season.id)

    # Collect information about seasons
    scrape_serie.collect_info_seasons()
    seasons_count = scrape_serie.obj_season_manager.get_length()

    # Prompt user for season selection and download episodes
    console.print(f"\n[green]Seasons found: [red]{seasons_count}")
    index_season_selected = msg.ask(
        "\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
        "[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
    )

    # Manage and validate the selection (expands ranges and "*")
    list_season_select = manage_selection(index_season_selected, seasons_count)

    try:
        list_season_select = validate_selection(list_season_select, seasons_count)
    except ValueError as e:
        console.print(f"[red]{str(e)}")
        return

    # Loop through the selected seasons and download episodes
    for i_season in list_season_select:
        if len(list_season_select) > 1 or index_season_selected == "*":

            # Download all episodes if multiple seasons are selected or if '*' is used
            download_episode(select_season.slug, i_season, scrape_serie, video_source, download_all=True)
        else:

            # Otherwise, let the user select specific episodes for the single season
            download_episode(select_season.slug, i_season, scrape_serie, video_source, download_all=False)
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
def display_episodes_list(scrape_serie) -> str:
    """
    Render the episodes table and collect the user's selection command.

    Returns:
        str: The last command entered by the user.
    """

    # Limit how many rows are shown per slice
    table_show_manager.set_slice_end(10)

    # Declare the table columns and their colors
    table_show_manager.add_column({
        "Index": {'color': 'red'},
        "Name": {'color': 'magenta'},
        "Duration": {'color': 'green'}
    })

    # One row per collected episode
    for episode in scrape_serie.obj_episode_manager.episodes:
        table_show_manager.add_tv_show({
            'Index': str(episode.number),
            'Name': episode.name,
            'Duration': str(episode.duration)
        })

    # Show the table and read back the user's command
    last_command = table_show_manager.run()

    if last_command == "q":
        console.print("\n[red]Quit [white]...")
        sys.exit(0)

    return last_command
|
|
@@ -1,126 +0,0 @@
|
|
|
1
|
-
# 10.12.23
|
|
2
|
-
|
|
3
|
-
import sys
|
|
4
|
-
import json
|
|
5
|
-
import logging
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
# External libraries
|
|
9
|
-
import httpx
|
|
10
|
-
from bs4 import BeautifulSoup
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
# Internal utilities
|
|
14
|
-
from StreamingCommunity.Util.console import console
|
|
15
|
-
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
16
|
-
from StreamingCommunity.Util.headers import get_headers
|
|
17
|
-
from StreamingCommunity.Util.table import TVShowManager
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
# Logic class
|
|
22
|
-
from StreamingCommunity.Api.Template import get_select_title
|
|
23
|
-
from StreamingCommunity.Api.Template.Util import search_domain
|
|
24
|
-
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
# Config
|
|
28
|
-
from .costant import SITE_NAME
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
# Variable
|
|
32
|
-
media_search_manager = MediaManager()
|
|
33
|
-
table_show_manager = TVShowManager()
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
def get_version(text: str):
    """
    Extract the site version embedded in a webpage's HTML.

    Parameters:
        - text (str): The HTML text of the webpage.

    Returns:
        str: The version extracted from the webpage.

    Raises:
        Exception: Re-raised if the version cannot be located or parsed.
    """
    try:
        # The version lives as JSON inside the "data-page" attribute of #app
        parsed_page = BeautifulSoup(text, "html.parser")
        app_payload = parsed_page.find("div", {"id": "app"}).get("data-page")
        version = json.loads(app_payload)['version']

        return version

    except Exception as e:
        logging.error(f"Error extracting version: {e}")
        raise
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
def get_version_and_domain():
    """
    Retrieve the current version and working domain of the site.

    Resolves (and if needed rediscovers) the site's domain, then fetches the
    homepage and reads the version string out of its HTML.
    """

    # Resolve a working domain, falling back to a search if the stored one fails
    domain_to_use, base_url = search_domain(SITE_NAME, f"https://{SITE_NAME}")

    # Fetch the homepage and parse the version out of it
    homepage = httpx.get(base_url, headers={'user-agent': get_headers()})
    version = get_version(homepage.text)

    return version, domain_to_use
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
def title_search(title_search: str, domain: str) -> int:
    """
    Search for titles based on a search query.

    Parameters:
        - title_search (str): The title to search for.
        - domain (str): The domain to search on.

    Returns:
        int: The number of titles found (0 if the search request fails).
    """

    max_timeout = config_manager.get_int("REQUESTS", "timeout")

    # Send request to search for titles (spaces are encoded as "+")
    try:
        response = httpx.get(
            url=f"https://{SITE_NAME}.{domain}/api/search?q={title_search.replace(' ', '+')}",
            headers={'user-agent': get_headers()},
            timeout=max_timeout
        )
        response.raise_for_status()

    except Exception as e:
        console.print(f"Site: {SITE_NAME}, request search error: {e}")
        # Fix: the original fell through to an undefined `response` after a
        # failed request (NameError); report zero results instead.
        return 0

    # Add found titles to media search manager
    for dict_title in response.json()['data']:
        media_search_manager.add_media({
            'id': dict_title.get('id'),
            'slug': dict_title.get('slug'),
            'name': dict_title.get('name'),
            'type': dict_title.get('type'),
            'score': dict_title.get('score')
        })

    # Return the number of titles found
    return media_search_manager.get_length()
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
def run_get_select_title():
    """
    Display a selection of titles and prompt the user to choose one.

    Delegates to get_select_title using the module-level table and media
    managers shared by this site module.
    """
    return get_select_title(table_show_manager, media_search_manager)
|
|
@@ -1,113 +0,0 @@
|
|
|
1
|
-
# 01.03.24
|
|
2
|
-
|
|
3
|
-
import logging
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
# External libraries
|
|
7
|
-
import httpx
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
# Internal utilities
|
|
11
|
-
from StreamingCommunity.Util.headers import get_headers
|
|
12
|
-
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
13
|
-
from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager, EpisodeManager
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
# Variable
|
|
17
|
-
max_timeout = config_manager.get_int("REQUESTS", "timeout")
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
class ScrapeSerie:
    # Scrapes season/episode metadata for a TV series from a streaming site
    # that serves Inertia.js-style JSON payloads.

    def __init__(self, site_name: str):
        """
        Initialize the ScrapeSerie class for scraping TV series information.

        Args:
            site_name (str): Name of the streaming site to scrape from
        """
        self.is_series = False
        self.headers = {'user-agent': get_headers()}
        self.base_name = site_name
        # Domain is read from the 'SITE' section of config.json
        self.domain = config_manager.get_dict('SITE', self.base_name)['domain']

    def setup(self, version: str = None, media_id: int = None, series_name: str = None):
        """
        Set up the scraper with specific media details.

        Args:
            version (str, optional): Site version for request headers
            media_id (int, optional): Unique identifier for the media
            series_name (str, optional): Name of the TV series
        """
        self.version = version
        self.media_id = media_id

        # If series name is provided, initialize series-specific managers
        if series_name is not None:
            self.is_series = True
            self.series_name = series_name
            self.obj_season_manager: SeasonManager = SeasonManager()
            self.obj_episode_manager: EpisodeManager = EpisodeManager()

    def collect_info_seasons(self) -> None:
        """
        Retrieve season information for a TV series from the streaming site.

        Raises:
            Exception: If there's an error fetching season information
        """
        # These headers make the Inertia endpoint return JSON instead of HTML;
        # note they replace the plain headers set in __init__.
        self.headers = {
            'user-agent': get_headers(),
            'x-inertia': 'true',
            'x-inertia-version': self.version,
        }

        try:

            response = httpx.get(
                url=f"https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}",
                headers=self.headers,
                timeout=max_timeout
            )
            response.raise_for_status()

            # Extract seasons from JSON response
            json_response = response.json().get('props', {}).get('title', {}).get('seasons', [])

            # Add each season to the season manager
            for dict_season in json_response:
                self.obj_season_manager.add_season(dict_season)

        except Exception as e:
            logging.error(f"Error collecting season info: {e}")
            raise

    def collect_title_season(self, number_season: int) -> None:
        """
        Retrieve episode information for a specific season.

        Args:
            number_season (int): Season number to fetch episodes for

        Raises:
            Exception: If there's an error fetching episode information
        """
        # NOTE(review): reuses self.headers as set by collect_info_seasons();
        # calling this method first would send plain headers — confirm the
        # intended call order.
        try:

            response = httpx.get(
                url=f'https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}/stagione-{number_season}',
                headers=self.headers,
                timeout=max_timeout
            )
            response.raise_for_status()

            # Extract episodes from JSON response
            json_response = response.json().get('props', {}).get('loadedSeason', {}).get('episodes', [])

            # Add each episode to the episode manager
            for dict_episode in json_response:
                self.obj_episode_manager.add_episode(dict_episode)

        except Exception as e:
            logging.error(f"Error collecting title season info: {e}")
            raise
|
|
@@ -1,101 +0,0 @@
|
|
|
1
|
-
# 07.07.24
|
|
2
|
-
|
|
3
|
-
from typing import List, TypedDict
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
class MediaItemData(TypedDict, total=False):
    # Optional-field schema of a search result. The trailing tags mark which
    # scraper family populates each field: GENERAL (all sites), TOR (torrent
    # indexers), SC (StreamingCommunity-style sites).
    id: int  # GENERAL
    name: str  # GENERAL
    type: str  # GENERAL
    url: str  # GENERAL
    size: str  # GENERAL
    score: str  # GENERAL
    date: str  # GENERAL
    desc: str  # GENERAL

    seeder: int  # TOR
    leecher: int  # TOR

    slug: str  # SC
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
class MediaItemMeta(type):
    """
    Metaclass that injects dynamic attribute handling into its classes:
    any keyword passed to the constructor becomes an instance attribute,
    reads of unknown attributes yield None instead of raising, and writes
    go straight into the instance __dict__.
    """

    def __new__(cls, name, bases, dct):
        def _init(self, **kwargs):
            # Store every provided keyword as an instance attribute
            for attr_name, attr_value in kwargs.items():
                setattr(self, attr_name, attr_value)

        def _getattr(self, item):
            # Missing attributes resolve to None rather than AttributeError
            return self.__dict__.get(item, None)

        def _setattr(self, key, value):
            # Write directly into the instance dict, bypassing descriptors
            self.__dict__[key] = value

        dct['__init__'] = _init
        dct['__getattr__'] = _getattr
        dct['__setattr__'] = _setattr

        return super().__new__(cls, name, bases, dct)
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
class MediaItem(metaclass=MediaItemMeta):
    # Annotation-only declaration: MediaItemMeta supplies __init__,
    # __getattr__ and __setattr__, so instances accept arbitrary keyword
    # fields and unknown attribute reads return None. Field tags mirror
    # MediaItemData (GENERAL / TOR / SC scraper families).
    id: int  # GENERAL
    name: str  # GENERAL
    type: str  # GENERAL
    url: str  # GENERAL
    size: str  # GENERAL
    score: str  # GENERAL
    date: str  # GENERAL
    desc: str  # GENERAL

    seeder: int  # TOR
    leecher: int  # TOR

    slug: str  # SC
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
class MediaManager:
    """Container that owns the ordered list of MediaItem search results."""

    def __init__(self):
        self.media_list: List[MediaItem] = []

    def add_media(self, data: dict) -> None:
        """
        Build a MediaItem from raw data and append it to the list.

        Args:
            data (dict): Media data to add.
        """
        item = MediaItem(**data)
        self.media_list.append(item)

    def get(self, index: int) -> MediaItem:
        """
        Return the media item stored at the given position.

        Args:
            index (int): The index of the media item to retrieve.

        Returns:
            MediaItem: The media item at the specified index.
        """
        return self.media_list[index]

    def get_length(self) -> int:
        """
        Returns:
            int: Number of media items currently stored.
        """
        return len(self.media_list)

    def clear(self) -> None:
        """Remove every stored media item."""
        self.media_list.clear()

    def __str__(self):
        return f"MediaManager(num_media={len(self.media_list)})"
|
|
@@ -1,137 +0,0 @@
|
|
|
1
|
-
# 18.06.24
|
|
2
|
-
|
|
3
|
-
import sys
|
|
4
|
-
from urllib.parse import urlparse
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
# External libraries
|
|
8
|
-
import httpx
|
|
9
|
-
from googlesearch import search
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
# Internal utilities
|
|
13
|
-
from StreamingCommunity.Util.headers import get_headers
|
|
14
|
-
from StreamingCommunity.Util.console import console, msg
|
|
15
|
-
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
def google_search(query):
    """
    Perform a Google search and return the first result.

    Args:
        query (str): The search query to execute on Google.

    Returns:
        str: The first URL result from the search, or None if no result is found.
    """
    # Ask Google for a single result and pull it off the iterator
    results_iter = search(query, num_results=1)
    first_result = next(results_iter, None)

    if not first_result:
        console.print("[red]No results found.[/red]")

    return first_result
|
|
38
|
-
|
|
39
|
-
def get_final_redirect_url(initial_url, max_timeout):
    """
    Follow redirects from the initial URL and return the final URL reached.

    Args:
        initial_url (str): The URL to start with and follow redirects.
        max_timeout: Request timeout in seconds.

    Returns:
        The final URL after all redirects are followed, or None on failure.
    """
    try:
        # A redirect-following client resolves the whole chain for us
        with httpx.Client(follow_redirects=True, timeout=max_timeout, headers={'user-agent': get_headers()}) as client:
            response = client.get(initial_url)
            response.raise_for_status()

            # response.url reflects the URL after every redirect
            return response.url

    except Exception as e:
        console.print(f"[cyan]Test url[white]: [red]{initial_url}, [cyan]error[white]: [red]{e}")
        return None
|
|
64
|
-
|
|
65
|
-
def search_domain(site_name: str, base_url: str):
    """
    Search for a valid domain for the given site name and base URL.

    Parameters:
        - site_name (str): The name of the site to search the domain for.
        - base_url (str): The base URL to construct complete URLs.

    Returns:
        tuple: The found domain and the complete URL.
    """

    # Extract config domain
    max_timeout = config_manager.get_int("REQUESTS", "timeout")
    domain = str(config_manager.get_dict("SITE", site_name)['domain'])

    try:

        # Test the current domain
        response_follow = httpx.get(f"{base_url}.{domain}", headers={'user-agent': get_headers()}, timeout=max_timeout, follow_redirects=True)
        response_follow.raise_for_status()

    except Exception as e:

        # Stored domain failed: fall back to a Google search for the site
        query = base_url.split("/")[-1]
        first_url = google_search(query)
        console.print(f"[green]First url from google seach[white]: [red]{first_url}")

        if first_url:
            final_url = get_final_redirect_url(first_url, max_timeout)

            if final_url != None:
                console.print(f"\n[bold yellow]Suggestion:[/bold yellow] [white](Experimental)\n"
                              f"[cyan]New final URL[white]: [green]{final_url}")

                # Keep only the TLD of the discovered URL
                def extract_domain(url):
                    parsed_url = urlparse(url)
                    domain = parsed_url.netloc
                    return domain.split(".")[-1]

                new_domain_extract = extract_domain(str(final_url))

                if msg.ask(f"[red]Do you want to auto update config.json - '[green]{site_name}[red]' with domain: [green]{new_domain_extract}", choices=["y", "n"], default="y").lower() == "y":

                    # Update domain in config.json
                    config_manager.config['SITE'][site_name]['domain'] = new_domain_extract
                    config_manager.write_config()

                    # Return config domain
                    return new_domain_extract, f"{base_url}.{new_domain_extract}"

                else:
                    console.print("[bold red]\nManually change the domain in the JSON file.[/bold red]")
                    raise

            # NOTE(review): if final_url is None execution falls through to
            # the code below, where response_follow is undefined — confirm
            # this path is unreachable or intended to raise NameError.

        else:
            console.print("[bold red]No valid URL to follow redirects.[/bold red]")
            # NOTE(review): same fall-through concern — response_follow is
            # undefined on this path as well.

    # Ensure the URL is in string format before parsing
    parsed_url = urlparse(str(response_follow.url))
    parse_domain = parsed_url.netloc
    tld = parse_domain.split('.')[-1]

    # NOTE(review): tld is always a str here, so this check never fails;
    # presumably it was meant to guard an empty result.
    if tld is not None:

        # Update domain in config.json
        config_manager.config['SITE'][site_name]['domain'] = tld
        config_manager.write_config()

        # Return config domain
        return tld, f"{base_url}.{tld}"
|