StreamingCommunity 3.0.6-py3-none-any.whl → 3.0.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic.
- StreamingCommunity/Api/Player/maxstream.py +141 -0
- StreamingCommunity/Api/Player/vixcloud.py +5 -3
- StreamingCommunity/Api/Site/1337xx/__init__.py +2 -2
- StreamingCommunity/Api/Site/altadefinizione/__init__.py +2 -2
- StreamingCommunity/Api/Site/altadefinizione/film.py +15 -35
- StreamingCommunity/Api/Site/animeunity/__init__.py +1 -1
- StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +21 -23
- StreamingCommunity/Api/Site/animeworld/__init__.py +1 -1
- StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py +2 -1
- StreamingCommunity/Api/Site/cb01new/__init__.py +72 -0
- StreamingCommunity/Api/Site/cb01new/film.py +62 -0
- StreamingCommunity/Api/Site/cb01new/site.py +78 -0
- StreamingCommunity/Api/Site/guardaserie/__init__.py +1 -1
- StreamingCommunity/Api/Site/raiplay/__init__.py +2 -2
- StreamingCommunity/Api/Site/raiplay/site.py +26 -94
- StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py +37 -17
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +87 -51
- StreamingCommunity/Api/Site/streamingcommunity/film.py +2 -2
- StreamingCommunity/Api/Site/streamingcommunity/series.py +4 -4
- StreamingCommunity/Api/Site/streamingcommunity/site.py +7 -4
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +6 -3
- StreamingCommunity/Api/Site/streamingwatch/__init__.py +10 -4
- StreamingCommunity/Api/Site/streamingwatch/site.py +12 -5
- StreamingCommunity/Api/Template/site.py +103 -58
- StreamingCommunity/Lib/Proxies/proxy.py +14 -174
- StreamingCommunity/TelegramHelp/config.json +62 -0
- StreamingCommunity/TelegramHelp/telegram_bot.py +4 -0
- StreamingCommunity/Upload/version.py +1 -1
- StreamingCommunity/Util/config_json.py +0 -4
- StreamingCommunity/Util/os.py +0 -6
- StreamingCommunity/run.py +2 -2
- {streamingcommunity-3.0.6.dist-info → streamingcommunity-3.0.8.dist-info}/METADATA +31 -13
- {streamingcommunity-3.0.6.dist-info → streamingcommunity-3.0.8.dist-info}/RECORD +37 -32
- {streamingcommunity-3.0.6.dist-info → streamingcommunity-3.0.8.dist-info}/WHEEL +1 -1
- {streamingcommunity-3.0.6.dist-info → streamingcommunity-3.0.8.dist-info}/entry_points.txt +0 -0
- {streamingcommunity-3.0.6.dist-info → streamingcommunity-3.0.8.dist-info}/licenses/LICENSE +0 -0
- {streamingcommunity-3.0.6.dist-info → streamingcommunity-3.0.8.dist-info}/top_level.txt +0 -0
StreamingCommunity/Api/Site/raiplay/site.py

@@ -1,9 +1,5 @@
 # 21.05.24
 
-import threading
-import queue
-
-
 # External libraries
 import httpx
 from rich.console import Console
@@ -13,12 +9,9 @@ from rich.console import Console
 from StreamingCommunity.Util.config_json import config_manager
 from StreamingCommunity.Util.headers import get_userAgent
 from StreamingCommunity.Util.table import TVShowManager
-from StreamingCommunity.Lib.TMBD.tmdb import tmdb
-
-
-# Logic class
 from StreamingCommunity.Api.Template.config_loader import site_constant
 from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+from .util.ScrapeSerie import GetSerieInfo
 
 
 # Variable
@@ -26,76 +19,33 @@ console = Console()
 media_search_manager = MediaManager()
 table_show_manager = TVShowManager()
 max_timeout = config_manager.get_int("REQUESTS", "timeout")
-MAX_THREADS = 12
 
 
-def determine_media_type(
+def determine_media_type(item):
     """
-
+    Determine if the item is a film or TV series by checking actual seasons count
+    using GetSerieInfo.
     """
     try:
-        #
-
-
-
-
-
-
-
-        # If results found in only one category, use that
-        if movie_count > 0 and tv_count == 0:
+        # Extract program name from path_id
+        program_name = None
+        if item.get('path_id'):
+            parts = item['path_id'].strip('/').split('/')
+            if len(parts) >= 2:
+                program_name = parts[-1].split('.')[0]
+
+        if not program_name:
             return "film"
-        elif tv_count > 0 and movie_count == 0:
-            return "tv"
-
-        # If both have results, compare popularity
-        if movie_count > 0 and tv_count > 0:
-            top_movie = movie_results["results"][0]
-            top_tv = tv_results["results"][0]
-
-            return "film" if top_movie.get("popularity", 0) > top_tv.get("popularity", 0) else "tv"
 
-
+        scraper = GetSerieInfo(program_name)
+        scraper.collect_info_title()
+        return "tv" if scraper.getNumberSeason() > 0 else "film"
 
     except Exception as e:
-        console.
+        console.print(f"[red]Error determining media type: {e}[/red]")
         return "film"
 
 
-def worker_determine_type(work_queue, result_dict, worker_id):
-    """
-    Worker function to process items from queue and determine media types.
-
-    Parameters:
-        - work_queue: Queue containing items to process
-        - result_dict: Dictionary to store results
-        - worker_id: ID of the worker thread
-    """
-    while not work_queue.empty():
-        try:
-            index, item = work_queue.get(block=False)
-            title = item.get('titolo', '')
-            media_type = determine_media_type(title)
-
-            result_dict[index] = {
-                'id': item.get('id', ''),
-                'name': title,
-                'type': media_type,
-                'path_id': item.get('path_id', ''),
-                'url': f"https://www.raiplay.it{item.get('url', '')}",
-                'image': f"https://www.raiplay.it{item.get('immagine', '')}",
-            }
-
-            work_queue.task_done()
-
-        except queue.Empty:
-            break
-
-        except Exception as e:
-            console.log(f"Worker {worker_id} error: {e}")
-            work_queue.task_done()
-
-
 def title_search(query: str) -> int:
     """
     Search for titles based on a search query.
@@ -141,33 +91,15 @@ def title_search(query: str) -> int:
     data = response.json().get('agg').get('titoli').get('cards')
     data = data[:15] if len(data) > 15 else data
 
-    #
-
-
-
-
-
-
-
-
-
-    for i in range(min(MAX_THREADS, len(data))):
-        thread = threading.Thread(
-            target=worker_determine_type,
-            args=(work_queue, result_dict, i),
-            daemon=True
-        )
-        threads.append(thread)
-        thread.start()
-
-    # Wait for all threads to complete
-    for thread in threads:
-        thread.join()
-
-    # Add all results to media manager in correct order
-    for i in range(len(data)):
-        if i in result_dict:
-            media_search_manager.add_media(result_dict[i])
+    # Process each item and add to media manager
+    for item in data:
+        media_search_manager.add_media({
+            'id': item.get('id', ''),
+            'name': item.get('titolo', ''),
+            'type': determine_media_type(item),
+            'path_id': item.get('path_id', ''),
+            'url': f"https://www.raiplay.it{item.get('url', '')}",
+            'image': f"https://www.raiplay.it{item.get('immagine', '')}",
+        })
 
-    # Return the number of titles found
     return media_search_manager.get_length()
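
Read together, the '+' lines of the two raiplay/site.py hunks above replace the threaded TMDB popularity comparison with a direct per-card check. The sketch below assembles that new logic for readability; the indentation, the import lines, and the docstring layout are reconstructed from the diff rather than copied from the shipped file.

# Reconstructed sketch of the 3.0.8 classification path (not the literal file contents).
# GetSerieInfo comes from the new `from .util.ScrapeSerie import GetSerieInfo` import;
# `console` is the module-level rich Console.
from rich.console import Console
from .util.ScrapeSerie import GetSerieInfo

console = Console()


def determine_media_type(item):
    """Classify a RaiPlay search card as 'film' or 'tv' by counting its real seasons."""
    try:
        # Take the last segment of path_id, minus its extension, as the program name.
        program_name = None
        if item.get('path_id'):
            parts = item['path_id'].strip('/').split('/')
            if len(parts) >= 2:
                program_name = parts[-1].split('.')[0]

        if not program_name:
            return "film"

        scraper = GetSerieInfo(program_name)
        scraper.collect_info_title()
        return "tv" if scraper.getNumberSeason() > 0 else "film"
    except Exception as e:
        console.print(f"[red]Error determining media type: {e}[/red]")
        return "film"

Note the trade-off: the 12-thread worker pool and the TMDB lookups are gone, but each of the (at most 15) search cards now costs one sequential GetSerieInfo request inside title_search.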

StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py

@@ -30,28 +30,48 @@ class GetSerieInfo:
         try:
             program_url = f"{self.base_url}/programmi/{self.program_name}.json"
             response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
+
+            # If 404, content is not yet available
+            if response.status_code == 404:
+                logging.info(f"Content not yet available: {self.program_name}")
+                return
+
             response.raise_for_status()
-
             json_data = response.json()
 
             # Look for seasons in the 'blocks' property
-            for block in json_data.get('blocks'):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            for block in json_data.get('blocks', []):
+
+                # Check if block is a season block or episodi block
+                if block.get('type') == 'RaiPlay Multimedia Block':
+                    if block.get('name', '').lower() == 'episodi':
+                        self.publishing_block_id = block.get('id')
+
+                        # Extract seasons from sets array
+                        for season_set in block.get('sets', []):
+                            if 'stagione' in season_set.get('name', '').lower():
+                                self._add_season(season_set, block.get('id'))
+
+                    elif 'stagione' in block.get('name', '').lower():
+                        self.publishing_block_id = block.get('id')
+
+                        # Extract season directly from block's sets
+                        for season_set in block.get('sets', []):
+                            self._add_season(season_set, block.get('id'))
+
+        except httpx.HTTPError as e:
             logging.error(f"Error collecting series info: {e}")
+        except Exception as e:
+            logging.error(f"Unexpected error collecting series info: {e}")
+
+    def _add_season(self, season_set: dict, block_id: str):
+        self.seasons_manager.add_season({
+            'id': season_set.get('id', ''),
+            'number': len(self.seasons_manager.seasons) + 1,
+            'name': season_set.get('name', ''),
+            'path': season_set.get('path_id', ''),
+            'episodes_count': season_set.get('episode_size', {}).get('number', 0)
+        })
 
     def collect_info_season(self, number_season: int) -> None:
         """Get episodes for a specific season."""
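
The season-discovery loop added above loses its indentation in this diff view, so the nesting shown next is a best-effort reconstruction; `add_season` stands in for the new `self._add_season` helper and `json_data` is the parsed `/programmi/<name>.json` payload. Treat it as a reading aid only.

# Sketch of the 3.0.8 season parsing; the exact nesting is an assumption.
def collect_seasons(json_data: dict, add_season) -> None:
    for block in json_data.get('blocks', []):
        if block.get('type') == 'RaiPlay Multimedia Block':
            # An 'episodi' multimedia block lists its seasons inside the 'sets' array...
            if block.get('name', '').lower() == 'episodi':
                for season_set in block.get('sets', []):
                    if 'stagione' in season_set.get('name', '').lower():
                        add_season(season_set, block.get('id'))
            # ...while a block whose own name contains 'stagione' contributes every set.
            elif 'stagione' in block.get('name', '').lower():
                for season_set in block.get('sets', []):
                    add_season(season_set, block.get('id'))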

StreamingCommunity/Api/Site/streamingcommunity/__init__.py

@@ -12,6 +12,7 @@ from rich.prompt import Prompt
 
 # Internal utilities
 from StreamingCommunity.Api.Template import get_select_title
+from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
 from StreamingCommunity.Api.Template.config_loader import site_constant
 from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
 from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
@@ -25,48 +26,72 @@ from .series import download_series
 
 # Variable
 indice = 0
-_useFor = "Film_&_Serie"
+_useFor = "Film_&_Serie" # "Movies_&_Series"
 _priority = 0
 _engineDownload = "hls"
 _deprecate = False
 
 msg = Prompt()
 console = Console()
+proxy = None
 
 
 def get_user_input(string_to_search: str = None):
     """
     Asks the user to input a search term.
     Handles both Telegram bot input and direct input.
+    If string_to_search is provided, it's returned directly (after stripping).
     """
-    if string_to_search is None:
-
-
-
-
-
-
-
-
-
-
+    if string_to_search is not None:
+        return string_to_search.strip()
+
+    if site_constant.TELEGRAM_BOT:
+        bot = get_bot_instance()
+        user_response = bot.ask(
+            "key_search", # Request type
+            "Enter the search term\nor type 'back' to return to the menu: ",
+            None
+        )
+
+        if user_response is None:
+            bot.send_message("Timeout: No search term entered.", None)
+            return None
+
+        if user_response.lower() == 'back':
+            bot.send_message("Returning to the main menu...", None)
+
+            try:
                 # Restart the script
                 subprocess.Popen([sys.executable] + sys.argv)
                 sys.exit()
-
-
-
-
+
+            except Exception as e:
+                bot.send_message(f"Error during restart attempt: {e}", None)
+                return None # Return None if restart fails
+
+        return user_response.strip()
+
+    else:
+        return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
 
-def process_search_result(select_title, selections=None):
+def process_search_result(select_title, selections=None, proxy=None):
     """
     Handles the search result and initiates the download for either a film or series.
 
     Parameters:
-        select_title (MediaItem): The selected media item
+        select_title (MediaItem): The selected media item. Can be None if selection fails.
         selections (dict, optional): Dictionary containing selection inputs that bypass manual input
-                                     {'season': season_selection, 'episode': episode_selection}
+                                     e.g., {'season': season_selection, 'episode': episode_selection}
+        proxy (str, optional): The proxy to use for downloads.
     """
+    if not select_title:
+        if site_constant.TELEGRAM_BOT:
+            bot = get_bot_instance()
+            bot.send_message("No title selected or selection cancelled.", None)
+        else:
+            console.print("[yellow]No title selected or selection cancelled.")
+        return
+
     if select_title.type == 'tv':
         season_selection = None
        episode_selection = None
@@ -75,56 +100,67 @@ def process_search_result(select_title, selections=None):
             season_selection = selections.get('season')
             episode_selection = selections.get('episode')
 
-        download_series(select_title, season_selection, episode_selection)
-
+        download_series(select_title, season_selection, episode_selection, proxy)
+
     else:
-        download_film(select_title)
+        download_film(select_title, proxy)
 
 def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
     """
     Main function of the application for search.
 
     Parameters:
-        string_to_search (str, optional): String to search for
-
-
+        string_to_search (str, optional): String to search for. Can be passed from run.py.
+                                          If 'back', special handling might occur in get_user_input.
+        get_onlyDatabase (bool, optional): If True, return only the database search manager object.
+        direct_item (dict, optional): Direct item to process (bypasses search).
         selections (dict, optional): Dictionary containing selection inputs that bypass manual input
-
+                                     for series (season/episode).
     """
+    bot = None
+    if site_constant.TELEGRAM_BOT:
+        bot = get_bot_instance()
+
+    # Check proxy if not already set
+    finder = ProxyFinder(site_constant.FULL_URL)
+    proxy = finder.find_fast_proxy()
+
     if direct_item:
-
-        process_search_result(
+        select_title_obj = MediaItem(**direct_item)
+        process_search_result(select_title_obj, selections, proxy)
         return
+
 
-    if string_to_search is None:
-        if site_constant.TELEGRAM_BOT:
-            bot = get_bot_instance()
-            string_to_search = bot.ask(
-                "key_search",
-                f"Enter the search term\nor type 'back' to return to the menu: ",
-                None
-            )
 
-
+    actual_search_query = get_user_input(string_to_search)
 
-
-
-
-
-
+    # Handle cases where user input is empty, or 'back' was handled (sys.exit or None return)
+    if not actual_search_query:
+        if bot:
+            if actual_search_query is None: # Specifically for timeout from bot.ask or failed restart
+                bot.send_message("Search term not provided or operation cancelled. Returning.", None)
+        return
 
-    #
-
+    # Perform search on the database using the obtained query
+    finder = ProxyFinder(site_constant.FULL_URL)
+    proxy = finder.find_fast_proxy()
+    len_database = title_search(actual_search_query, proxy)
 
-    # If only the database
+    # If only the database object (media_search_manager populated by title_search) is needed
    if get_onlyDatabase:
-        return media_search_manager
+        return media_search_manager
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
-        process_search_result(select_title, selections)
+        select_title = get_select_title(table_show_manager, media_search_manager, len_database)
+        process_search_result(select_title, selections, proxy)
 
     else:
-
-
-
+        no_results_message = f"No results found for: '{actual_search_query}'"
+        if bot:
+            bot.send_message(no_results_message, None)
+        else:
+            console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
+
+        # Do not call search() recursively here to avoid infinite loops on no results.
+        # The flow should return to the caller (e.g., main menu in run.py).
+        return
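
Taken together, these hunks rework search() in streamingcommunity/__init__.py so that a proxy string is resolved via ProxyFinder and threaded into title_search and process_search_result (and from there into the downloaders). The condensed sketch below shows only the happy path: Telegram branches and error handling are dropped, the two ProxyFinder lookups the shipped code performs are collapsed into one, and title_search, process_search_result, get_user_input, media_search_manager and table_show_manager are module-level names defined elsewhere in the same file.

# Condensed, non-authoritative sketch of the 3.0.8 search() flow.
from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
from StreamingCommunity.Api.Template import get_select_title
from StreamingCommunity.Api.Template.config_loader import site_constant
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem


def search_sketch(string_to_search=None, get_onlyDatabase=False, direct_item=None, selections=None):
    # Resolve a working proxy up front; the same string is reused for every later call.
    proxy = ProxyFinder(site_constant.FULL_URL).find_fast_proxy()

    if direct_item:
        process_search_result(MediaItem(**direct_item), selections, proxy)
        return

    query = get_user_input(string_to_search)
    if not query:
        return

    len_database = title_search(query, proxy)      # site requests go through the proxy
    if get_onlyDatabase:
        return media_search_manager

    if len_database > 0:
        select_title = get_select_title(table_show_manager, media_search_manager, len_database)
        process_search_result(select_title, selections, proxy)   # proxy reaches the downloaders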

StreamingCommunity/Api/Site/streamingcommunity/film.py

@@ -27,7 +27,7 @@ from StreamingCommunity.Api.Player.vixcloud import VideoSource
 console = Console()
 
 
-def download_film(select_title: MediaItem) -> str:
+def download_film(select_title: MediaItem, proxy: str = None) -> str:
     """
     Downloads a film using the provided film ID, title name, and domain.
 
@@ -55,7 +55,7 @@ def download_film(select_title: MediaItem) -> str:
     console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
 
     # Init class
-    video_source = VideoSource(f"{site_constant.FULL_URL}/it", False, select_title.id)
+    video_source = VideoSource(f"{site_constant.FULL_URL}/it", False, select_title.id, proxy)
 
     # Retrieve scws and if available master playlist
     video_source.get_iframe(select_title.id)

StreamingCommunity/Api/Site/streamingcommunity/series.py

@@ -142,7 +142,7 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, vid
             break
 
 
-def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
+def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None, proxy = None) -> None:
     """
     Handle downloading a complete series.
 
@@ -154,8 +154,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
     start_message()
 
     # Init class
-    video_source = VideoSource(f"{site_constant.FULL_URL}/it", True, select_season.id)
-    scrape_serie = GetSerieInfo(f"{site_constant.FULL_URL}/it", select_season.id, select_season.slug)
+    video_source = VideoSource(f"{site_constant.FULL_URL}/it", True, select_season.id, proxy)
+    scrape_serie = GetSerieInfo(f"{site_constant.FULL_URL}/it", select_season.id, select_season.slug, proxy)
 
     # Collect information about season
     scrape_serie.getNumberSeason()
@@ -219,4 +219,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
         # Get script_id
         script_id = TelegramSession.get_session()
         if script_id != "unknown":
-            TelegramSession.deleteScriptId(script_id)
+            TelegramSession.deleteScriptId(script_id)

StreamingCommunity/Api/Site/streamingcommunity/site.py

@@ -28,7 +28,7 @@ table_show_manager = TVShowManager()
 max_timeout = config_manager.get_int("REQUESTS", "timeout")
 
 
-def title_search(query: str) -> int:
+def title_search(query: str, proxy: str) -> int:
     """
     Search for titles based on a search query.
 
@@ -48,7 +48,8 @@ def title_search(query: str) -> int:
         response = httpx.get(
             f"{site_constant.FULL_URL}/it",
             headers={'user-agent': get_userAgent()},
-            timeout=max_timeout
+            timeout=max_timeout,
+            proxy=proxy
         )
         response.raise_for_status()
 
@@ -56,6 +57,7 @@ def title_search(query: str) -> int:
         version = json.loads(soup.find('div', {'id': "app"}).get("data-page"))['version']
 
     except Exception as e:
+        if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
         console.print(f"[red]Site: {site_constant.SITE_NAME} version, request error: {e}")
         return 0
 
@@ -71,7 +73,8 @@ def title_search(query: str) -> int:
            'x-inertia': 'true',
            'x-inertia-version': version
        },
-        timeout=max_timeout
+        timeout=max_timeout,
+        proxy=proxy
     )
     response.raise_for_status()
 
@@ -117,4 +120,4 @@ def title_search(query: str) -> int:
         bot.send_message(f"Lista dei risultati:", choices)
 
     # Return the number of titles found
-    return media_search_manager.get_length()
+    return media_search_manager.get_length()
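
The recurring change in this file (and in the other site modules below) is simply forwarding the resolved proxy string to httpx. A minimal sketch of that pattern, assuming an httpx version whose top-level request helpers accept the `proxy` keyword (the diff passes it directly) and using a placeholder proxy URL:

from typing import Optional

import httpx


def fetch_with_optional_proxy(url: str, user_agent: str, timeout: int,
                              proxy: Optional[str] = None) -> httpx.Response:
    # proxy=None keeps a direct connection; a URL such as "http://host:port"
    # routes the request through that proxy, mirroring the title_search() change above.
    response = httpx.get(
        url,
        headers={'user-agent': user_agent},
        timeout=timeout,
        proxy=proxy,
    )
    response.raise_for_status()
    return response


# Placeholder usage:
# fetch_with_optional_proxy("https://example.com/it", "Mozilla/5.0", 15, "http://127.0.0.1:8080")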

StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py

@@ -20,7 +20,7 @@ max_timeout = config_manager.get_int("REQUESTS", "timeout")
 
 
 class GetSerieInfo:
-    def __init__(self, url, media_id: int = None, series_name: str = None):
+    def __init__(self, url, media_id: int = None, series_name: str = None, proxy = None):
         """
         Initialize the GetSerieInfo class for scraping TV series information.
 
@@ -32,6 +32,7 @@ class GetSerieInfo:
         self.is_series = False
         self.headers = {'user-agent': get_userAgent()}
         self.url = url
+        self.proxy = proxy
         self.media_id = media_id
         self.seasons_manager = SeasonManager()
 
@@ -50,7 +51,8 @@ class GetSerieInfo:
             response = httpx.get(
                 url=f"{self.url}/titles/{self.media_id}-{self.series_name}",
                 headers=self.headers,
-                timeout=max_timeout
+                timeout=max_timeout,
+                proxy=self.proxy
             )
             response.raise_for_status()
 
@@ -104,7 +106,8 @@ class GetSerieInfo:
                 'x-inertia': 'true',
                 'x-inertia-version': self.version,
             },
-            timeout=max_timeout
+            timeout=max_timeout,
+            proxy=self.proxy
         )
 
         # Extract episodes from JSON response

StreamingCommunity/Api/Site/streamingwatch/__init__.py

@@ -21,12 +21,13 @@ from .series import download_series
 # Variable
 indice = 7
 _useFor = "Film_&_Serie"
-_priority =
+_priority = 0
 _engineDownload = "hls"
 _deprecate = False
 
 msg = Prompt()
 console = Console()
+proxy = None
 
 
 def get_user_input(string_to_search: str = None):
@@ -74,20 +75,25 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
         select_title = MediaItem(**direct_item)
         process_search_result(select_title, selections) # DONT SUPPORT PROXY FOR NOW
         return
+
+    # Check proxy if not already set
+    finder = ProxyFinder(site_constant.FULL_URL)
+    proxy = finder.find_fast_proxy()
 
     if string_to_search is None:
         string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
 
+    # Perform search on the database using the obtained query
     finder = ProxyFinder(url=f"{site_constant.FULL_URL}/serie/euphoria/")
-    proxy
-    len_database = title_search(string_to_search,
+    proxy = finder.find_fast_proxy()
+    len_database = title_search(string_to_search, proxy)
 
     # If only the database is needed, return the manager
     if get_onlyDatabase:
         return media_search_manager
 
     if len_database > 0:
-        select_title = get_select_title(table_show_manager, media_search_manager)
+        select_title = get_select_title(table_show_manager, media_search_manager,len_database)
         process_search_result(select_title, selections, proxy)
 
     else:

StreamingCommunity/Api/Site/streamingwatch/site.py

@@ -27,9 +27,16 @@ table_show_manager = TVShowManager()
 max_timeout = config_manager.get_int("REQUESTS", "timeout")
 
 
-def extract_nonce(response_) -> str:
+def extract_nonce(proxy) -> str:
     """Extract nonce value from the page script"""
-
+    response = httpx.get(
+        site_constant.FULL_URL,
+        headers={'user-agent': get_userAgent()},
+        timeout=max_timeout,
+        proxy=proxy
+    )
+
+    soup = BeautifulSoup(response.content, 'html.parser')
     script = soup.find('script', id='live-search-js-extra')
     if script:
         match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script.text)
@@ -38,7 +45,7 @@ def extract_nonce(response_) -> str:
     return ""
 
 
-def title_search(query: str, additionalData: list) -> int:
+def title_search(query: str, proxy: str) -> int:
     """
     Search for titles based on a search query.
 
@@ -51,12 +58,11 @@ def title_search(query: str, additionalData: list) -> int:
     media_search_manager.clear()
     table_show_manager.clear()
 
-    proxy, response_serie = additionalData
     search_url = f"{site_constant.FULL_URL}/wp-admin/admin-ajax.php"
     console.print(f"[cyan]Search url: [yellow]{search_url}")
 
     try:
-        _wpnonce = extract_nonce(
+        _wpnonce = extract_nonce(proxy)
 
         if not _wpnonce:
             console.print("[red]Error: Failed to extract nonce")
@@ -82,6 +88,7 @@ def title_search(query: str, additionalData: list) -> int:
         soup = BeautifulSoup(response.text, 'html.parser')
 
     except Exception as e:
+        if "WinError" in str(e) or "Errno" in str(e): console.print("\n[bold yellow]Please make sure you have enabled and configured a valid proxy.[/bold yellow]")
         console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
         return 0
 
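
extract_nonce() now fetches the homepage itself, through the proxy, instead of receiving a pre-fetched response. The sketch below assembles the new helper from the added lines; the final match-handling lines are an assumption inferred from the return "" fallback shown in the following hunk, and site_constant, get_userAgent and max_timeout come from the module's existing imports and config.

# Assembled sketch of the 3.0.8 extract_nonce(); the `if match:` tail is inferred.
import re

import httpx
from bs4 import BeautifulSoup


def extract_nonce(proxy) -> str:
    """Extract the WordPress admin-ajax nonce from the live-search script block."""
    response = httpx.get(
        site_constant.FULL_URL,
        headers={'user-agent': get_userAgent()},
        timeout=max_timeout,
        proxy=proxy
    )

    soup = BeautifulSoup(response.content, 'html.parser')
    script = soup.find('script', id='live-search-js-extra')
    if script:
        match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script.text)
        if match:
            return match.group(1)
    return ""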