StreamingCommunity 2.3.0__py3-none-any.whl → 2.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of StreamingCommunity might be problematic. Click here for more details.
- StreamingCommunity/run.py +61 -7
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/METADATA +88 -18
- StreamingCommunity-2.5.0.dist-info/RECORD +8 -0
- StreamingCommunity/Api/Player/Helper/Vixcloud/js_parser.py +0 -143
- StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +0 -136
- StreamingCommunity/Api/Player/ddl.py +0 -89
- StreamingCommunity/Api/Player/maxstream.py +0 -151
- StreamingCommunity/Api/Player/supervideo.py +0 -194
- StreamingCommunity/Api/Player/vixcloud.py +0 -273
- StreamingCommunity/Api/Site/1337xx/__init__.py +0 -51
- StreamingCommunity/Api/Site/1337xx/costant.py +0 -15
- StreamingCommunity/Api/Site/1337xx/site.py +0 -89
- StreamingCommunity/Api/Site/1337xx/title.py +0 -66
- StreamingCommunity/Api/Site/altadefinizione/__init__.py +0 -51
- StreamingCommunity/Api/Site/altadefinizione/costant.py +0 -19
- StreamingCommunity/Api/Site/altadefinizione/film.py +0 -74
- StreamingCommunity/Api/Site/altadefinizione/site.py +0 -95
- StreamingCommunity/Api/Site/animeunity/__init__.py +0 -51
- StreamingCommunity/Api/Site/animeunity/costant.py +0 -19
- StreamingCommunity/Api/Site/animeunity/film_serie.py +0 -135
- StreamingCommunity/Api/Site/animeunity/site.py +0 -175
- StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +0 -97
- StreamingCommunity/Api/Site/cb01new/__init__.py +0 -52
- StreamingCommunity/Api/Site/cb01new/costant.py +0 -19
- StreamingCommunity/Api/Site/cb01new/film.py +0 -73
- StreamingCommunity/Api/Site/cb01new/site.py +0 -83
- StreamingCommunity/Api/Site/ddlstreamitaly/__init__.py +0 -56
- StreamingCommunity/Api/Site/ddlstreamitaly/costant.py +0 -20
- StreamingCommunity/Api/Site/ddlstreamitaly/series.py +0 -146
- StreamingCommunity/Api/Site/ddlstreamitaly/site.py +0 -99
- StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +0 -85
- StreamingCommunity/Api/Site/guardaserie/__init__.py +0 -51
- StreamingCommunity/Api/Site/guardaserie/costant.py +0 -19
- StreamingCommunity/Api/Site/guardaserie/series.py +0 -198
- StreamingCommunity/Api/Site/guardaserie/site.py +0 -90
- StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +0 -110
- StreamingCommunity/Api/Site/ilcorsaronero/__init__.py +0 -52
- StreamingCommunity/Api/Site/ilcorsaronero/costant.py +0 -19
- StreamingCommunity/Api/Site/ilcorsaronero/site.py +0 -72
- StreamingCommunity/Api/Site/ilcorsaronero/title.py +0 -46
- StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +0 -149
- StreamingCommunity/Api/Site/mostraguarda/__init__.py +0 -49
- StreamingCommunity/Api/Site/mostraguarda/costant.py +0 -19
- StreamingCommunity/Api/Site/mostraguarda/film.py +0 -101
- StreamingCommunity/Api/Site/streamingcommunity/__init__.py +0 -56
- StreamingCommunity/Api/Site/streamingcommunity/costant.py +0 -19
- StreamingCommunity/Api/Site/streamingcommunity/film.py +0 -75
- StreamingCommunity/Api/Site/streamingcommunity/series.py +0 -206
- StreamingCommunity/Api/Site/streamingcommunity/site.py +0 -139
- StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +0 -123
- StreamingCommunity/Api/Template/Class/SearchType.py +0 -101
- StreamingCommunity/Api/Template/Util/__init__.py +0 -5
- StreamingCommunity/Api/Template/Util/get_domain.py +0 -137
- StreamingCommunity/Api/Template/Util/manage_ep.py +0 -179
- StreamingCommunity/Api/Template/Util/recall_search.py +0 -37
- StreamingCommunity/Api/Template/__init__.py +0 -3
- StreamingCommunity/Api/Template/site.py +0 -87
- StreamingCommunity/Lib/Downloader/HLS/downloader.py +0 -955
- StreamingCommunity/Lib/Downloader/HLS/proxyes.py +0 -110
- StreamingCommunity/Lib/Downloader/HLS/segments.py +0 -564
- StreamingCommunity/Lib/Downloader/MP4/downloader.py +0 -155
- StreamingCommunity/Lib/Downloader/TOR/downloader.py +0 -296
- StreamingCommunity/Lib/Downloader/__init__.py +0 -5
- StreamingCommunity/Lib/FFmpeg/__init__.py +0 -4
- StreamingCommunity/Lib/FFmpeg/capture.py +0 -170
- StreamingCommunity/Lib/FFmpeg/command.py +0 -296
- StreamingCommunity/Lib/FFmpeg/util.py +0 -249
- StreamingCommunity/Lib/M3U8/__init__.py +0 -6
- StreamingCommunity/Lib/M3U8/decryptor.py +0 -164
- StreamingCommunity/Lib/M3U8/estimator.py +0 -229
- StreamingCommunity/Lib/M3U8/parser.py +0 -666
- StreamingCommunity/Lib/M3U8/url_fixer.py +0 -52
- StreamingCommunity/Lib/TMBD/__init__.py +0 -2
- StreamingCommunity/Lib/TMBD/obj_tmbd.py +0 -39
- StreamingCommunity/Lib/TMBD/tmdb.py +0 -346
- StreamingCommunity/Upload/update.py +0 -67
- StreamingCommunity/Upload/version.py +0 -5
- StreamingCommunity/Util/_jsonConfig.py +0 -204
- StreamingCommunity/Util/call_stack.py +0 -42
- StreamingCommunity/Util/color.py +0 -20
- StreamingCommunity/Util/console.py +0 -12
- StreamingCommunity/Util/ffmpeg_installer.py +0 -351
- StreamingCommunity/Util/headers.py +0 -147
- StreamingCommunity/Util/logger.py +0 -53
- StreamingCommunity/Util/message.py +0 -64
- StreamingCommunity/Util/os.py +0 -545
- StreamingCommunity/Util/table.py +0 -229
- StreamingCommunity-2.3.0.dist-info/RECORD +0 -92
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/LICENSE +0 -0
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/WHEEL +0 -0
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/entry_points.txt +0 -0
- {StreamingCommunity-2.3.0.dist-info → StreamingCommunity-2.5.0.dist-info}/top_level.txt +0 -0
|
@@ -1,110 +0,0 @@
|
|
|
1
|
-
# 13.06.24
|
|
2
|
-
|
|
3
|
-
import logging
|
|
4
|
-
from typing import List, Dict
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
# External libraries
|
|
8
|
-
import httpx
|
|
9
|
-
from bs4 import BeautifulSoup
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
# Internal utilities
|
|
13
|
-
from StreamingCommunity.Util.headers import get_headers
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
# Logic class
|
|
17
|
-
from StreamingCommunity.Api.Template .Class.SearchType import MediaItem
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
class GetSerieInfo:
    """Scrape season and episode metadata from a TV-series page."""

    def __init__(self, dict_serie: MediaItem) -> None:
        """
        Set up the scraper for a single series.

        Parameters:
            dict_serie (MediaItem): Media item whose `url` points at the series page.
        """
        self.headers = {'user-agent': get_headers()}
        self.url = dict_serie.url
        self.tv_name = None
        self.list_episodes = None

    def _fetch_soup(self) -> BeautifulSoup:
        # Download the series page and return its parsed HTML tree.
        page = httpx.get(self.url, headers=self.headers, timeout=15)
        page.raise_for_status()
        return BeautifulSoup(page.text, "html.parser")

    def get_seasons_number(self) -> int:
        """
        Retrieves the number of seasons of a TV series.

        Returns:
            int: Number of seasons, or -999 when the page cannot be fetched/parsed.
        """
        try:
            soup = self._fetch_soup()

            # Season entries are <li> items inside the season container
            season_box = soup.find('div', class_="tt_season")
            total_seasons = len(season_box.find_all("li"))

            # Remember the series title shown in the page header
            self.tv_name = soup.find("h1", class_="front_title").get_text(strip=True)

            return total_seasons

        except Exception as e:
            logging.error(f"Error parsing HTML page: {e}")
            return -999

    def get_episode_number(self, n_season: int) -> List[Dict[str, str]]:
        """
        Retrieves the episodes of a specific season.

        Parameters:
            n_season (int): The season number.

        Returns:
            List[Dict[str, str]]: One dict per episode ('number', 'name', 'url');
            empty list on any error.
        """
        try:
            soup = self._fetch_soup()

            # Episodes of the requested season live in their own tab pane
            season_pane = soup.find('div', class_="tab-pane", id=f"season-{n_season}")

            episodes = [
                {
                    'number': item.find("a").get("data-num"),
                    'name': item.find("a").get("data-title"),
                    'url': item.find("a").get("data-link"),
                }
                for item in season_pane.find_all("li")
            ]

            self.list_episodes = episodes
            return episodes

        except Exception as e:
            logging.error(f"Error parsing HTML page: {e}")
            return []
|
|
@@ -1,52 +0,0 @@
|
|
|
1
|
-
# 02.07.24
|
|
2
|
-
|
|
3
|
-
import asyncio
|
|
4
|
-
from urllib.parse import quote_plus
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
# Internal utilities
|
|
8
|
-
from StreamingCommunity.Util.console import console, msg
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
# Logic class
|
|
12
|
-
from .site import title_search, run_get_select_title, media_search_manager
|
|
13
|
-
from .title import download_title
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
# Variable
# Registry metadata read by the main runner when listing providers:
# display index, supported content kinds, deprecation flag, ordering
# priority and which download engine this provider uses.
indice = 9
_useFor = "film_serie"
_deprecate = False
_priority = 2
_engineDownload = "tor"
from .costant import SITE_NAME
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
def search(string_to_search: str = None, get_onylDatabase: bool = False):
    """
    Main function of the application for film and series.
    """
    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{SITE_NAME}").strip()

    # Query the site asynchronously and count how many titles matched
    len_database = asyncio.run(title_search(quote_plus(string_to_search)))

    # Caller only wants the populated database, not the interactive flow
    if get_onylDatabase:
        return media_search_manager

    if len_database <= 0:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")

        # Retry with a fresh prompt
        search()
        return

    # Let the user pick a title, then hand it off to the downloader
    download_title(run_get_select_title())
|
|
@@ -1,19 +0,0 @@
|
|
|
1
|
-
# 09.06.24

import os


# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager


# The site name is derived from this package's directory name
SITE_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
ROOT_PATH = config_manager.get('DEFAULT', 'root_path')
DOMAIN_NOW = config_manager.get_dict('SITE', SITE_NAME)['domain']

# Default download folders under the configured root path
SERIES_FOLDER = os.path.join(ROOT_PATH, config_manager.get('DEFAULT', 'serie_folder_name'))
MOVIE_FOLDER = os.path.join(ROOT_PATH, config_manager.get('DEFAULT', 'movie_folder_name'))

# Optionally nest downloads under a per-site subfolder
if config_manager.get_bool("DEFAULT", "add_siteName"):
    SERIES_FOLDER = os.path.join(ROOT_PATH, SITE_NAME, config_manager.get('DEFAULT', 'serie_folder_name'))
    MOVIE_FOLDER = os.path.join(ROOT_PATH, SITE_NAME, config_manager.get('DEFAULT', 'movie_folder_name'))
|
|
@@ -1,72 +0,0 @@
|
|
|
1
|
-
# 02.07.24
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
# Internal utilities
|
|
5
|
-
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
6
|
-
from StreamingCommunity.Util.table import TVShowManager
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
# Logic class
|
|
10
|
-
from StreamingCommunity.Api.Template import get_select_title
|
|
11
|
-
from StreamingCommunity.Api.Template.Util import search_domain
|
|
12
|
-
from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
|
|
13
|
-
from .util.ilCorsarScraper import IlCorsaroNeroScraper
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
# Variable
from .costant import SITE_NAME, DOMAIN_NOW
# Shared search state: results accumulator and table renderer reused by
# title_search / run_get_select_title below.
media_search_manager = MediaManager()
table_show_manager = TVShowManager()
max_timeout = config_manager.get_int("REQUESTS", "timeout")
disable_searchDomain = config_manager.get_bool("DEFAULT", "disable_searchDomain")
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
async def title_search(word_to_search: str) -> int:
    """
    Search for titles based on a search query.

    Parameters:
        - word_to_search (str): The words to search for.

    Returns:
        - int: The number of titles found.
    """
    media_search_manager.clear()
    table_show_manager.clear()

    # Find a new domain if the previous one no longer works
    domain_to_use = DOMAIN_NOW

    if not disable_searchDomain:
        domain_to_use, base_url = search_domain(SITE_NAME, f"https://{SITE_NAME}")

    # Create scraper and collect results
    print("\n")
    scraper = IlCorsaroNeroScraper(f"https://{SITE_NAME}.{domain_to_use}/", 1)
    results = await scraper.search(word_to_search)

    # The loop index was unused; iterate the results directly
    for torrent in results:
        try:
            media_search_manager.add_media({
                'name': torrent['name'],
                'type': torrent['type'],
                'seed': torrent['seed'],
                'leech': torrent['leech'],
                'size': torrent['size'],
                'date': torrent['date'],
                'url': torrent['url']
            })

        except Exception as e:
            # Best-effort: skip malformed entries but keep processing the rest
            print(f"Error parsing a film entry: {e}")

    # Return the number of titles found
    return media_search_manager.get_length()
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def run_get_select_title():
    """
    Display a selection of titles and prompt the user to choose one.
    """
    chosen = get_select_title(table_show_manager, media_search_manager)
    return chosen
|
|
@@ -1,46 +0,0 @@
|
|
|
1
|
-
# 02.07.24
|
|
2
|
-
|
|
3
|
-
import os
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
# Internal utilities
|
|
7
|
-
from StreamingCommunity.Util.console import console
|
|
8
|
-
from StreamingCommunity.Util.os import os_manager
|
|
9
|
-
from StreamingCommunity.Util.message import start_message
|
|
10
|
-
from StreamingCommunity.Lib.Downloader import TOR_downloader
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
# Logic class
|
|
14
|
-
from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
# Config
|
|
18
|
-
from .costant import MOVIE_FOLDER
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
def download_title(select_title: MediaItem):
    """
    Downloads a media item and saves it as an MP4 file.

    Parameters:
        - select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
    """
    start_message()
    console.print(f"[yellow]Download: [red]{select_title.name} \n")
    print()

    # Build a sanitized destination folder for this title
    safe_name = os_manager.get_sanitize_file(select_title.name)
    destination = os_manager.get_sanitize_path(
        os.path.join(MOVIE_FOLDER, safe_name.replace(".mp4", ""))
    )
    os_manager.create_path(destination)

    # Hand the magnet link to the torrent client and collect the files
    torrent_client = TOR_downloader()
    torrent_client.add_magnet_link(select_title.url)
    torrent_client.start_download()
    torrent_client.move_downloaded_files(destination)
|
|
@@ -1,149 +0,0 @@
|
|
|
1
|
-
# 12.14.24
|
|
2
|
-
|
|
3
|
-
import logging
|
|
4
|
-
import asyncio
|
|
5
|
-
from typing import List, Dict, Optional
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
# External libraries
|
|
9
|
-
import httpx
|
|
10
|
-
from bs4 import BeautifulSoup
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
# Internal utilities
|
|
14
|
-
from StreamingCommunity.Util._jsonConfig import config_manager
|
|
15
|
-
from StreamingCommunity.Util.headers import get_headers
|
|
16
|
-
from StreamingCommunity.Util.console import console
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
# Variable
# Shared HTTP timeout (seconds), read from the REQUESTS section of the config
max_timeout = config_manager.get_int("REQUESTS", "timeout")
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
class IlCorsaroNeroScraper:
    """
    Async scraper for the ilCorsaroNero torrent index: fetches search result
    pages, parses the torrent table and resolves each entry's magnet link.
    """

    def __init__(self, base_url: str, max_page: int = 1):
        """
        Parameters:
            base_url (str): Root URL of the site (including trailing slash).
            max_page (int): Number of result pages to scan per search.
        """
        self.base_url = base_url
        self.max_page = max_page
        # Browser-like headers to avoid trivial bot filtering
        self.headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
            'cache-control': 'max-age=0',
            'priority': 'u=0, i',
            'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'user-agent': get_headers()
        }

    async def fetch_url(self, url: str) -> Optional[str]:
        """
        Fetch the HTML content of a given URL.

        Returns:
            Optional[str]: Page body, or None on any network/HTTP error.
        """
        try:
            console.print(f"[cyan]Fetching url[white]: [red]{url}")
            async with httpx.AsyncClient(headers=self.headers, follow_redirects=True, timeout=max_timeout) as client:
                response = await client.get(url)

                # If the request was successful, return the HTML content
                response.raise_for_status()
                return response.text

        except Exception as e:
            logging.error(f"Error fetching {url}: {e}")
            return None

    def parse_torrents(self, html: str) -> List[Dict[str, str]]:
        """
        Parse the HTML content and extract torrent details.

        Returns:
            List[Dict[str, str]]: One dict per result row with
            type/name/seed/leech/size/date/url keys.
        """
        from urllib.parse import urljoin

        torrents = []
        soup = BeautifulSoup(html, "html.parser")
        table = soup.find("tbody")

        # Guard: pages without a result table used to crash with an
        # AttributeError on None; treat them as "no results" instead.
        if table is None:
            return torrents

        for row in table.find_all("tr"):
            try:
                columns = row.find_all("td")
                title_anchor = row.find("th").find("a")

                torrents.append({
                    'type': columns[0].get_text(strip=True),
                    'name': title_anchor.get_text(strip=True),
                    'seed': columns[1].get_text(strip=True),
                    'leech': columns[2].get_text(strip=True),
                    'size': columns[3].get_text(strip=True),
                    'date': columns[4].get_text(strip=True),
                    # Join against the configured base_url instead of a
                    # hard-coded "https://ilcorsaronero.link", so a
                    # dynamically discovered domain (see site.title_search)
                    # is honoured.
                    'url': urljoin(self.base_url, title_anchor.get("href"))
                })

            except Exception as e:
                logging.error(f"Error parsing row: {e}")
                continue

        return torrents

    async def fetch_real_url(self, url: str) -> Optional[str]:
        """
        Fetch the real torrent URL (magnet link) from the detailed page.
        """
        response_html = await self.fetch_url(url)
        if not response_html:
            return None

        soup = BeautifulSoup(response_html, "html.parser")

        # Find and return the first magnet link on the page
        for link in soup.find_all("a"):
            if "magnet" in str(link):
                return link.get("href")

        return None

    async def search(self, query: str) -> List[Dict[str, str]]:
        """
        Search for torrents based on the query string.
        """
        all_torrents = []

        # Loop through each page
        for page in range(self.max_page):
            url = f'{self.base_url}search?q={query}&page={page}'

            html = await self.fetch_url(url)
            if not html:
                console.print(f"[bold red]No HTML content for page {page}[/bold red]")
                break

            torrents = self.parse_torrents(html)
            if not torrents:
                console.print(f"[bold red]No torrents found on page {page}[/bold red]")
                break

            # Use asyncio.gather to fetch all real URLs concurrently
            tasks = [self.fetch_real_url(result['url']) for result in torrents]
            real_urls = await asyncio.gather(*tasks)

            # Attach real URLs to the torrent data
            for i, result in enumerate(torrents):
                result['url'] = real_urls[i]

            all_torrents.extend(torrents)

        return all_torrents
|
|
137
|
-
|
|
138
|
-
async def main():
    """Manual smoke test: run a sample search and print what was found."""
    scraper = IlCorsaroNeroScraper("https://ilcorsaronero.link/")
    found = await scraper.search("cars")

    if not found:
        console.print("[bold red]No torrents found.[/bold red]")
        return

    for index, torrent in enumerate(found):
        console.print(f"[bold green]{index} = {torrent}[/bold green] \n")
|
|
147
|
-
|
|
148
|
-
# Allow running this module directly as a quick manual test
if __name__ == '__main__':
    asyncio.run(main())
|
|
@@ -1,49 +0,0 @@
|
|
|
1
|
-
# 26.05.24
|
|
2
|
-
|
|
3
|
-
from urllib.parse import quote_plus
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
# Internal utilities
|
|
7
|
-
from StreamingCommunity.Util.console import console, msg
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
# Logic class
|
|
11
|
-
from StreamingCommunity.Lib.TMBD import tmdb, Json_film
|
|
12
|
-
from .film import download_film
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
# Variable
# Registry metadata read by the main runner when listing providers:
# display index, supported content kinds, deprecation flag, ordering
# priority and which download engine this provider uses.
indice = 9
_useFor = "film"
_deprecate = False
_priority = 2
_engineDownload = "hls"
from .costant import SITE_NAME
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
def search(string_to_search: str = None, get_onylDatabase: bool = False):
    """
    Main function of the application for film and series.

    Parameters:
        string_to_search (str): Query typed by the user; prompted interactively when None.
        get_onylDatabase (bool): Database-only mode; unsupported for this site, returns 0.
    """

    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{SITE_NAME}").strip()

    # Not available for the moment
    # NOTE(review): other providers return media_search_manager here; this one
    # deliberately returns 0 because database mode is unsupported.
    if get_onylDatabase:
        return 0

    # Search on database (TMDB lookup instead of an on-site search)
    movie_id = tmdb.search_movie(quote_plus(string_to_search))

    if movie_id is not None:
        movie_details: Json_film = tmdb.get_movie_details(tmdb_id=movie_id)

        # Download only film
        download_film(movie_details)

    else:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")

        # Retry
        search()
|
|
@@ -1,19 +0,0 @@
|
|
|
1
|
-
# 26.05.24

import os


# Internal utilities
from StreamingCommunity.Util._jsonConfig import config_manager


# The site name is derived from this package's directory name
SITE_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
ROOT_PATH = config_manager.get('DEFAULT', 'root_path')
DOMAIN_NOW = config_manager.get_dict('SITE', SITE_NAME)['domain']

# Default download folders under the configured root path
SERIES_FOLDER = os.path.join(ROOT_PATH, config_manager.get('DEFAULT', 'serie_folder_name'))
MOVIE_FOLDER = os.path.join(ROOT_PATH, config_manager.get('DEFAULT', 'movie_folder_name'))

# Optionally nest downloads under a per-site subfolder
if config_manager.get_bool("DEFAULT", "add_siteName"):
    SERIES_FOLDER = os.path.join(ROOT_PATH, SITE_NAME, config_manager.get('DEFAULT', 'serie_folder_name'))
    MOVIE_FOLDER = os.path.join(ROOT_PATH, SITE_NAME, config_manager.get('DEFAULT', 'movie_folder_name'))
|
|
@@ -1,101 +0,0 @@
|
|
|
1
|
-
# 17.09.24
|
|
2
|
-
|
|
3
|
-
import os
|
|
4
|
-
import sys
|
|
5
|
-
import time
|
|
6
|
-
import logging
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
# External libraries
|
|
10
|
-
import httpx
|
|
11
|
-
from bs4 import BeautifulSoup
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
# Internal utilities
|
|
15
|
-
from StreamingCommunity.Util.console import console, msg
|
|
16
|
-
from StreamingCommunity.Util.os import os_manager
|
|
17
|
-
from StreamingCommunity.Util.message import start_message
|
|
18
|
-
from StreamingCommunity.Util.call_stack import get_call_stack
|
|
19
|
-
from StreamingCommunity.Util.headers import get_headers
|
|
20
|
-
from StreamingCommunity.Lib.Downloader import HLS_Downloader
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
# Logic class
|
|
24
|
-
from StreamingCommunity.Api.Template.Util import execute_search
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
# Player
|
|
28
|
-
from StreamingCommunity.Api.Player.supervideo import VideoSource
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
# TMBD
|
|
32
|
-
from StreamingCommunity.Lib.TMBD import Json_film
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
# Config
|
|
36
|
-
from .costant import SITE_NAME, DOMAIN_NOW, MOVIE_FOLDER
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
def download_film(movie_details: Json_film) -> str:
    """
    Downloads a film using the provided tmbd id.

    Parameters:
        - movie_details (Json_film): Class with info about film title.

    Raises:
        RuntimeError: When the server reports the title as not found.

    Return:
        - str: output path
    """

    # Start message and display film information
    start_message()
    console.print(f"[yellow]Download: [red]{movie_details.title} \n")

    # Make request to main site
    try:
        url = f"https://{SITE_NAME}.{DOMAIN_NOW}/set-movie-a/{movie_details.imdb_id}"
        response = httpx.get(url, headers={'User-Agent': get_headers()})
        response.raise_for_status()

    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not caught before the re-raise
        logging.error(f"Not found in the server. Dict: {movie_details}")
        raise

    if "not found" in str(response.text):
        logging.error(f"Cant find in the server, Element: {movie_details}")
        # A bare `raise` here had no active exception and crashed with an
        # unrelated "No active exception to re-raise" RuntimeError; raise an
        # explicit error with a useful message instead.
        raise RuntimeError(f"Title not found on server: {movie_details.title}")

    # Extract supervideo url from the first player mirror
    soup = BeautifulSoup(response.text, "html.parser")
    player_links = soup.find("ul", class_="_player-mirrors").find_all("li")
    supervideo_url = "https:" + player_links[0].get("data-link")

    # Set domain and media ID for the video source
    video_source = VideoSource(url=supervideo_url)

    # Define output path
    title_name = os_manager.get_sanitize_file(movie_details.title) + ".mp4"
    mp4_path = os.path.join(MOVIE_FOLDER, title_name.replace(".mp4", ""))

    # Get m3u8 master playlist
    master_playlist = video_source.get_playlist()

    # Download the film using the m3u8 playlist, and output filename
    r_proc = HLS_Downloader(
        m3u8_playlist=master_playlist,
        output_filename=os.path.join(mp4_path, title_name)
    ).start()

    if r_proc is not None:
        console.print("[green]Result: ")
        console.print(r_proc)

    return os.path.join(mp4_path, title_name)
|
|
@@ -1,56 +0,0 @@
|
|
|
1
|
-
# 21.05.24
|
|
2
|
-
|
|
3
|
-
from urllib.parse import quote_plus
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
# Internal utilities
|
|
7
|
-
from StreamingCommunity.Util.console import console, msg
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
# Logic class
|
|
11
|
-
from .site import get_version_and_domain, title_search, run_get_select_title, media_search_manager
|
|
12
|
-
from .film import download_film
|
|
13
|
-
from .series import download_series
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
# Variable
# Registry metadata read by the main runner when listing providers:
# display index, supported content kinds, deprecation flag, ordering
# priority and which download engine this provider uses.
indice = 0
_useFor = "film_serie"
_deprecate = False
_priority = 1
_engineDownload = "hls"
from .costant import SITE_NAME
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
def search(string_to_search: str = None, get_onylDatabase: bool = False):
    """
    Main function of the application for film and series.
    """
    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert word to search in [green]{SITE_NAME}").strip()

    # Resolve the working site version/domain, then query the database
    site_version, domain = get_version_and_domain()
    len_database = title_search(quote_plus(string_to_search), domain)

    # Caller only wants the populated database, not the interactive flow
    if get_onylDatabase:
        return media_search_manager

    if len_database > 0:
        # Let the user pick a title from the result table
        picked = run_get_select_title()

        # TV shows need the site version; films do not
        if picked.type == 'tv':
            download_series(picked, site_version)
        else:
            download_film(picked)
    else:
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")

        # Retry with a fresh prompt
        search()
|