StreamingCommunity-1.7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic.

Files changed (97)
  1. StreamingCommunity/Src/Api/Player/Helper/Vixcloud/js_parser.py +140 -0
  2. StreamingCommunity/Src/Api/Player/Helper/Vixcloud/util.py +166 -0
  3. StreamingCommunity/Src/Api/Player/ddl.py +89 -0
  4. StreamingCommunity/Src/Api/Player/maxstream.py +151 -0
  5. StreamingCommunity/Src/Api/Player/supervideo.py +194 -0
  6. StreamingCommunity/Src/Api/Player/vixcloud.py +212 -0
  7. StreamingCommunity/Src/Api/Site/1337xx/__init__.py +50 -0
  8. StreamingCommunity/Src/Api/Site/1337xx/costant.py +15 -0
  9. StreamingCommunity/Src/Api/Site/1337xx/site.py +84 -0
  10. StreamingCommunity/Src/Api/Site/1337xx/title.py +66 -0
  11. StreamingCommunity/Src/Api/Site/altadefinizione/__init__.py +50 -0
  12. StreamingCommunity/Src/Api/Site/altadefinizione/costant.py +15 -0
  13. StreamingCommunity/Src/Api/Site/altadefinizione/film.py +69 -0
  14. StreamingCommunity/Src/Api/Site/altadefinizione/site.py +86 -0
  15. StreamingCommunity/Src/Api/Site/animeunity/__init__.py +50 -0
  16. StreamingCommunity/Src/Api/Site/animeunity/anime.py +126 -0
  17. StreamingCommunity/Src/Api/Site/animeunity/costant.py +15 -0
  18. StreamingCommunity/Src/Api/Site/animeunity/film_serie.py +131 -0
  19. StreamingCommunity/Src/Api/Site/animeunity/site.py +165 -0
  20. StreamingCommunity/Src/Api/Site/animeunity/util/ScrapeSerie.py +97 -0
  21. StreamingCommunity/Src/Api/Site/bitsearch/__init__.py +51 -0
  22. StreamingCommunity/Src/Api/Site/bitsearch/costant.py +15 -0
  23. StreamingCommunity/Src/Api/Site/bitsearch/site.py +84 -0
  24. StreamingCommunity/Src/Api/Site/bitsearch/title.py +47 -0
  25. StreamingCommunity/Src/Api/Site/cb01new/__init__.py +51 -0
  26. StreamingCommunity/Src/Api/Site/cb01new/costant.py +15 -0
  27. StreamingCommunity/Src/Api/Site/cb01new/film.py +69 -0
  28. StreamingCommunity/Src/Api/Site/cb01new/site.py +74 -0
  29. StreamingCommunity/Src/Api/Site/ddlstreamitaly/Player/ScrapeSerie.py +83 -0
  30. StreamingCommunity/Src/Api/Site/ddlstreamitaly/__init__.py +57 -0
  31. StreamingCommunity/Src/Api/Site/ddlstreamitaly/costant.py +16 -0
  32. StreamingCommunity/Src/Api/Site/ddlstreamitaly/series.py +142 -0
  33. StreamingCommunity/Src/Api/Site/ddlstreamitaly/site.py +93 -0
  34. StreamingCommunity/Src/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +83 -0
  35. StreamingCommunity/Src/Api/Site/guardaserie/Player/ScrapeSerie.py +110 -0
  36. StreamingCommunity/Src/Api/Site/guardaserie/__init__.py +52 -0
  37. StreamingCommunity/Src/Api/Site/guardaserie/costant.py +15 -0
  38. StreamingCommunity/Src/Api/Site/guardaserie/series.py +195 -0
  39. StreamingCommunity/Src/Api/Site/guardaserie/site.py +84 -0
  40. StreamingCommunity/Src/Api/Site/guardaserie/util/ScrapeSerie.py +110 -0
  41. StreamingCommunity/Src/Api/Site/mostraguarda/__init__.py +48 -0
  42. StreamingCommunity/Src/Api/Site/mostraguarda/costant.py +15 -0
  43. StreamingCommunity/Src/Api/Site/mostraguarda/film.py +94 -0
  44. StreamingCommunity/Src/Api/Site/piratebays/__init__.py +50 -0
  45. StreamingCommunity/Src/Api/Site/piratebays/costant.py +15 -0
  46. StreamingCommunity/Src/Api/Site/piratebays/site.py +89 -0
  47. StreamingCommunity/Src/Api/Site/piratebays/title.py +45 -0
  48. StreamingCommunity/Src/Api/Site/streamingcommunity/__init__.py +55 -0
  49. StreamingCommunity/Src/Api/Site/streamingcommunity/costant.py +15 -0
  50. StreamingCommunity/Src/Api/Site/streamingcommunity/film.py +70 -0
  51. StreamingCommunity/Src/Api/Site/streamingcommunity/series.py +203 -0
  52. StreamingCommunity/Src/Api/Site/streamingcommunity/site.py +126 -0
  53. StreamingCommunity/Src/Api/Site/streamingcommunity/util/ScrapeSerie.py +113 -0
  54. StreamingCommunity/Src/Api/Template/Class/SearchType.py +101 -0
  55. StreamingCommunity/Src/Api/Template/Util/__init__.py +5 -0
  56. StreamingCommunity/Src/Api/Template/Util/get_domain.py +137 -0
  57. StreamingCommunity/Src/Api/Template/Util/manage_ep.py +153 -0
  58. StreamingCommunity/Src/Api/Template/Util/recall_search.py +37 -0
  59. StreamingCommunity/Src/Api/Template/__init__.py +3 -0
  60. StreamingCommunity/Src/Api/Template/site.py +87 -0
  61. StreamingCommunity/Src/Lib/Downloader/HLS/downloader.py +968 -0
  62. StreamingCommunity/Src/Lib/Downloader/HLS/proxyes.py +110 -0
  63. StreamingCommunity/Src/Lib/Downloader/HLS/segments.py +540 -0
  64. StreamingCommunity/Src/Lib/Downloader/MP4/downloader.py +156 -0
  65. StreamingCommunity/Src/Lib/Downloader/TOR/downloader.py +222 -0
  66. StreamingCommunity/Src/Lib/Downloader/__init__.py +5 -0
  67. StreamingCommunity/Src/Lib/Driver/driver_1.py +76 -0
  68. StreamingCommunity/Src/Lib/FFmpeg/__init__.py +4 -0
  69. StreamingCommunity/Src/Lib/FFmpeg/capture.py +170 -0
  70. StreamingCommunity/Src/Lib/FFmpeg/command.py +292 -0
  71. StreamingCommunity/Src/Lib/FFmpeg/util.py +242 -0
  72. StreamingCommunity/Src/Lib/M3U8/__init__.py +6 -0
  73. StreamingCommunity/Src/Lib/M3U8/decryptor.py +129 -0
  74. StreamingCommunity/Src/Lib/M3U8/estimator.py +173 -0
  75. StreamingCommunity/Src/Lib/M3U8/parser.py +666 -0
  76. StreamingCommunity/Src/Lib/M3U8/url_fixer.py +52 -0
  77. StreamingCommunity/Src/Lib/TMBD/__init__.py +2 -0
  78. StreamingCommunity/Src/Lib/TMBD/obj_tmbd.py +39 -0
  79. StreamingCommunity/Src/Lib/TMBD/tmdb.py +346 -0
  80. StreamingCommunity/Src/Upload/update.py +64 -0
  81. StreamingCommunity/Src/Upload/version.py +5 -0
  82. StreamingCommunity/Src/Util/_jsonConfig.py +204 -0
  83. StreamingCommunity/Src/Util/call_stack.py +42 -0
  84. StreamingCommunity/Src/Util/color.py +20 -0
  85. StreamingCommunity/Src/Util/console.py +12 -0
  86. StreamingCommunity/Src/Util/headers.py +147 -0
  87. StreamingCommunity/Src/Util/logger.py +53 -0
  88. StreamingCommunity/Src/Util/message.py +46 -0
  89. StreamingCommunity/Src/Util/os.py +417 -0
  90. StreamingCommunity/Src/Util/table.py +163 -0
  91. StreamingCommunity/run.py +196 -0
  92. StreamingCommunity-1.7.6.dist-info/LICENSE +674 -0
  93. StreamingCommunity-1.7.6.dist-info/METADATA +348 -0
  94. StreamingCommunity-1.7.6.dist-info/RECORD +97 -0
  95. StreamingCommunity-1.7.6.dist-info/WHEEL +5 -0
  96. StreamingCommunity-1.7.6.dist-info/entry_points.txt +2 -0
  97. StreamingCommunity-1.7.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,194 @@
+ # 26.05.24
+
+ import re
+ import logging
+
+
+ # External libraries
+ import httpx
+ import jsbeautifier
+ from bs4 import BeautifulSoup
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+ from StreamingCommunity.Src.Util.headers import get_headers
+
+
+ # Variable
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+ class VideoSource:
+     def __init__(self, url: str) -> None:
+         """
+         Initializes the VideoSource object with default values.
+
+         Attributes:
+             - url (str): The URL of the video source.
+         """
+         self.headers = {
+             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+             'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
+             'User-Agent': get_headers()
+         }
+         self.client = httpx.Client()
+         self.url = url
+
+     def make_request(self, url: str) -> str:
+         """
+         Make an HTTP GET request to the provided URL.
+
+         Parameters:
+             - url (str): The URL to make the request to.
+
+         Returns:
+             - str: The response content if successful, None otherwise.
+         """
+
+         try:
+             response = self.client.get(
+                 url=url,
+                 headers=self.headers,
+                 follow_redirects=True,
+                 timeout=max_timeout
+             )
+             response.raise_for_status()
+             return response.text
+
+         except Exception as e:
+             logging.error(f"Request failed: {e}")
+             return None
+
+     def parse_html(self, html_content: str) -> BeautifulSoup:
+         """
+         Parse the provided HTML content using BeautifulSoup.
+
+         Parameters:
+             - html_content (str): The HTML content to parse.
+
+         Returns:
+             - BeautifulSoup: Parsed HTML content if successful, None otherwise.
+         """
+
+         try:
+             soup = BeautifulSoup(html_content, "html.parser")
+             return soup
+
+         except Exception as e:
+             logging.error(f"Failed to parse HTML content: {e}")
+             return None
+
+     def get_iframe(self, soup):
+         """
+         Extracts the source URL of the second iframe in the provided BeautifulSoup object.
+
+         Parameters:
+             - soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML.
+
+         Returns:
+             - str: The source URL of the second iframe, or None if not found.
+         """
+         iframes = soup.find_all("iframe")
+         if iframes and len(iframes) > 1:
+             return iframes[1].get("src")
+
+         return None
+
+     def find_content(self, url):
+         """
+         Makes a request to the specified URL and parses the HTML content.
+
+         Parameters:
+             - url (str): The URL to fetch content from.
+
+         Returns:
+             - BeautifulSoup: A BeautifulSoup object representing the parsed HTML content, or None if the request fails.
+         """
+         content = self.make_request(url)
+         if content:
+             return self.parse_html(content)
+
+         return None
+
+     def get_result_node_js(self, soup):
+         """
+         Prepares and runs a Node.js script from the provided BeautifulSoup object to retrieve the video URL.
+
+         Parameters:
+             - soup (BeautifulSoup): A BeautifulSoup object representing the parsed HTML content.
+
+         Returns:
+             - str: The output from the Node.js script, or None if the script cannot be found or executed.
+         """
+         for script in soup.find_all("script"):
+             if "eval" in str(script):
+                 return jsbeautifier.beautify(script.text)
+
+         return None
+
+     def get_playlist(self) -> str:
+         """
+         Download a video from the provided URL.
+
+         Returns:
+             str: The URL of the downloaded video if successful, None otherwise.
+         """
+         try:
+             html_content = self.make_request(self.url)
+             if not html_content:
+                 logging.error("Failed to fetch HTML content.")
+                 return None
+
+             soup = self.parse_html(html_content)
+             if not soup:
+                 logging.error("Failed to parse HTML content.")
+                 return None
+
+             # Find master playlist
+             data_js = self.get_result_node_js(soup)
+
+             if data_js is not None:
+                 match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', data_js)
+
+                 if match:
+                     return match.group(1)
+
+             else:
+
+                 iframe_src = self.get_iframe(soup)
+                 if not iframe_src:
+                     logging.error("No iframe found.")
+                     return None
+
+                 down_page_soup = self.find_content(iframe_src)
+                 if not down_page_soup:
+                     logging.error("Failed to fetch down page content.")
+                     return None
+
+                 pattern = r'data-link="(//supervideo[^"]+)"'
+                 match = re.search(pattern, str(down_page_soup))
+                 if not match:
+                     logging.error("No player available for download.")
+                     return None
+
+                 supervideo_url = "https:" + match.group(1)
+                 supervideo_soup = self.find_content(supervideo_url)
+                 if not supervideo_soup:
+                     logging.error("Failed to fetch supervideo content.")
+                     return None
+
+                 # Find master playlist
+                 data_js = self.get_result_node_js(supervideo_soup)
+
+                 match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', data_js)
+
+                 if match:
+                     return match.group(1)
+
+             return None
+
+         except Exception as e:
+             logging.error(f"An error occurred: {e}")
+             return None
+
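
For orientation, a minimal usage sketch of the VideoSource class added above (the supervideo player helper in the file list). It is not part of the diff, and the page URL is a placeholder:

from StreamingCommunity.Src.Api.Player.supervideo import VideoSource

# Placeholder URL: any page that embeds a supervideo player.
source = VideoSource("https://example.org/watch/some-movie")

# get_playlist() fetches the page, beautifies the packed player script and
# returns the playlist URL found in its "sources" block, or None on failure.
master_playlist = source.get_playlist()
print(master_playlist)
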
@@ -0,0 +1,212 @@
+ # 01.03.24
+
+ import sys
+ import logging
+ from urllib.parse import urlparse, urlencode, urlunparse
+
+
+ # External libraries
+ import httpx
+ from bs4 import BeautifulSoup
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.headers import get_headers
+ from StreamingCommunity.Src.Util.console import console, Panel
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+ from .Helper.Vixcloud.util import WindowVideo, WindowParameter, StreamsCollection
+ from .Helper.Vixcloud.js_parser import JavaScriptParser
+
+
+ # Variable
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+ class VideoSource:
+     def __init__(self, site_name: str, is_series: bool):
+         """
+         Initialize video source for streaming site.
+
+         Args:
+             site_name (str): Name of streaming site
+             is_series (bool): Flag for series or movie content
+         """
+         self.headers = {'user-agent': get_headers()}
+         self.base_name = site_name
+         self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
+         self.is_series = is_series
+
+     def setup(self, media_id: int):
+         """
+         Configure media-specific context.
+
+         Args:
+             media_id (int): Unique identifier for media item
+         """
+         self.media_id = media_id
+
+     def get_iframe(self, episode_id: int) -> None:
+         """
+         Retrieve iframe source for specified episode.
+
+         Args:
+             episode_id (int): Unique identifier for episode
+         """
+         params = {}
+
+         if self.is_series:
+             params = {
+                 'episode_id': episode_id,
+                 'next_episode': '1'
+             }
+
+         try:
+
+             # Make a request to get iframe source
+             response = httpx.get(
+                 url=f"https://{self.base_name}.{self.domain}/iframe/{self.media_id}",
+                 params=params,
+                 timeout=max_timeout
+             )
+             response.raise_for_status()
+
+             # Parse response with BeautifulSoup to get iframe source
+             soup = BeautifulSoup(response.text, "html.parser")
+             self.iframe_src = soup.find("iframe").get("src")
+
+         except Exception as e:
+             logging.error(f"Error getting iframe source: {e}")
+             raise
+
+     def parse_script(self, script_text: str) -> None:
+         """
+         Convert raw script to structured video metadata.
+
+         Args:
+             script_text (str): Raw JavaScript/HTML script content
+         """
+         try:
+             converter = JavaScriptParser.parse(js_string=str(script_text))
+
+             # Create window video, streams and parameter objects
+             self.window_video = WindowVideo(converter.get('video'))
+             self.window_streams = StreamsCollection(converter.get('streams'))
+             self.window_parameter = WindowParameter(converter.get('masterPlaylist'))
+
+         except Exception as e:
+             logging.error(f"Error parsing script: {e}")
+             raise
+
+     def get_content(self) -> None:
+         """
+         Fetch and process video content from iframe source.
+
+         Workflow:
+             - Validate iframe source
+             - Retrieve content
+             - Parse embedded script
+         """
+         try:
+             if self.iframe_src is not None:
+
+                 # Make a request to get content
+                 try:
+                     response = httpx.get(
+                         url=self.iframe_src,
+                         headers=self.headers,
+                         timeout=max_timeout
+                     )
+                     response.raise_for_status()
+
+                 except Exception as e:
+                     print("\n")
+                     console.print(Panel("[red bold]Coming soon", title="Notification", title_align="left", border_style="yellow"))
+                     sys.exit(0)
+
+                 # Parse response with BeautifulSoup to get content
+                 soup = BeautifulSoup(response.text, "html.parser")
+                 script = soup.find("body").find("script").text
+
+                 # Parse script to get video information
+                 self.parse_script(script_text=script)
+
+         except Exception as e:
+             logging.error(f"Error getting content: {e}")
+             raise
+
+     def get_playlist(self) -> str:
+         """
+         Generate authenticated playlist URL.
+
+         Returns:
+             str: Fully constructed playlist URL with authentication parameters
+         """
+         params = {}
+
+         if self.window_video.quality == 1080:
+             params['h'] = 1
+
+         if "b=1" in self.window_parameter.url:
+             params['b'] = 1
+
+         params.update({
+             "token": self.window_parameter.token,
+             "expires": self.window_parameter.expires
+         })
+
+         query_string = urlencode(params)
+         return urlunparse(urlparse(self.window_parameter.url)._replace(query=query_string))
+
+
+ class VideoSourceAnime(VideoSource):
+     def __init__(self, site_name: str):
+         """
+         Initialize anime-specific video source.
+
+         Args:
+             site_name (str): Name of anime streaming site
+
+         Extends base VideoSource with anime-specific initialization
+         """
+         self.headers = {'user-agent': get_headers()}
+         self.base_name = site_name
+         self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
+         self.src_mp4 = None
+
+     def get_embed(self, episode_id: int):
+         """
+         Retrieve embed URL and extract video source.
+
+         Args:
+             episode_id (int): Unique identifier for episode
+
+         Returns:
+             str: Parsed script content
+         """
+         try:
+
+             response = httpx.get(
+                 url=f"https://www.{self.base_name}.{self.domain}/embed-url/{episode_id}",
+                 headers=self.headers,
+                 timeout=max_timeout
+             )
+             response.raise_for_status()
+
+             # Extract and clean embed URL
+             embed_url = response.text.strip()
+             self.iframe_src = embed_url
+
+             # Fetch video content using embed URL
+             video_response = httpx.get(embed_url)
+             video_response.raise_for_status()
+
+             # Parse response with BeautifulSoup to get content of the scriot
+             soup = BeautifulSoup(video_response.text, "html.parser")
+             script = soup.find("body").find("script").text
+             self.src_mp4 = soup.find("body").find_all("script")[1].text.split(" = ")[1].replace("'", "")
+
+             return script
+
+         except Exception as e:
+             logging.error(f"Error fetching embed URL: {e}")
+             return None
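
A sketch of the call order the VideoSource and VideoSourceAnime classes above appear to expect; it is not part of the diff. The site name and numeric IDs are placeholders, and a matching 'SITE' entry with a 'domain' key is assumed to exist in the JSON config:

from StreamingCommunity.Src.Api.Player.vixcloud import VideoSource

# "streamingcommunity", 12345 and 67890 are placeholder values.
video_source = VideoSource(site_name="streamingcommunity", is_series=True)
video_source.setup(media_id=12345)           # remember which title is being processed
video_source.get_iframe(episode_id=67890)    # resolves self.iframe_src from /iframe/<media_id>
video_source.get_content()                   # downloads the iframe page and parses its embedded script
print(video_source.get_playlist())           # playlist URL signed with token/expires parameters
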
@@ -0,0 +1,50 @@
+ # 02.07.24
+
+ from unidecode import unidecode
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.console import console, msg
+
+
+ # Logic class
+ from .site import title_search, run_get_select_title, media_search_manager
+ from .title import download_title
+
+
+ # Variable
+ indice = 8
+ _useFor = "film_serie"
+ _deprecate = False
+ _priority = 2
+ _engineDownload = "tor"
+
+
+ def search(string_to_search: str = None, get_onylDatabase: bool = False):
+     """
+     Main function of the application for film and series.
+     """
+
+     if string_to_search is None:
+         string_to_search = msg.ask("\n[purple]Insert word to search in all site").strip()
+
+     # Search on database
+     len_database = title_search(unidecode(string_to_search))
+
+     # Return list of elements
+     if get_onylDatabase:
+         return media_search_manager
+
+     if len_database > 0:
+
+         # Select title from list
+         select_title = run_get_select_title()
+
+         # Download title
+         download_title(select_title)
+
+     else:
+         console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
+
+         # Retry
+         search()
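
The search() entry point above can also be driven non-interactively. A sketch, assuming the package is installed; importlib is used because several Site package names (for example 1337xx) are not valid Python identifiers, and the module name below is only an illustration of that pattern:

import importlib

# "1337xx" is used as an example site package; the search string is a placeholder.
site_module = importlib.import_module("StreamingCommunity.Src.Api.Site.1337xx")

# get_onylDatabase=True (spelled as in the source) skips the interactive selection
# and returns the populated MediaManager instead of starting a download.
results = site_module.search("big buck bunny", get_onylDatabase=True)
print(results.get_length())
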
@@ -0,0 +1,15 @@
+ # 09.06.24
+
+ import os
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+
+
+ SITE_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
+ ROOT_PATH = config_manager.get('DEFAULT', 'root_path')
+ DOMAIN_NOW = config_manager.get_dict('SITE', SITE_NAME)['domain']
+
+ SERIES_FOLDER = "Serie"
+ MOVIE_FOLDER = "Film"
@@ -0,0 +1,84 @@
+ # 02.07.24
+
+ # External libraries
+ import httpx
+ from bs4 import BeautifulSoup
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.console import console
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+ from StreamingCommunity.Src.Util.headers import get_headers
+ from StreamingCommunity.Src.Util.table import TVShowManager
+
+
+ # Logic class
+ from StreamingCommunity.Src.Api.Template import get_select_title
+ from StreamingCommunity.Src.Api.Template.Util import search_domain
+ from StreamingCommunity.Src.Api.Template.Class.SearchType import MediaManager
+
+
+ # Variable
+ from .costant import SITE_NAME
+ media_search_manager = MediaManager()
+ table_show_manager = TVShowManager()
+
+
+ def title_search(word_to_search: str) -> int:
+     """
+     Search for titles based on a search query.
+
+     Parameters:
+         - title_search (str): The title to search for.
+
+     Returns:
+         - int: The number of titles found.
+     """
+
+     # Find new domain if prev dont work
+     max_timeout = config_manager.get_int("REQUESTS", "timeout")
+     domain_to_use, _ = search_domain(SITE_NAME, f"https://{SITE_NAME}")
+
+     # Construct the full site URL and load the search page
+     try:
+         response = httpx.get(
+             url=f"https://{SITE_NAME}.{domain_to_use}/search/{word_to_search}/1/",
+             headers={'user-agent': get_headers()},
+             follow_redirects=True,
+             timeout=max_timeout
+         )
+         response.raise_for_status()
+
+     except Exception as e:
+         console.print(f"Site: {SITE_NAME}, request search error: {e}")
+
+     # Create soup and find table
+     soup = BeautifulSoup(response.text, "html.parser")
+
+     # Scrape div film in table on single page
+     for tr in soup.find_all('tr'):
+         try:
+
+             title_info = {
+                 'name': tr.find_all("a")[1].get_text(strip=True),
+                 'url': tr.find_all("a")[1].get("href"),
+                 'seader': tr.find_all("td")[-5].get_text(strip=True),
+                 'leacher': tr.find_all("td")[-4].get_text(strip=True),
+                 'date': tr.find_all("td")[-3].get_text(strip=True).replace("'", ""),
+                 'size': tr.find_all("td")[-2].get_text(strip=True)
+             }
+
+             media_search_manager.add_media(title_info)
+
+         except:
+             continue
+
+     # Return the number of titles found
+     return media_search_manager.get_length()
+
+
+ def run_get_select_title():
+     """
+     Display a selection of titles and prompt the user to choose one.
+     """
+     return get_select_title(table_show_manager, media_search_manager)
@@ -0,0 +1,66 @@
+ # 02.07.24
+
+ import os
+
+
+ # External libraries
+ import httpx
+ from bs4 import BeautifulSoup
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.console import console
+ from StreamingCommunity.Src.Util.os import os_manager
+ from StreamingCommunity.Src.Util.message import start_message
+ from StreamingCommunity.Src.Util.headers import get_headers
+ from StreamingCommunity.Src.Lib.Downloader import TOR_downloader
+
+
+ # Logic class
+ from StreamingCommunity.Src.Api.Template.Class.SearchType import MediaItem
+
+
+ # Config
+ from .costant import ROOT_PATH, DOMAIN_NOW, SITE_NAME, MOVIE_FOLDER
+
+
+ def download_title(select_title: MediaItem):
+     """
+     Downloads a media item and saves it as an MP4 file.
+
+     Parameters:
+         - select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
+     """
+
+     start_message()
+     console.print(f"[yellow]Download: [red]{select_title.name} \n")
+     print()
+
+     # Define output path
+     title_name = os_manager.get_sanitize_file(select_title.name)
+     mp4_path = os_manager.get_sanitize_path(
+         os.path.join(ROOT_PATH, SITE_NAME, MOVIE_FOLDER, title_name.replace(".mp4", ""))
+     )
+
+     # Create output folder
+     os_manager.create_path(mp4_path)
+
+     # Make request to page with magnet
+     full_site_name = f"{SITE_NAME}.{DOMAIN_NOW}"
+     response = httpx.get(
+         url="https://" + full_site_name + select_title.url,
+         headers={
+             'user-agent': get_headers()
+         },
+         follow_redirects=True
+     )
+
+     # Create soup and find table
+     soup = BeautifulSoup(response.text, "html.parser")
+     final_url = soup.find("a", class_="torrentdown1").get("href")
+
+     # Tor manager
+     manager = TOR_downloader()
+     manager.add_magnet_link(final_url)
+     manager.start_download()
+     manager.move_downloaded_files(mp4_path)
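
download_title() above feeds the scraped torrent link into TOR_downloader. A stripped-down sketch of that same pattern, not part of the diff; the magnet URI and destination folder are placeholders, and TOR_downloader's interface is taken only from the calls visible above:

from StreamingCommunity.Src.Lib.Downloader import TOR_downloader

# Placeholder magnet link and destination; in download_title() these come from the
# scraped page and from ROOT_PATH/SITE_NAME/MOVIE_FOLDER.
manager = TOR_downloader()
manager.add_magnet_link("magnet:?xt=urn:btih:PLACEHOLDER")
manager.start_download()
manager.move_downloaded_files("/downloads/Film")
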
@@ -0,0 +1,50 @@
+ # 26.05.24
+
+ from unidecode import unidecode
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.console import console, msg
+
+
+ # Logic class
+ from .site import title_search, run_get_select_title, media_search_manager
+ from .film import download_film
+
+
+ # Variable
+ indice = 2
+ _useFor = "film"
+ _deprecate = False
+ _priority = 2
+ _engineDownload = "hls"
+
+
+ def search(string_to_search: str = None, get_onylDatabase: bool = False):
+     """
+     Main function of the application for film and series.
+     """
+
+     if string_to_search is None:
+         string_to_search = msg.ask("\n[purple]Insert word to search in all site").strip()
+
+     # Search on database
+     len_database = title_search(unidecode(string_to_search))
+
+     # Return list of elements
+     if get_onylDatabase:
+         return media_search_manager
+
+     if len_database > 0:
+
+         # Select title from list
+         select_title = run_get_select_title()
+
+         # Download only film
+         download_film(select_title)
+
+     else:
+         console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
+
+         # Retry
+         search()
@@ -0,0 +1,15 @@
+ # 26.05.24
+
+ import os
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+
+
+ SITE_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
+ ROOT_PATH = config_manager.get('DEFAULT', 'root_path')
+ DOMAIN_NOW = config_manager.get_dict('SITE', SITE_NAME)['domain']
+
+ MOVIE_FOLDER = "Movie"
+ SERIES_FOLDER = "Serie"