StreamingCommunity 3.2.1__py3-none-any.whl → 3.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic. Click here for more details.

Files changed (53) hide show
  1. StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +4 -0
  2. StreamingCommunity/Api/Player/hdplayer.py +2 -2
  3. StreamingCommunity/Api/Player/mixdrop.py +1 -1
  4. StreamingCommunity/Api/Player/vixcloud.py +4 -5
  5. StreamingCommunity/Api/Site/crunchyroll/__init__.py +103 -0
  6. StreamingCommunity/Api/Site/crunchyroll/film.py +83 -0
  7. StreamingCommunity/Api/Site/crunchyroll/series.py +182 -0
  8. StreamingCommunity/Api/Site/crunchyroll/site.py +113 -0
  9. StreamingCommunity/Api/Site/crunchyroll/util/ScrapeSerie.py +218 -0
  10. StreamingCommunity/Api/Site/crunchyroll/util/get_license.py +227 -0
  11. StreamingCommunity/Api/Site/guardaserie/site.py +1 -2
  12. StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +9 -8
  13. StreamingCommunity/Api/Site/mediasetinfinity/__init__.py +96 -0
  14. StreamingCommunity/Api/Site/mediasetinfinity/film.py +76 -0
  15. StreamingCommunity/Api/Site/mediasetinfinity/series.py +177 -0
  16. StreamingCommunity/Api/Site/mediasetinfinity/site.py +112 -0
  17. StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py +259 -0
  18. StreamingCommunity/Api/Site/mediasetinfinity/util/fix_mpd.py +64 -0
  19. StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py +217 -0
  20. StreamingCommunity/Api/Site/streamingcommunity/__init__.py +6 -17
  21. StreamingCommunity/Api/Site/streamingcommunity/film.py +2 -2
  22. StreamingCommunity/Api/Site/streamingcommunity/series.py +9 -9
  23. StreamingCommunity/Api/Site/streamingcommunity/site.py +2 -4
  24. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +3 -6
  25. StreamingCommunity/Api/Site/streamingwatch/__init__.py +6 -14
  26. StreamingCommunity/Api/Site/streamingwatch/film.py +2 -2
  27. StreamingCommunity/Api/Site/streamingwatch/series.py +9 -9
  28. StreamingCommunity/Api/Site/streamingwatch/site.py +5 -7
  29. StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +2 -2
  30. StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py +131 -0
  31. StreamingCommunity/Lib/Downloader/DASH/decrypt.py +79 -0
  32. StreamingCommunity/Lib/Downloader/DASH/downloader.py +220 -0
  33. StreamingCommunity/Lib/Downloader/DASH/parser.py +249 -0
  34. StreamingCommunity/Lib/Downloader/DASH/segments.py +332 -0
  35. StreamingCommunity/Lib/Downloader/HLS/downloader.py +1 -14
  36. StreamingCommunity/Lib/Downloader/HLS/segments.py +3 -3
  37. StreamingCommunity/Lib/Downloader/MP4/downloader.py +0 -5
  38. StreamingCommunity/Lib/FFmpeg/capture.py +3 -3
  39. StreamingCommunity/Lib/FFmpeg/command.py +1 -1
  40. StreamingCommunity/TelegramHelp/config.json +3 -5
  41. StreamingCommunity/Upload/version.py +1 -1
  42. StreamingCommunity/Util/os.py +21 -0
  43. StreamingCommunity/run.py +1 -1
  44. {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.5.dist-info}/METADATA +4 -2
  45. {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.5.dist-info}/RECORD +49 -35
  46. StreamingCommunity/Api/Site/1337xx/__init__.py +0 -72
  47. StreamingCommunity/Api/Site/1337xx/site.py +0 -82
  48. StreamingCommunity/Api/Site/1337xx/title.py +0 -61
  49. StreamingCommunity/Lib/Proxies/proxy.py +0 -72
  50. {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.5.dist-info}/WHEEL +0 -0
  51. {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.5.dist-info}/entry_points.txt +0 -0
  52. {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.5.dist-info}/licenses/LICENSE +0 -0
  53. {streamingcommunity-3.2.1.dist-info → streamingcommunity-3.2.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,217 @@
1
+ # 16.03.25
2
+
3
+ from urllib.parse import urlencode
4
+ import xml.etree.ElementTree as ET
5
+
6
+
7
+ # External library
8
+ import httpx
9
+
10
+
11
+ # Internal utilities
12
+ from StreamingCommunity.Util.config_json import config_manager
13
+ from StreamingCommunity.Util.headers import get_headers, get_userAgent
14
+
15
+
16
+ # Variable
17
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
18
+ bearer_token = None
19
+ playback_json = None
20
+
21
+
22
def get_bearer_token():
    """
    Gets the BEARER_TOKEN for authentication.

    Performs an anonymous login against the Mediaset OTT API and caches the
    result in the module-level ``bearer_token`` so subsequent calls skip the
    network round trip.

    Returns:
        str: The bearer token string.

    Raises:
        RuntimeError: If the login request fails or the response is malformed.
    """
    global bearer_token

    # Return the cached token if a previous call already fetched it.
    if bearer_token:
        return bearer_token

    LOGIN_URL = "https://api-ott-prod-fe.mediaset.net/PROD/play/idm/anonymous/login/v2.0"

    try:
        response = httpx.post(
            LOGIN_URL,
            json={'client_id': 'client_id', 'appName': 'embed//mediasetplay-embed'},
            follow_redirects=True,
            timeout=MAX_TIMEOUT
        )
        response.raise_for_status()

        # Extract the bearer token from the response
        data = response.json()
        bearer_token = data["response"]["beToken"]
        return bearer_token

    except Exception as e:
        # Chain the cause so the original traceback stays visible to callers.
        raise RuntimeError(f"Failed to get bearer token: {e}") from e
52
+
53
def get_playback_url(BEARER_TOKEN, CONTENT_ID):
    """
    Gets the playback URL for the specified content.

    Responses are cached per content id. The previous implementation cached a
    single global response, so a second call with a *different* CONTENT_ID
    returned the stale playback data of the first one.

    Args:
        BEARER_TOKEN (str): The authentication token.
        CONTENT_ID (str): The content identifier.

    Returns:
        dict: The playback JSON object (the 'mediaSelector' section).

    Raises:
        RuntimeError: If the request fails, or the content needs an Infinity+
            subscription (PL022) or a rental purchase (PL402).
    """
    global playback_json

    # Per-content cache kept as a function attribute (avoids a new module global).
    cache = getattr(get_playback_url, "_cache", None)
    if cache is None:
        cache = {}
        get_playback_url._cache = cache

    if CONTENT_ID in cache:
        playback_json = cache[CONTENT_ID]
        return playback_json

    headers = get_headers()
    headers['authorization'] = f'Bearer {BEARER_TOKEN}'

    json_data = {
        'contentId': CONTENT_ID,
        'streamType': 'VOD',
        'delivery': 'Streaming',
        'createDevice': True,
        'overrideAppName': 'web//mediasetplay-web/5.11.8-f16d93c',
    }

    try:
        response = httpx.post(
            'https://api-ott-prod-fe.mediaset.net/PROD/play/playback/check/v2.0',
            headers=headers,
            json=json_data,
            follow_redirects=True,
            timeout=MAX_TIMEOUT
        )
        response.raise_for_status()
        resp_json = response.json()

        # Check for PL022 error (Infinity+ rights)
        if 'error' in resp_json and resp_json['error'].get('code') == 'PL022':
            raise RuntimeError("Infinity+ required for this content.")

        # Check for PL402 error (TVOD not purchased)
        if 'error' in resp_json and resp_json['error'].get('code') == 'PL402':
            raise RuntimeError("Content available for rental: you must rent it first.")

        playback_json = resp_json['response']['mediaSelector']
        cache[CONTENT_ID] = playback_json
        return playback_json

    except Exception as e:
        # Chain the cause so debugging keeps the original traceback.
        raise RuntimeError(f"Failed to get playback URL: {e}") from e
104
+
105
def parse_tracking_data(tracking_value):
    """
    Parses the trackingData string into a dictionary.

    The raw value is a '|'-separated list of 'key=value' entries; entries
    without an '=' are ignored. Only the first '=' splits key from value.

    Args:
        tracking_value (str): The tracking data string.

    Returns:
        dict: Parsed tracking data.
    """
    parsed = {}
    for entry in tracking_value.split('|'):
        if '=' not in entry:
            continue
        key, value = entry.split('=', 1)
        parsed[key] = value
    return parsed
116
+
117
def parse_smil_for_tracking_and_video(smil_xml):
    """
    Extracts all video_src and trackingData pairs from the SMIL.

    Handles SMIL documents both with and without a default XML namespace;
    the original code derived the namespace from the root tag and silently
    matched nothing when the document was not namespaced.

    Args:
        smil_xml (str): The SMIL XML as a string.

    Returns:
        list: A list of dicts: {'video_src': ..., 'tracking_info': ...}.
              Pairs are only appended when BOTH values were found.
    """
    results = []
    root = ET.fromstring(smil_xml)

    # Build namespace-aware (or plain) XPath expressions from the root tag.
    if root.tag.startswith('{'):
        ns = {'smil': root.tag[1:].split('}', 1)[0]}
        par_path, video_path, ref_path, param_path = (
            './/smil:par', './/smil:video', './/smil:ref', './/smil:param'
        )
    else:
        ns = {}
        par_path, video_path, ref_path, param_path = (
            './/par', './/video', './/ref', './/param'
        )

    # Search all <par>
    for par in root.findall(par_path, ns):
        video_src = None
        tracking_info = None

        # Search <video> inside <par>
        video_elem = par.find(video_path, ns)
        if video_elem is not None:
            video_src = video_elem.attrib.get('src')

        # Search <ref> inside <par>
        ref_elem = par.find(ref_path, ns)
        if ref_elem is not None:
            # Search <param name="trackingData">
            for param in ref_elem.findall(param_path, ns):
                if param.attrib.get('name') == 'trackingData':
                    tracking_value = param.attrib.get('value')
                    if tracking_value:
                        tracking_info = parse_tracking_data(tracking_value)
                    break

        if video_src and tracking_info:
            results.append({'video_src': video_src, 'tracking_info': tracking_info})

    return results
156
+
157
def get_tracking_info(BEARER_TOKEN, PLAYBACK_JSON):
    """
    Retrieves tracking information from the playback JSON.

    Fetches the SMIL manifest referenced by ``PLAYBACK_JSON['url']`` and
    parses the video/trackingData pairs out of it. Failures are swallowed
    deliberately (best-effort): callers treat ``None`` as "no tracking info".

    Args:
        BEARER_TOKEN (str): The authentication token.
        PLAYBACK_JSON (dict): The playback JSON object; must contain 'url',
            may contain 'publicUrl'.

    Returns:
        list or None: List of tracking info dicts, or None if the request fails.
    """
    params = {
        "format": "SMIL",
        "auth": BEARER_TOKEN,
        "formats": "MPEG-DASH",
        "assetTypes": "HR,browser,widevine,geoIT|geoNo:HR,browser,geoIT|geoNo:SD,browser,widevine,geoIT|geoNo:SD,browser,geoIT|geoNo:SS,browser,widevine,geoIT|geoNo:SS,browser,geoIT|geoNo",
        "balance": "true",
        "auto": "true",
        "tracking": "true",
        "delivery": "Streaming"
    }

    # Optional parameter: only forwarded when the playback JSON provides it.
    if 'publicUrl' in PLAYBACK_JSON:
        params['publicUrl'] = PLAYBACK_JSON['publicUrl']

    try:
        response = httpx.get(
            PLAYBACK_JSON['url'],
            headers={'user-agent': get_userAgent()},
            params=params,
            follow_redirects=True,
            timeout=MAX_TIMEOUT
        )
        response.raise_for_status()

        smil_xml = response.text
        results = parse_smil_for_tracking_and_video(smil_xml)
        return results

    except Exception:
        # Best-effort by design: any network/parse error degrades to None.
        return None
198
+
199
def generate_license_url(BEARER_TOKEN, tracking_info):
    """
    Generates the URL to obtain the Widevine license.

    Args:
        BEARER_TOKEN (str): The authentication token.
        tracking_info (dict): A dict with a 'tracking_info' sub-dict holding
            the 'pid' (release id) and 'aid' (account id) keys.

    Returns:
        str: The full license URL with query parameters encoded.
    """
    LICENSE_BASE_URL = "https://widevine.entitlement.theplatform.eu/wv/web/ModularDrm/getRawWidevineLicense"

    params = {
        'releasePid': tracking_info['tracking_info'].get('pid'),
        'account': f"http://access.auth.theplatform.com/data/Account/{tracking_info['tracking_info'].get('aid')}",
        'schema': '1.0',
        'token': BEARER_TOKEN,
    }

    # The original embedded the base URL as a literal inside an f-string
    # expression; a plain constant reads the same and avoids the indirection.
    return f"{LICENSE_BASE_URL}?{urlencode(params)}"
@@ -12,7 +12,6 @@ from rich.prompt import Prompt
12
12
 
13
13
  # Internal utilities
14
14
  from StreamingCommunity.Api.Template import get_select_title
15
- from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
16
15
  from StreamingCommunity.Api.Template.config_loader import site_constant
17
16
  from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
18
17
  from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
@@ -33,7 +32,6 @@ _deprecate = False
33
32
 
34
33
  msg = Prompt()
35
34
  console = Console()
36
- proxy = None
37
35
 
38
36
 
39
37
  def get_user_input(string_to_search: str = None):
@@ -74,7 +72,7 @@ def get_user_input(string_to_search: str = None):
74
72
  else:
75
73
  return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
76
74
 
77
- def process_search_result(select_title, selections=None, proxy=None):
75
+ def process_search_result(select_title, selections=None):
78
76
  """
79
77
  Handles the search result and initiates the download for either a film or series.
80
78
 
@@ -82,7 +80,6 @@ def process_search_result(select_title, selections=None, proxy=None):
82
80
  select_title (MediaItem): The selected media item. Can be None if selection fails.
83
81
  selections (dict, optional): Dictionary containing selection inputs that bypass manual input
84
82
  e.g., {'season': season_selection, 'episode': episode_selection}
85
- proxy (str, optional): The proxy to use for downloads.
86
83
  """
87
84
  if not select_title:
88
85
  if site_constant.TELEGRAM_BOT:
@@ -100,10 +97,10 @@ def process_search_result(select_title, selections=None, proxy=None):
100
97
  season_selection = selections.get('season')
101
98
  episode_selection = selections.get('episode')
102
99
 
103
- download_series(select_title, season_selection, episode_selection, proxy)
100
+ download_series(select_title, season_selection, episode_selection)
104
101
 
105
102
  else:
106
- download_film(select_title, proxy)
103
+ download_film(select_title)
107
104
 
108
105
  def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
109
106
  """
@@ -121,17 +118,11 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
121
118
  if site_constant.TELEGRAM_BOT:
122
119
  bot = get_bot_instance()
123
120
 
124
- # Check proxy if not already set
125
- finder = ProxyFinder(site_constant.FULL_URL)
126
- proxy = finder.find_fast_proxy()
127
-
128
121
  if direct_item:
129
122
  select_title_obj = MediaItem(**direct_item)
130
- process_search_result(select_title_obj, selections, proxy)
123
+ process_search_result(select_title_obj, selections)
131
124
  return
132
125
 
133
-
134
-
135
126
  actual_search_query = get_user_input(string_to_search)
136
127
 
137
128
  # Handle cases where user input is empty, or 'back' was handled (sys.exit or None return)
@@ -142,9 +133,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
142
133
  return
143
134
 
144
135
  # Perform search on the database using the obtained query
145
- finder = ProxyFinder(site_constant.FULL_URL)
146
- proxy = finder.find_fast_proxy()
147
- len_database = title_search(actual_search_query, proxy)
136
+ len_database = title_search(actual_search_query)
148
137
 
149
138
  # If only the database object (media_search_manager populated by title_search) is needed
150
139
  if get_onlyDatabase:
@@ -152,7 +141,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
152
141
 
153
142
  if len_database > 0:
154
143
  select_title = get_select_title(table_show_manager, media_search_manager, len_database)
155
- process_search_result(select_title, selections, proxy)
144
+ process_search_result(select_title, selections)
156
145
 
157
146
  else:
158
147
  no_results_message = f"No results found for: '{actual_search_query}'"
@@ -27,7 +27,7 @@ from StreamingCommunity.Api.Player.vixcloud import VideoSource
27
27
  console = Console()
28
28
 
29
29
 
30
- def download_film(select_title: MediaItem, proxy: str = None) -> str:
30
+ def download_film(select_title: MediaItem) -> str:
31
31
  """
32
32
  Downloads a film using the provided film ID, title name, and domain.
33
33
 
@@ -55,7 +55,7 @@ def download_film(select_title: MediaItem, proxy: str = None) -> str:
55
55
  console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
56
56
 
57
57
  # Init class
58
- video_source = VideoSource(f"{site_constant.FULL_URL}/it", False, select_title.id, proxy)
58
+ video_source = VideoSource(f"{site_constant.FULL_URL}/it", False, select_title.id)
59
59
 
60
60
  # Retrieve scws and if available master playlist
61
61
  video_source.get_iframe(select_title.id)
@@ -142,7 +142,7 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, vid
142
142
  break
143
143
 
144
144
 
145
- def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None, proxy = None) -> None:
145
+ def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
146
146
  """
147
147
  Handle downloading a complete series.
148
148
 
@@ -154,8 +154,8 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
154
154
  start_message()
155
155
 
156
156
  # Init class
157
- video_source = VideoSource(f"{site_constant.FULL_URL}/it", True, select_season.id, proxy)
158
- scrape_serie = GetSerieInfo(f"{site_constant.FULL_URL}/it", select_season.id, select_season.slug, proxy)
157
+ video_source = VideoSource(f"{site_constant.FULL_URL}/it", True, select_season.id)
158
+ scrape_serie = GetSerieInfo(f"{site_constant.FULL_URL}/it", select_season.id, select_season.slug)
159
159
 
160
160
  # Collect information about season
161
161
  scrape_serie.getNumberSeason()
@@ -200,11 +200,11 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
200
200
 
201
201
  # Loop through the selected seasons and download episodes
202
202
  for i_season in list_season_select:
203
- season = None
204
- for s in scrape_serie.seasons_manager.seasons:
205
- if s.number == i_season:
206
- season = s
207
- break
203
+ try:
204
+ season = scrape_serie.seasons_manager.seasons[i_season - 1]
205
+ except IndexError:
206
+ console.print(f"[red]Season index {i_season} not found! Available seasons: {[s.number for s in scrape_serie.seasons_manager.seasons]}")
207
+ continue
208
208
  season_number = season.number
209
209
 
210
210
  if len(list_season_select) > 1 or index_season_selected == "*":
@@ -219,4 +219,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
219
219
  # Get script_id
220
220
  script_id = TelegramSession.get_session()
221
221
  if script_id != "unknown":
222
- TelegramSession.deleteScriptId(script_id)
222
+ TelegramSession.deleteScriptId(script_id)
@@ -28,7 +28,7 @@ table_show_manager = TVShowManager()
28
28
  max_timeout = config_manager.get_int("REQUESTS", "timeout")
29
29
 
30
30
 
31
- def title_search(query: str, proxy: str) -> int:
31
+ def title_search(query: str) -> int:
32
32
  """
33
33
  Search for titles based on a search query.
34
34
 
@@ -49,7 +49,6 @@ def title_search(query: str, proxy: str) -> int:
49
49
  f"{site_constant.FULL_URL}/it",
50
50
  headers={'user-agent': get_userAgent()},
51
51
  timeout=max_timeout,
52
- proxy=proxy,
53
52
  follow_redirects=True
54
53
  )
55
54
  response.raise_for_status()
@@ -74,8 +73,7 @@ def title_search(query: str, proxy: str) -> int:
74
73
  'x-inertia': 'true',
75
74
  'x-inertia-version': version
76
75
  },
77
- timeout=max_timeout,
78
- proxy=proxy
76
+ timeout=max_timeout
79
77
  )
80
78
  response.raise_for_status()
81
79
 
@@ -20,7 +20,7 @@ max_timeout = config_manager.get_int("REQUESTS", "timeout")
20
20
 
21
21
 
22
22
  class GetSerieInfo:
23
- def __init__(self, url, media_id: int = None, series_name: str = None, proxy = None):
23
+ def __init__(self, url, media_id: int = None, series_name: str = None):
24
24
  """
25
25
  Initialize the GetSerieInfo class for scraping TV series information.
26
26
 
@@ -32,7 +32,6 @@ class GetSerieInfo:
32
32
  self.is_series = False
33
33
  self.headers = {'user-agent': get_userAgent()}
34
34
  self.url = url
35
- self.proxy = proxy
36
35
  self.media_id = media_id
37
36
  self.seasons_manager = SeasonManager()
38
37
 
@@ -51,8 +50,7 @@ class GetSerieInfo:
51
50
  response = httpx.get(
52
51
  url=f"{self.url}/titles/{self.media_id}-{self.series_name}",
53
52
  headers=self.headers,
54
- timeout=max_timeout,
55
- proxy=self.proxy
53
+ timeout=max_timeout
56
54
  )
57
55
  response.raise_for_status()
58
56
 
@@ -106,8 +104,7 @@ class GetSerieInfo:
106
104
  'x-inertia': 'true',
107
105
  'x-inertia-version': self.version,
108
106
  },
109
- timeout=max_timeout,
110
- proxy=self.proxy
107
+ timeout=max_timeout
111
108
  )
112
109
 
113
110
  # Extract episodes from JSON response
@@ -7,7 +7,6 @@ from rich.prompt import Prompt
7
7
 
8
8
  # Internal utilities
9
9
  from StreamingCommunity.Api.Template import get_select_title
10
- from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
11
10
  from StreamingCommunity.Api.Template.config_loader import site_constant
12
11
  from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
13
12
 
@@ -27,7 +26,6 @@ _deprecate = False
27
26
 
28
27
  msg = Prompt()
29
28
  console = Console()
30
- proxy = None
31
29
 
32
30
 
33
31
  def get_user_input(string_to_search: str = None):
@@ -38,7 +36,7 @@ def get_user_input(string_to_search: str = None):
38
36
  string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
39
37
  return string_to_search
40
38
 
41
- def process_search_result(select_title, selections=None, proxy=None):
39
+ def process_search_result(select_title, selections=None):
42
40
  """
43
41
  Handles the search result and initiates the download for either a film or series.
44
42
 
@@ -55,10 +53,10 @@ def process_search_result(select_title, selections=None, proxy=None):
55
53
  season_selection = selections.get('season')
56
54
  episode_selection = selections.get('episode')
57
55
 
58
- download_series(select_title, season_selection, episode_selection, proxy)
56
+ download_series(select_title, season_selection, episode_selection)
59
57
 
60
58
  else:
61
- download_film(select_title, proxy)
59
+ download_film(select_title)
62
60
 
63
61
  def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
64
62
  """
@@ -73,20 +71,14 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
73
71
  """
74
72
  if direct_item:
75
73
  select_title = MediaItem(**direct_item)
76
- process_search_result(select_title, selections) # DONT SUPPORT PROXY FOR NOW
74
+ process_search_result(select_title, selections)
77
75
  return
78
76
 
79
- # Check proxy if not already set
80
- finder = ProxyFinder(site_constant.FULL_URL)
81
- proxy = finder.find_fast_proxy()
82
-
83
77
  if string_to_search is None:
84
78
  string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
85
79
 
86
80
  # Perform search on the database using the obtained query
87
- finder = ProxyFinder(url=f"{site_constant.FULL_URL}/serie/euphoria/")
88
- proxy = finder.find_fast_proxy()
89
- len_database = title_search(string_to_search, proxy)
81
+ len_database = title_search(string_to_search)
90
82
 
91
83
  # If only the database is needed, return the manager
92
84
  if get_onlyDatabase:
@@ -94,7 +86,7 @@ def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_
94
86
 
95
87
  if len_database > 0:
96
88
  select_title = get_select_title(table_show_manager, media_search_manager,len_database)
97
- process_search_result(select_title, selections, proxy)
89
+ process_search_result(select_title, selections)
98
90
 
99
91
  else:
100
92
  # If no results are found, ask again
@@ -26,7 +26,7 @@ from StreamingCommunity.Api.Player.hdplayer import VideoSource
26
26
  console = Console()
27
27
 
28
28
 
29
- def download_film(select_title: MediaItem, proxy) -> str:
29
+ def download_film(select_title: MediaItem) -> str:
30
30
  """
31
31
  Downloads a film using the provided film ID, title name, and domain.
32
32
 
@@ -41,7 +41,7 @@ def download_film(select_title: MediaItem, proxy) -> str:
41
41
  console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
42
42
 
43
43
  # Get master playlists
44
- video_source = VideoSource(proxy)
44
+ video_source = VideoSource()
45
45
  master_playlist = video_source.get_m3u8_url(select_title.url)
46
46
 
47
47
  # Define the filename and path for the downloaded film
@@ -36,7 +36,7 @@ msg = Prompt()
36
36
  console = Console()
37
37
 
38
38
 
39
- def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo, proxy=None) -> Tuple[str,bool]:
39
+ def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
40
40
  """
41
41
  Downloads a specific episode from a specified season.
42
42
 
@@ -60,7 +60,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
60
60
  mp4_path = os.path.join(site_constant.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")
61
61
 
62
62
  # Retrieve scws and if available master playlist
63
- video_source = VideoSource(proxy)
63
+ video_source = VideoSource()
64
64
  master_playlist = video_source.get_m3u8_url(obj_episode.url)
65
65
 
66
66
  # Download the episode
@@ -76,7 +76,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
76
76
  return r_proc['path'], r_proc['stopped']
77
77
 
78
78
 
79
- def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None, proxy = None) -> None:
79
+ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
80
80
  """
81
81
  Handle downloading episodes for a specific season.
82
82
 
@@ -92,7 +92,7 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
92
92
 
93
93
  if download_all:
94
94
  for i_episode in range(1, episodes_count + 1):
95
- path, stopped = download_video(index_season_selected, i_episode, scrape_serie, proxy)
95
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
96
96
 
97
97
  if stopped:
98
98
  break
@@ -113,12 +113,12 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
113
113
 
114
114
  # Download selected episodes if not stopped
115
115
  for i_episode in list_episode_select:
116
- path, stopped = download_video(index_season_selected, i_episode, scrape_serie, proxy)
116
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
117
117
 
118
118
  if stopped:
119
119
  break
120
120
 
121
- def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None, proxy = None) -> None:
121
+ def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
122
122
  """
123
123
  Handle downloading a complete series.
124
124
 
@@ -127,7 +127,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
127
127
  - season_selection (str, optional): Pre-defined season selection that bypasses manual input
128
128
  - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
129
129
  """
130
- scrape_serie = GetSerieInfo(select_season.url, proxy)
130
+ scrape_serie = GetSerieInfo(select_season.url)
131
131
 
132
132
  # Get total number of seasons
133
133
  seasons_count = scrape_serie.getNumberSeason()
@@ -154,7 +154,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
154
154
  for i_season in list_season_select:
155
155
  if len(list_season_select) > 1 or index_season_selected == "*":
156
156
  # Download all episodes if multiple seasons are selected or if '*' is used
157
- download_episode(i_season, scrape_serie, download_all=True, proxy=proxy)
157
+ download_episode(i_season, scrape_serie, download_all=True)
158
158
  else:
159
159
  # Otherwise, let the user select specific episodes for the single season
160
- download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection, proxy=proxy)
160
+ download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
@@ -27,13 +27,12 @@ table_show_manager = TVShowManager()
27
27
  max_timeout = config_manager.get_int("REQUESTS", "timeout")
28
28
 
29
29
 
30
- def extract_nonce(proxy) -> str:
30
+ def extract_nonce() -> str:
31
31
  """Extract nonce value from the page script"""
32
32
  response = httpx.get(
33
33
  site_constant.FULL_URL,
34
34
  headers={'user-agent': get_userAgent()},
35
- timeout=max_timeout,
36
- proxy=proxy
35
+ timeout=max_timeout
37
36
  )
38
37
 
39
38
  soup = BeautifulSoup(response.content, 'html.parser')
@@ -45,7 +44,7 @@ def extract_nonce(proxy) -> str:
45
44
  return ""
46
45
 
47
46
 
48
- def title_search(query: str, proxy: str) -> int:
47
+ def title_search(query: str) -> int:
49
48
  """
50
49
  Search for titles based on a search query.
51
50
 
@@ -62,7 +61,7 @@ def title_search(query: str, proxy: str) -> int:
62
61
  console.print(f"[cyan]Search url: [yellow]{search_url}")
63
62
 
64
63
  try:
65
- _wpnonce = extract_nonce(proxy)
64
+ _wpnonce = extract_nonce()
66
65
 
67
66
  if not _wpnonce:
68
67
  console.print("[red]Error: Failed to extract nonce")
@@ -81,8 +80,7 @@ def title_search(query: str, proxy: str) -> int:
81
80
  'user-agent': get_userAgent()
82
81
  },
83
82
  data=data,
84
- timeout=max_timeout,
85
- proxy=proxy
83
+ timeout=max_timeout
86
84
  )
87
85
  response.raise_for_status()
88
86
  soup = BeautifulSoup(response.text, 'html.parser')
@@ -19,13 +19,13 @@ max_timeout = config_manager.get_int("REQUESTS", "timeout")
19
19
 
20
20
 
21
21
  class GetSerieInfo:
22
- def __init__(self, url, proxy: str = None):
22
+ def __init__(self, url):
23
23
  self.headers = {'user-agent': get_userAgent()}
24
24
  self.url = url
25
25
  self.seasons_manager = SeasonManager()
26
26
  self.series_name = None
27
27
 
28
- self.client = httpx.Client(headers=self.headers, proxy=proxy, timeout=max_timeout)
28
+ self.client = httpx.Client(headers=self.headers, timeout=max_timeout)
29
29
 
30
30
  def collect_info_season(self) -> None:
31
31
  """