StreamingCommunity 3.3.8-py3-none-any.whl → 3.4.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of StreamingCommunity might be problematic.

Files changed (64)
  1. StreamingCommunity/Api/Player/hdplayer.py +0 -5
  2. StreamingCommunity/Api/Player/mediapolisvod.py +4 -13
  3. StreamingCommunity/Api/Player/supervideo.py +3 -8
  4. StreamingCommunity/Api/Player/sweetpixel.py +1 -9
  5. StreamingCommunity/Api/Player/vixcloud.py +5 -16
  6. StreamingCommunity/Api/Site/altadefinizione/film.py +4 -15
  7. StreamingCommunity/Api/Site/altadefinizione/site.py +2 -7
  8. StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py +2 -7
  9. StreamingCommunity/Api/Site/animeunity/site.py +9 -24
  10. StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +11 -27
  11. StreamingCommunity/Api/Site/animeworld/film.py +4 -2
  12. StreamingCommunity/Api/Site/animeworld/site.py +3 -11
  13. StreamingCommunity/Api/Site/animeworld/util/ScrapeSerie.py +1 -4
  14. StreamingCommunity/Api/Site/crunchyroll/film.py +17 -8
  15. StreamingCommunity/Api/Site/crunchyroll/series.py +8 -9
  16. StreamingCommunity/Api/Site/crunchyroll/site.py +14 -16
  17. StreamingCommunity/Api/Site/crunchyroll/util/ScrapeSerie.py +18 -65
  18. StreamingCommunity/Api/Site/crunchyroll/util/get_license.py +97 -106
  19. StreamingCommunity/Api/Site/guardaserie/site.py +4 -12
  20. StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +3 -10
  21. StreamingCommunity/Api/Site/mediasetinfinity/film.py +11 -12
  22. StreamingCommunity/Api/Site/mediasetinfinity/series.py +1 -2
  23. StreamingCommunity/Api/Site/mediasetinfinity/site.py +3 -11
  24. StreamingCommunity/Api/Site/mediasetinfinity/util/ScrapeSerie.py +39 -50
  25. StreamingCommunity/Api/Site/mediasetinfinity/util/fix_mpd.py +3 -3
  26. StreamingCommunity/Api/Site/mediasetinfinity/util/get_license.py +8 -26
  27. StreamingCommunity/Api/Site/raiplay/film.py +6 -7
  28. StreamingCommunity/Api/Site/raiplay/series.py +1 -12
  29. StreamingCommunity/Api/Site/raiplay/site.py +8 -24
  30. StreamingCommunity/Api/Site/raiplay/util/ScrapeSerie.py +15 -22
  31. StreamingCommunity/Api/Site/raiplay/util/get_license.py +3 -12
  32. StreamingCommunity/Api/Site/streamingcommunity/film.py +5 -16
  33. StreamingCommunity/Api/Site/streamingcommunity/site.py +3 -22
  34. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +11 -26
  35. StreamingCommunity/Api/Site/streamingwatch/__init__.py +1 -0
  36. StreamingCommunity/Api/Site/streamingwatch/film.py +4 -2
  37. StreamingCommunity/Api/Site/streamingwatch/series.py +1 -1
  38. StreamingCommunity/Api/Site/streamingwatch/site.py +4 -18
  39. StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +0 -3
  40. StreamingCommunity/Api/Template/config_loader.py +0 -7
  41. StreamingCommunity/Lib/Downloader/DASH/cdm_helpher.py +8 -3
  42. StreamingCommunity/Lib/Downloader/DASH/decrypt.py +55 -1
  43. StreamingCommunity/Lib/Downloader/DASH/downloader.py +139 -55
  44. StreamingCommunity/Lib/Downloader/DASH/parser.py +458 -101
  45. StreamingCommunity/Lib/Downloader/DASH/segments.py +131 -74
  46. StreamingCommunity/Lib/Downloader/HLS/downloader.py +31 -50
  47. StreamingCommunity/Lib/Downloader/HLS/segments.py +266 -365
  48. StreamingCommunity/Lib/Downloader/MP4/downloader.py +1 -1
  49. StreamingCommunity/Lib/FFmpeg/capture.py +37 -5
  50. StreamingCommunity/Lib/FFmpeg/command.py +35 -93
  51. StreamingCommunity/Lib/M3U8/estimator.py +0 -1
  52. StreamingCommunity/Lib/TMBD/tmdb.py +2 -4
  53. StreamingCommunity/TelegramHelp/config.json +0 -1
  54. StreamingCommunity/Upload/version.py +1 -1
  55. StreamingCommunity/Util/config_json.py +28 -21
  56. StreamingCommunity/Util/http_client.py +28 -0
  57. StreamingCommunity/Util/os.py +16 -6
  58. {streamingcommunity-3.3.8.dist-info → streamingcommunity-3.4.0.dist-info}/METADATA +1 -3
  59. streamingcommunity-3.4.0.dist-info/RECORD +111 -0
  60. streamingcommunity-3.3.8.dist-info/RECORD +0 -111
  61. {streamingcommunity-3.3.8.dist-info → streamingcommunity-3.4.0.dist-info}/WHEEL +0 -0
  62. {streamingcommunity-3.3.8.dist-info → streamingcommunity-3.4.0.dist-info}/entry_points.txt +0 -0
  63. {streamingcommunity-3.3.8.dist-info → streamingcommunity-3.4.0.dist-info}/licenses/LICENSE +0 -0
  64. {streamingcommunity-3.3.8.dist-info → streamingcommunity-3.4.0.dist-info}/top_level.txt +0 -0
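
The change that repeats across almost every hunk below is the replacement of ad-hoc httpx calls (each passing its own timeout, verify and follow_redirects options) with a shared factory from the new StreamingCommunity/Util/http_client.py module (+28 lines). The body of that module is not part of this excerpt, so the following is only a hedged sketch of what create_client() plausibly looks like, inferred from its call sites: it accepts a headers keyword, exposes .get/.post/.head, follows redirects, and presumably reads its defaults from the same REQUESTS config keys the removed code used.

    import httpx

    from StreamingCommunity.Util.config_json import config_manager


    def create_client(headers: dict = None, **kwargs) -> httpx.Client:
        """Hypothetical sketch, not the actual 28-line module shipped in 3.4.0.

        Assumed defaults: timeout and TLS verification come from the REQUESTS
        section of the config and redirects are followed, because those are
        exactly the options the removed per-call httpx code passed explicitly.
        """
        return httpx.Client(
            headers=headers,
            timeout=config_manager.get_int("REQUESTS", "timeout"),
            verify=config_manager.get_bool("REQUESTS", "verify"),
            follow_redirects=True,
            **kwargs,
        )

Call sites in the hunks use the factory inline, e.g. create_client(headers=get_headers()).get(url, params=params) followed by response.raise_for_status(), with errors handled in a surrounding try/except.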
@@ -5,18 +5,14 @@ from urllib.parse import urlparse
 
 
 # External libraries
-import httpx
 from bs4 import BeautifulSoup
 
 
 # Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
 from StreamingCommunity.Util.headers import get_headers, get_userAgent
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager
 
-# Variable
-max_timeout = config_manager.get_int("REQUESTS", "timeout")
-
 
 class GetSerieInfo:
     def __init__(self, url):
@@ -46,20 +42,14 @@ class GetSerieInfo
 
     def _get_series_data(self):
         """Get series data through the API"""
-        headers = {'User-Agent': get_userAgent()}
-        params = {'byGuid': self.serie_id}
-
-        with httpx.Client(timeout=max_timeout, follow_redirects=True) as client:
-            response = client.get(
-                f'https://feed.entertainment.tv.theplatform.eu/f/{self.public_id}/mediaset-prod-all-series-v2',
-                params=params,
-                headers=headers
-            )
-
-        if response.status_code == 200:
+        try:
+            params = {'byGuid': self.serie_id}
+            response = create_client(headers=self.headers).get(f'https://feed.entertainment.tv.theplatform.eu/f/{self.public_id}/mediaset-prod-all-series-v2', params=params)
+            response.raise_for_status()
             return response.json()
-        else:
-            logging.error(f"Failed to get series data: {response.status_code}")
+
+        except Exception as e:
+            logging.error(f"Failed to get series data with error: {e}")
         return None
 
     def _process_available_seasons(self, data):
@@ -106,32 +96,30 @@
 
     def _extract_season_sb_ids(self, stagioni_disponibili):
         """Extract sb IDs from season pages"""
-        with httpx.Client(timeout=max_timeout, follow_redirects=True) as client:
-            for season in stagioni_disponibili:
-                response_page = client.get(
-                    season['page_url'],
-                    headers={'User-Agent': get_userAgent()}
-                )
-
-                print("Response for _extract_season_sb_ids:", response_page.status_code, " season index:", season['tvSeasonNumber'])
-                soup = BeautifulSoup(response_page.text, 'html.parser')
-
-                # Try first with 'Episodi', then with 'Puntate intere'
-                link = soup.find('a', string='Episodi')
-                if not link:
-                    #print("Using word: Puntate intere")
-                    link = soup.find('a', string='Puntate intere')
-
-                if link is None:
-                    link = soup.find('a', class_ = 'titleCarousel')
-
-                if link and link.has_attr('href'):
-                    if not link.string == 'Puntate intere':
-                        print("Using word: Episodi")
-
-                    season['sb'] = link['href'].split(',')[-1]
-                else:
-                    logging.warning(f"Link 'Episodi' or 'Puntate intere' not found for season {season['tvSeasonNumber']}")
+        client = create_client()
+
+        for season in stagioni_disponibili:
+            response_page = client.get(season['page_url'], headers={'User-Agent': get_userAgent()})
+
+            print("Response for _extract_season_sb_ids:", response_page.status_code, " season index:", season['tvSeasonNumber'])
+            soup = BeautifulSoup(response_page.text, 'html.parser')
+
+            # Try first with 'Episodi', then with 'Puntate intere'
+            link = soup.find('a', string='Episodi')
+            if not link:
+                #print("Using word: Puntate intere")
+                link = soup.find('a', string='Puntate intere')
+
+            if link is None:
+                link = soup.find('a', class_ = 'titleCarousel')
+
+            if link and link.has_attr('href'):
+                if not link.string == 'Puntate intere':
+                    print("Using word: Episodi")
+
+                season['sb'] = link['href'].split(',')[-1]
+            else:
+                logging.warning(f"Link 'Episodi' or 'Puntate intere' not found for season {season['tvSeasonNumber']}")
 
     def _get_season_episodes(self, season):
         """Get episodes for a specific season"""
@@ -147,11 +135,11 @@
             'range': '0-100',
         }
         episode_url = f"https://feed.entertainment.tv.theplatform.eu/f/{self.public_id}/mediaset-prod-all-programs-v2"
-
-        with httpx.Client(timeout=max_timeout, follow_redirects=True) as client:
-            episode_response = client.get(episode_url, headers=episode_headers, params=params)
 
-        if episode_response.status_code == 200:
+        try:
+            episode_response = create_client(headers=episode_headers).get(episode_url, params=params)
+            episode_response.raise_for_status()
+
             episode_data = episode_response.json()
             season['episodes'] = []
 
@@ -166,8 +154,9 @@
                 season['episodes'].append(episode_info)
 
             print(f"Found {len(season['episodes'])} episodes for season {season['tvSeasonNumber']}")
-        else:
-            logging.error(f"Failed to get episodes for season {season['tvSeasonNumber']}: {episode_response.status_code}")
+
+        except Exception as e:
+            logging.error(f"Failed to get episodes for season {season['tvSeasonNumber']} with error: {e}")
 
     def collect_season(self) -> None:
         """
@@ -3,8 +3,8 @@
 from urllib.parse import urlparse, urlunparse
 
 
-# External library
-import httpx
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client
 
 
 def try_mpd(url, qualities):
@@ -42,7 +42,7 @@ def try_mpd(url, qualities):
         mpd_url = urlunparse(parsed._replace(path=new_path)).strip()
 
         try:
-            r = httpx.head(mpd_url, timeout=5)
+            r = create_client().head(mpd_url)
             if r.status_code == 200:
                 return mpd_url
 
@@ -7,19 +7,17 @@ import xml.etree.ElementTree as ET
 
 
 # External library
-import httpx
 from bs4 import BeautifulSoup
 from rich.console import Console
 
 
 # Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Util.headers import get_headers, get_userAgent
 
 
 # Variable
 console = Console()
-MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
 network_data = []
 class_mediaset_api = None
 
@@ -51,17 +49,13 @@ class MediasetAPI:
             'appName': self.app_name,
             'client_id': self.client_id,
         }
-        response = httpx.post(
-            'https://api-ott-prod-fe.mediaset.net/PROD/play/idm/anonymous/login/v2.0',
-            headers=self.headers,
-            json=json_data,
-        )
+        response = create_client(headers=self.headers).post('https://api-ott-prod-fe.mediaset.net/PROD/play/idm/anonymous/login/v2.0', json=json_data)
         return response.json()['response']['beToken']
 
     def fetch_html(self, timeout=10):
-        r = httpx.get("https://mediasetinfinity.mediaset.it/", timeout=timeout, headers=self.headers)
-        r.raise_for_status()
-        return r.text
+        response = create_client(headers=self.headers).get("https://mediasetinfinity.mediaset.it/")
+        response.raise_for_status()
+        return response.text
 
     def find_relevant_script(self, html):
         soup = BeautifulSoup(html, "html.parser")
@@ -79,7 +73,7 @@ class MediasetAPI:
         html = self.fetch_html()
         scripts = self.find_relevant_script(html)[0:1]
         pairs = self.extract_pairs_from_scripts(scripts)
-        return next((h for h, k in pairs.items() if k == "$2a"), None)
+        return list(pairs.keys())[-5]
 
     def generate_request_headers(self):
         return {
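
The one-line change above in MediasetAPI swaps a lookup of the key whose value is "$2a" for a positional pick of the fifth key from the end. On a toy dict the two expressions compare as follows (the keys and values are invented; they only stand in for whatever extract_pairs_from_scripts() actually returns from the homepage scripts):

    # Illustrative only: invented stand-in for the scraped key/value pairs.
    pairs = {
        "key_a": "$1x",
        "key_b": "$2a",
        "key_c": "$2b",
        "key_d": "$3c",
        "key_e": "$4d",
        "key_f": "$5e",
    }

    old_pick = next((h for h, k in pairs.items() if k == "$2a"), None)  # match by value -> "key_b"
    new_pick = list(pairs.keys())[-5]                                   # positional -> "key_b" only while it sits 5th from the end
    print(old_pick, new_pick)

In other words, the new expression depends on the ordering and size of the scraped pairs rather than on a specific marker value.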
@@ -124,13 +118,7 @@ def get_playback_url(CONTENT_ID):
     }
 
     try:
-        response = httpx.post(
-            'https://api-ott-prod-fe.mediaset.net/PROD/play/playback/check/v2.0',
-            headers=headers,
-            json=json_data,
-            follow_redirects=True,
-            timeout=MAX_TIMEOUT
-        )
+        response = create_client(headers=headers).post('https://api-ott-prod-fe.mediaset.net/PROD/play/playback/check/v2.0', json=json_data)
         response.raise_for_status()
         resp_json = response.json()
 
@@ -255,13 +243,7 @@ def get_tracking_info(PLAYBACK_JSON):
     params['publicUrl'] = PLAYBACK_JSON['publicUrl']
 
     try:
-        response = httpx.get(
-            PLAYBACK_JSON['url'],
-            headers={'user-agent': get_userAgent()},
-            params=params,
-            follow_redirects=True,
-            timeout=MAX_TIMEOUT
-        )
+        response = create_client(headers={'user-agent': get_userAgent()}).get(PLAYBACK_JSON['url'], params=params)
         response.raise_for_status()
 
         results = parse_smil_for_media_info(response.text)
@@ -5,17 +5,16 @@ from typing import Tuple
 
 
 # External library
-import httpx
 from rich.console import Console
 
 
 # Internal utilities
 from StreamingCommunity.Util.os import os_manager
+from StreamingCommunity.Util.config_json import config_manager
 from StreamingCommunity.Util.headers import get_headers
-from StreamingCommunity.Util.os import get_wvd_path
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Util.message import start_message
 
-
 # Logic class
 from .util.get_license import generate_license_url
 from StreamingCommunity.Api.Template.config_loader import site_constant
@@ -29,6 +28,7 @@ from StreamingCommunity.Api.Player.mediapolisvod import VideoSource
 
 # Variable
 console = Console()
+extension_output = config_manager.get("M3U8_CONVERSION", "extension")
 
 
 def download_film(select_title: MediaItem) -> Tuple[str, bool]:
@@ -46,13 +46,13 @@ def download_film(select_title: MediaItem) -> Tuple[str, bool]:
     console.print(f"\n[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
 
     # Extract m3u8 URL from the film's URL
-    response = httpx.get(select_title.url + ".json", headers=get_headers(), timeout=10)
+    response = create_client(headers=get_headers()).get(select_title.url + ".json")
     first_item_path = "https://www.raiplay.it" + response.json().get("first_item_path")
     master_playlist = VideoSource.extract_m3u8_url(first_item_path)
 
     # Define the filename and path for the downloaded film
-    mp4_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
-    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, mp4_name.replace(".mp4", ""))
+    mp4_name = os_manager.get_sanitize_file(select_title.name, select_title.date) + extension_output
+    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, mp4_name.replace(extension_output, ""))
 
     # HLS
     if ".mpd" not in master_playlist:
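
The filename change above makes the output container extension configurable (M3U8_CONVERSION → extension) and passes the title date into the sanitizer. A small illustration of how the resulting paths compose, treating the sanitized name as an opaque string and assuming ".mp4" as the configured extension (get_sanitize_file's exact formatting is not shown in this diff):

    import os

    extension_output = ".mp4"            # stand-in for config_manager.get("M3U8_CONVERSION", "extension")
    sanitized = "Some Title 2021"        # stand-in for os_manager.get_sanitize_file(select_title.name, select_title.date)
    MOVIE_FOLDER = "/downloads/Movies"   # stand-in for site_constant.MOVIE_FOLDER

    mp4_name = sanitized + extension_output                                         # "Some Title 2021.mp4"
    mp4_path = os.path.join(MOVIE_FOLDER, mp4_name.replace(extension_output, ""))   # ".../Some Title 2021"
    output = os.path.join(mp4_path, mp4_name)                                       # ".../Some Title 2021/Some Title 2021.mp4"

So each film still gets its own folder named after the sanitized title with the file inside it; what changes is that the extension now comes from config and the date is passed to the sanitizer.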
@@ -66,7 +66,6 @@ def download_film(select_title: MediaItem) -> Tuple[str, bool]:
         license_url = generate_license_url(select_title.mpd_id)
 
         dash_process = DASH_Downloader(
-            cdm_device=get_wvd_path(),
             license_url=license_url,
             mpd_url=master_playlist,
             output_path=os.path.join(mp4_path, mp4_name),
@@ -11,7 +11,6 @@ from rich.prompt import Prompt
 
 # Internal utilities
 from StreamingCommunity.Util.headers import get_headers, get_userAgent
-from StreamingCommunity.Util.os import get_wvd_path
 from StreamingCommunity.Util.message import start_message
 
 
@@ -91,7 +90,6 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
     }
 
     dash_process = DASH_Downloader(
-        cdm_device=get_wvd_path(),
         license_url=full_license_url.split("?")[0],
         mpd_url=master_playlist,
         output_path=os.path.join(mp4_path, mp4_name),
@@ -166,16 +164,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
         - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
     """
     start_message()
-
-    # Extract program name from path_id
-    program_name = None
-    if select_season.path_id:
-        parts = select_season.path_id.strip('/').split('/')
-        if len(parts) >= 2:
-            program_name = parts[-1].split('.')[0]
-
-    # Init scraper
-    scrape_serie = GetSerieInfo(program_name)
+    scrape_serie = GetSerieInfo(select_season.path_id)
 
     # Get seasons info
     scrape_serie.collect_info_title()
@@ -1,13 +1,12 @@
 # 21.05.24
 
 # External libraries
-import httpx
 from rich.console import Console
 
 
 # Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
-from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.headers import get_headers
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Util.table import TVShowManager
 from StreamingCommunity.Api.Template.config_loader import site_constant
 from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
@@ -21,7 +20,6 @@ from .util.ScrapeSerie import GetSerieInfo
 console = Console()
 media_search_manager = MediaManager()
 table_show_manager = TVShowManager()
-max_timeout = config_manager.get_int("REQUESTS", "timeout")
 
 
 def determine_media_type(item):
@@ -30,24 +28,13 @@ def determine_media_type(item):
     using GetSerieInfo.
     """
     try:
-        # Extract program name from path_id
-        program_name = None
-        if item.get('path_id'):
-            parts = item['path_id'].strip('/').split('/')
-            if len(parts) >= 2:
-                program_name = parts[-1].split('.')[0]
-
-        if not program_name:
-            return "film"
-
-        # Dio stranamente guarda che giro bisogna fare per avere il tipo di media.
-        scraper = GetSerieInfo(program_name)
+        scraper = GetSerieInfo(item.get('path_id'))
         scraper.collect_info_title()
         return scraper.prog_tipology, scraper.prog_description, scraper.prog_year
 
     except Exception as e:
         console.print(f"[red]Error determining media type: {e}[/red]")
-        return "film"
+        return None, None, None
 
 
 def title_search(query: str) -> int:
@@ -78,13 +65,7 @@ def title_search(query: str) -> int:
     }
 
     try:
-        response = httpx.post(
-            search_url,
-            headers={'user-agent': get_userAgent()},
-            json=json_data,
-            timeout=max_timeout,
-            follow_redirects=True
-        )
+        response = create_client(headers=get_headers()).post(search_url, json=json_data)
         response.raise_for_status()
 
     except Exception as e:
@@ -97,6 +78,9 @@
     # Process each item and add to media manager
     for item in data:
         media_type, prog_description, prog_year = determine_media_type(item)
+        if media_type is None:
+            continue
+
         media_search_manager.add_media({
             'id': item.get('id', ''),
             'name': item.get('titolo', ''),
@@ -3,27 +3,20 @@
 import logging
 
 
-# External libraries
-import httpx
-
-
 # Internal utilities
 from StreamingCommunity.Util.headers import get_headers
-from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager
 
 
-# Variable
-max_timeout = config_manager.get_int("REQUESTS", "timeout")
-
 
 class GetSerieInfo:
-    def __init__(self, program_name: str):
+    def __init__(self, path_id: str):
         """Initialize the GetSerieInfo class."""
         self.base_url = "https://www.raiplay.it"
-        self.program_name = program_name
-        self.series_name = program_name
-        self.prog_tipology = None
+        self.path_id = path_id
+        self.series_name = None
+        self.prog_tipology = "film"
         self.prog_description = None
         self.prog_year = None
         self.seasons_manager = SeasonManager()
@@ -31,24 +24,21 @@ class GetSerieInfo:
     def collect_info_title(self) -> None:
         """Get series info including seasons."""
         try:
-            program_url = f"{self.base_url}/programmi/{self.program_name}.json"
-            response = httpx.get(url=program_url, headers=get_headers(), timeout=max_timeout)
+            program_url = f"{self.base_url}/{self.path_id}"
+            response = create_client(headers=get_headers()).get(program_url)
 
             # If 404, content is not yet available
             if response.status_code == 404:
-                logging.info(f"Content not yet available: {self.program_name}")
+                logging.info(f"Content not yet available: {program_url}")
                 return
 
             response.raise_for_status()
             json_data = response.json()
 
-            # Dio santissimo ma chi ha fatto le cose cosi di merda.
-            type_check_1 = "tv" if json_data.get('program_info', {}).get('layout', 'single') == 'multi' else "film"
-            #type_check_2 = "tv" if "tv" in json_data.get('track_info', {}).get('typology', '') else "film"
-
-            self.prog_tipology = type_check_1
+            # Get basic program info
             self.prog_description = json_data.get('program_info', '').get('vanity', '')
             self.prog_year = json_data.get('program_info', '').get('year', '')
+            self.series_name = json_data.get('program_info', '').get('title', '')
 
             # Look for seasons in the 'blocks' property
             for block in json_data.get('blocks', []):
@@ -60,11 +50,14 @@
 
                     # Extract seasons from sets array
                     for season_set in block.get('sets', []):
+                        self.prog_tipology = "tv"
+
                         if 'stagione' in season_set.get('name', '').lower():
                             self._add_season(season_set, block.get('id'))
 
                 elif 'stagione' in block.get('name', '').lower():
                     self.publishing_block_id = block.get('id')
+                    self.prog_tipology = "tv"
 
                     # Extract season directly from block's sets
                     for season_set in block.get('sets', []):
@@ -88,8 +81,8 @@ class GetSerieInfo:
         season = self.seasons_manager.get_season_by_number(number_season)
 
         # Se stai leggendo questo codice spieami perche hai fatto cosi.
-        url = f"{self.base_url}/programmi/{self.program_name}/{self.publishing_block_id}/{season.id}/episodes.json"
-        response = httpx.get(url=url, headers=get_headers(), timeout=max_timeout)
+        url = f"{self.base_url}/{self.path_id.replace('.json', '')}/{self.publishing_block_id}/{season.id}/episodes.json"
+        response = create_client(headers=get_headers()).get(url)
         response.raise_for_status()
 
         episodes_data = response.json()
@@ -1,20 +1,11 @@
 # 16.03.25
 
 
-# External library
-import httpx
-
-
 # Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Util.headers import get_headers
 
 
-# Variable
-MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
-
-
-
 def generate_license_url(mpd_id: str):
     """
     Generates the URL to obtain the Widevine license.
@@ -29,8 +20,8 @@ def generate_license_url(mpd_id: str):
         'cont': mpd_id,
         'output': '62',
     }
-
-    response = httpx.get('https://mediapolisvod.rai.it/relinker/relinkerServlet.htm', params=params, headers=get_headers(), timeout=MAX_TIMEOUT)
+
+    response = create_client(headers=get_headers()).get('https://mediapolisvod.rai.it/relinker/relinkerServlet.htm', params=params)
     response.raise_for_status()
 
     # Extract the license URL from the response in two lines
@@ -9,8 +9,9 @@ from rich.console import Console
 
 # Internal utilities
 from StreamingCommunity.Util.os import os_manager
+from StreamingCommunity.Util.config_json import config_manager
 from StreamingCommunity.Util.message import start_message
-from StreamingCommunity.TelegramHelp.telegram_bot import TelegramSession, get_bot_instance
+from StreamingCommunity.TelegramHelp.telegram_bot import TelegramSession
 
 
 # Logic class
@@ -25,6 +26,7 @@ from StreamingCommunity.Api.Player.vixcloud import VideoSource
 
 # Variable
 console = Console()
+extension_output = config_manager.get("M3U8_CONVERSION", "extension")
 
 
 def download_film(select_title: MediaItem) -> str:
@@ -38,19 +40,6 @@ def download_film(select_title: MediaItem) -> str:
     Return:
         - str: output path
     """
-    if site_constant.TELEGRAM_BOT:
-        bot = get_bot_instance()
-        bot.send_message(f"Download in corso:\n{select_title.name}", None)
-
-        # Viene usato per lo screen
-        console.print(f"## Download: [red]{select_title.name} ##")
-
-        # Get script_id
-        script_id = TelegramSession.get_session()
-        if script_id != "unknown":
-            TelegramSession.updateScriptId(script_id, select_title.name)
-
-    # Start message and display film information
     start_message()
     console.print(f"\n[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
 
@@ -67,8 +56,8 @@ def download_film(select_title: MediaItem) -> str:
         return None
 
     # Define the filename and path for the downloaded film
-    title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
-    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
+    title_name = os_manager.get_sanitize_file(select_title.name, select_title.date) + extension_output
+    mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(extension_output, ""))
 
     # Download the film using the m3u8 playlist, and output filename
     hls_process = HLS_Downloader(
@@ -4,14 +4,13 @@ import json
 
 
 # External libraries
-import httpx
 from bs4 import BeautifulSoup
 from rich.console import Console
 
 
 # Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
 from StreamingCommunity.Util.headers import get_userAgent
+from StreamingCommunity.Util.http_client import create_client
 from StreamingCommunity.Util.table import TVShowManager
 from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
 
@@ -25,8 +24,6 @@ from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
 console = Console()
 media_search_manager = MediaManager()
 table_show_manager = TVShowManager()
-max_timeout = config_manager.get_int("REQUESTS", "timeout")
-ssl_verify = config_manager.get_bool("REQUESTS", "verify")
 
 
 def title_search(query: str) -> int:
@@ -46,13 +43,7 @@ def title_search(query: str) -> int:
     table_show_manager.clear()
 
     try:
-        response = httpx.get(
-            f"{site_constant.FULL_URL}/it",
-            headers={'user-agent': get_userAgent()},
-            timeout=max_timeout,
-            verify=ssl_verify,
-            follow_redirects=True
-        )
+        response = create_client(headers={'user-agent': get_userAgent()}).get(f"{site_constant.FULL_URL}/it")
         response.raise_for_status()
 
         soup = BeautifulSoup(response.text, 'html.parser')
@@ -66,17 +57,7 @@
     console.print(f"[cyan]Search url: [yellow]{search_url}")
 
     try:
-        response = httpx.get(
-            search_url,
-            headers = {
-                'referer': site_constant.FULL_URL,
-                'user-agent': get_userAgent(),
-                'x-inertia': 'true',
-                'x-inertia-version': version
-            },
-            timeout=max_timeout,
-            verify=ssl_verify
-        )
+        response = create_client(headers={'user-agent': get_userAgent(), 'x-inertia': 'true', 'x-inertia-version': version}).get(search_url)
         response.raise_for_status()
 
     except Exception as e:
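
Taken together, the hunks converge on a single before/after pattern: per-call httpx options plus an explicit status-code branch become a create_client() call plus raise_for_status() inside try/except. A condensed, illustrative comparison (URL and headers are placeholders, not project endpoints):

    import logging

    import httpx

    from StreamingCommunity.Util.http_client import create_client

    URL = "https://example.org/data.json"    # placeholder
    HEADERS = {"User-Agent": "example"}      # placeholder


    def fetch_old(max_timeout: int = 10):
        # 3.3.8 style: per-call options and an explicit status-code check.
        response = httpx.get(URL, headers=HEADERS, timeout=max_timeout, follow_redirects=True)
        if response.status_code == 200:
            return response.json()
        logging.error(f"Failed to get data: {response.status_code}")
        return None


    def fetch_new():
        # 3.4.0 style: shared factory plus raise_for_status() in a try/except.
        try:
            response = create_client(headers=HEADERS).get(URL)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            logging.error(f"Failed to get data with error: {e}")
            return None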