StreamingCommunity 3.0.9__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic. Click here for more details.

Files changed (28)
  1. StreamingCommunity/Api/Player/ddl.py +4 -2
  2. StreamingCommunity/Api/Player/hdplayer.py +2 -1
  3. StreamingCommunity/Api/Player/maxstream.py +7 -7
  4. StreamingCommunity/Api/Player/mediapolisvod.py +4 -4
  5. StreamingCommunity/Api/Player/mixdrop.py +6 -5
  6. StreamingCommunity/Api/Player/supervideo.py +7 -5
  7. StreamingCommunity/Api/Player/sweetpixel.py +4 -3
  8. StreamingCommunity/Api/Player/vixcloud.py +20 -14
  9. StreamingCommunity/Api/Site/altadefinizione/film.py +9 -1
  10. StreamingCommunity/Api/Site/altadefinizione/util/ScrapeSerie.py +35 -21
  11. StreamingCommunity/Api/Site/animeunity/site.py +43 -71
  12. StreamingCommunity/Api/Site/streamingcommunity/film.py +4 -0
  13. StreamingCommunity/Lib/Downloader/HLS/downloader.py +17 -8
  14. StreamingCommunity/Lib/Downloader/HLS/segments.py +5 -4
  15. StreamingCommunity/Lib/Downloader/MP4/downloader.py +3 -3
  16. StreamingCommunity/Upload/update.py +8 -3
  17. StreamingCommunity/Upload/version.py +2 -2
  18. StreamingCommunity/Util/config_json.py +5 -7
  19. StreamingCommunity/Util/message.py +1 -3
  20. StreamingCommunity/Util/os.py +5 -2
  21. StreamingCommunity/global_search.py +2 -2
  22. StreamingCommunity/run.py +71 -36
  23. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/METADATA +43 -58
  24. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/RECORD +28 -28
  25. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/WHEEL +0 -0
  26. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/entry_points.txt +0 -0
  27. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/licenses/LICENSE +0 -0
  28. {streamingcommunity-3.0.9.dist-info → streamingcommunity-3.2.0.dist-info}/top_level.txt +0 -0
@@ -15,6 +15,7 @@ from StreamingCommunity.Util.headers import get_userAgent
15
15
 
16
16
  # Variable
17
17
  max_timeout = config_manager.get_int("REQUESTS", "timeout")
18
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
18
19
 
19
20
 
20
21
  class VideoSource:
@@ -41,7 +42,8 @@ class VideoSource:
41
42
  url=url,
42
43
  headers=self.headers,
43
44
  cookies=self.cookie,
44
- timeout=max_timeout
45
+ timeout=max_timeout,
46
+ verify=REQUEST_VERIFY
45
47
  )
46
48
  response.raise_for_status()
47
49
 
@@ -77,4 +79,4 @@ class VideoSource:
77
79
  logging.error("Failed to retrieve content from the URL.")
78
80
 
79
81
  except Exception as e:
80
- logging.error(f"An error occurred while parsing the playlist: {e}")
82
+ logging.error(f"An error occurred while parsing the playlist: {e}")
@@ -14,11 +14,12 @@ from StreamingCommunity.Util.config_json import config_manager
14
14
 
15
15
  # Variable
16
16
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
17
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
17
18
 
18
19
 
19
20
  class VideoSource:
20
21
  def __init__(self, proxy=None):
21
- self.client = httpx.Client(headers={'user-agent': get_userAgent()}, timeout=MAX_TIMEOUT, proxy=proxy)
22
+ self.client = httpx.Client(headers={'user-agent': get_userAgent()}, timeout=MAX_TIMEOUT, proxy=proxy, verify=REQUEST_VERIFY)
22
23
 
23
24
  def extractLinkHdPlayer(self, response):
24
25
  """Extract iframe source from the page."""
@@ -18,7 +18,7 @@ from StreamingCommunity.Util.headers import get_userAgent
18
18
 
19
19
  # Variable
20
20
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
21
-
21
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
22
22
 
23
23
  class VideoSource:
24
24
  def __init__(self, url: str):
@@ -39,7 +39,7 @@ class VideoSource:
39
39
  Sends a request to the initial URL and extracts the redirect URL.
40
40
  """
41
41
  try:
42
- response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
42
+ response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
43
43
  response.raise_for_status()
44
44
 
45
45
  # Extract the redirect URL from the HTML
@@ -58,7 +58,7 @@ class VideoSource:
58
58
  Sends a request to the redirect URL and extracts the Maxstream URL.
59
59
  """
60
60
  try:
61
- response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
61
+ response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
62
62
  response.raise_for_status()
63
63
 
64
64
  # Extract the Maxstream URL from the HTML
@@ -77,12 +77,12 @@ class VideoSource:
77
77
 
78
78
  # Make request to stayonline api
79
79
  data = {'id': self.redirect_url.split("/")[-2], 'ref': ''}
80
- response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data)
80
+ response = httpx.post('https://stayonline.pro/ajax/linkEmbedView.php', headers=headers, data=data, verify=REQUEST_VERIFY)
81
81
  response.raise_for_status()
82
82
  uprot_url = response.json()['data']['value']
83
83
 
84
84
  # Retry getting maxstream url
85
- response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
85
+ response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
86
86
  response.raise_for_status()
87
87
  soup = BeautifulSoup(response.text, "html.parser")
88
88
  maxstream_url = soup.find("a").get("href")
@@ -104,7 +104,7 @@ class VideoSource:
104
104
  Sends a request to the Maxstream URL and extracts the .m3u8 file URL.
105
105
  """
106
106
  try:
107
- response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
107
+ response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
108
108
  response.raise_for_status()
109
109
  soup = BeautifulSoup(response.text, "html.parser")
110
110
 
@@ -138,4 +138,4 @@ class VideoSource:
138
138
  """
139
139
  self.get_redirect_url()
140
140
  self.get_maxstream_url()
141
- return self.get_m3u8_url()
141
+ return self.get_m3u8_url()
@@ -12,7 +12,7 @@ from StreamingCommunity.Util.headers import get_headers
12
12
 
13
13
  # Variable
14
14
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
15
-
15
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
16
16
 
17
17
  class VideoSource:
18
18
 
@@ -29,7 +29,7 @@ class VideoSource:
29
29
  return "Error: Unable to determine video JSON URL"
30
30
 
31
31
  try:
32
- response = httpx.get(video_url, headers=get_headers(), timeout=MAX_TIMEOUT)
32
+ response = httpx.get(video_url, headers=get_headers(), timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
33
33
  if response.status_code != 200:
34
34
  return f"Error: Failed to fetch video data (Status: {response.status_code})"
35
35
 
@@ -50,7 +50,7 @@ class VideoSource:
50
50
  'cont': element_key,
51
51
  'output': '62',
52
52
  }
53
- stream_response = httpx.get('https://mediapolisvod.rai.it/relinker/relinkerServlet.htm', params=params, headers=get_headers(), timeout=MAX_TIMEOUT)
53
+ stream_response = httpx.get('https://mediapolisvod.rai.it/relinker/relinkerServlet.htm', params=params, headers=get_headers(), timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
54
54
 
55
55
  if stream_response.status_code != 200:
56
56
  return f"Error: Failed to fetch stream URL (Status: {stream_response.status_code})"
@@ -61,4 +61,4 @@ class VideoSource:
61
61
  return m3u8_url
62
62
 
63
63
  except Exception as e:
64
- return f"Error: {str(e)}"
64
+ return f"Error: {str(e)}"
@@ -17,7 +17,7 @@ from StreamingCommunity.Util.headers import get_userAgent
17
17
 
18
18
  # Variable
19
19
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
20
-
20
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
21
21
 
22
22
  class VideoSource:
23
23
  STAYONLINE_BASE_URL = "https://stayonline.pro"
@@ -45,7 +45,7 @@ class VideoSource:
45
45
  def get_redirect_url(self) -> str:
46
46
  """Extract the stayonline redirect URL from the initial page."""
47
47
  try:
48
- response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
48
+ response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
49
49
  response.raise_for_status()
50
50
  soup = BeautifulSoup(response.text, "html.parser")
51
51
 
@@ -68,7 +68,7 @@ class VideoSource:
68
68
  raise ValueError("Redirect URL not set. Call get_redirect_url first.")
69
69
 
70
70
  try:
71
- response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
71
+ response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
72
72
  response.raise_for_status()
73
73
  soup = BeautifulSoup(response.text, "html.parser")
74
74
 
@@ -89,7 +89,7 @@ class VideoSource:
89
89
  self.headers['referer'] = f'{self.STAYONLINE_BASE_URL}/l/{link_id}/'
90
90
  data = {'id': link_id, 'ref': ''}
91
91
 
92
- response = httpx.post(f'{self.STAYONLINE_BASE_URL}/ajax/linkView.php', headers=self.headers, data=data, timeout=MAX_TIMEOUT)
92
+ response = httpx.post(f'{self.STAYONLINE_BASE_URL}/ajax/linkView.php', headers=self.headers, data=data, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
93
93
  response.raise_for_status()
94
94
  return response.json()['data']['value']
95
95
 
@@ -128,7 +128,8 @@ class VideoSource:
128
128
  response = httpx.get(
129
129
  f'{self.MIXDROP_BASE_URL}/e/{video_id}',
130
130
  headers=self._get_mixdrop_headers(),
131
- timeout=MAX_TIMEOUT
131
+ timeout=MAX_TIMEOUT,
132
+ verify=REQUEST_VERIFY
132
133
  )
133
134
  response.raise_for_status()
134
135
  soup = BeautifulSoup(response.text, "html.parser")
@@ -5,9 +5,9 @@ import logging
5
5
 
6
6
 
7
7
  # External libraries
8
- import httpx
9
8
  import jsbeautifier
10
9
  from bs4 import BeautifulSoup
10
+ from curl_cffi import requests
11
11
 
12
12
 
13
13
  # Internal utilities
@@ -17,6 +17,7 @@ from StreamingCommunity.Util.headers import get_headers
17
17
 
18
18
  # Variable
19
19
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
20
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
20
21
 
21
22
 
22
23
  class VideoSource:
@@ -28,7 +29,6 @@ class VideoSource:
28
29
  - url (str): The URL of the video source.
29
30
  """
30
31
  self.headers = get_headers()
31
- self.client = httpx.Client()
32
32
  self.url = url
33
33
 
34
34
  def make_request(self, url: str) -> str:
@@ -42,8 +42,10 @@ class VideoSource:
42
42
  - str: The response content if successful, None otherwise.
43
43
  """
44
44
  try:
45
- response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
46
- response.raise_for_status()
45
+ response = requests.get(url, headers=self.headers, timeout=MAX_TIMEOUT, impersonate="chrome110", verify=REQUEST_VERIFY)
46
+ if response.status_code >= 400:
47
+ logging.error(f"Request failed with status code: {response.status_code}")
48
+ return None
47
49
  return response.text
48
50
 
49
51
  except Exception as e:
@@ -160,4 +162,4 @@ class VideoSource:
160
162
 
161
163
  except Exception as e:
162
164
  logging.error(f"An error occurred: {e}")
163
- return None
165
+ return None
@@ -14,7 +14,7 @@ from StreamingCommunity.Util.headers import get_userAgent
14
14
 
15
15
  # Variable
16
16
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
17
-
17
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
18
18
 
19
19
  class VideoSource:
20
20
  def __init__(self, full_url, episode_data, session_id, csrf_token):
@@ -30,7 +30,8 @@ class VideoSource:
30
30
  cookies={"sessionId": session_id},
31
31
  headers={"User-Agent": get_userAgent(), "csrf-token": csrf_token},
32
32
  base_url=full_url,
33
- timeout=MAX_TIMEOUT
33
+ timeout=MAX_TIMEOUT,
34
+ verify=REQUEST_VERIFY
34
35
  )
35
36
 
36
37
  def get_playlist(self):
@@ -46,4 +47,4 @@ class VideoSource:
46
47
 
47
48
  except Exception as e:
48
49
  logging.error(f"Error in new API system: {e}")
49
- return None
50
+ return None
@@ -20,6 +20,7 @@ from .Helper.Vixcloud.js_parser import JavaScriptParser
20
20
 
21
21
  # Variable
22
22
  MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
23
+ REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
23
24
  console = Console()
24
25
 
25
26
 
@@ -39,6 +40,7 @@ class VideoSource:
39
40
  self.is_series = is_series
40
41
  self.media_id = media_id
41
42
  self.iframe_src = None
43
+ self.window_parameter = None
42
44
 
43
45
  def get_iframe(self, episode_id: int) -> None:
44
46
  """
@@ -56,7 +58,7 @@ class VideoSource:
56
58
  }
57
59
 
58
60
  try:
59
- response = httpx.get(f"{self.url}/iframe/{self.media_id}", headers=self.headers, params=params, timeout=MAX_TIMEOUT, proxy=self.proxy)
61
+ response = httpx.get(f"{self.url}/iframe/{self.media_id}", headers=self.headers, params=params, timeout=MAX_TIMEOUT, proxy=self.proxy, verify=REQUEST_VERIFY)
60
62
  response.raise_for_status()
61
63
 
62
64
  # Parse response with BeautifulSoup to get iframe source
@@ -99,7 +101,7 @@ class VideoSource:
99
101
  """
100
102
  try:
101
103
  if self.iframe_src is not None:
102
- response = httpx.get(self.iframe_src, headers=self.headers, timeout=MAX_TIMEOUT)
104
+ response = httpx.get(self.iframe_src, headers=self.headers, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
103
105
  response.raise_for_status()
104
106
 
105
107
  # Parse response with BeautifulSoup to get content
@@ -109,41 +111,45 @@ class VideoSource:
109
111
  # Parse script to get video information
110
112
  self.parse_script(script_text=script)
111
113
 
114
+ except httpx.HTTPStatusError as e:
115
+ if e.response.status_code == 404:
116
+ console.print("[yellow]This content will be available soon![/yellow]")
117
+ return
118
+
119
+ logging.error(f"Error getting content: {e}")
120
+ raise
121
+
112
122
  except Exception as e:
113
123
  logging.error(f"Error getting content: {e}")
114
124
  raise
115
125
 
116
- def get_playlist(self) -> str:
126
+ def get_playlist(self) -> str | None:
117
127
  """
118
128
  Generate authenticated playlist URL.
119
129
 
120
130
  Returns:
121
- str: Fully constructed playlist URL with authentication parameters
131
+ str | None: Fully constructed playlist URL with authentication parameters, or None if content unavailable
122
132
  """
133
+ if not self.window_parameter:
134
+ return None
135
+
123
136
  params = {}
124
137
 
125
- # Add 'h' parameter if video quality is 1080p
126
138
  if self.canPlayFHD:
127
139
  params['h'] = 1
128
140
 
129
- # Parse the original URL
130
141
  parsed_url = urlparse(self.window_parameter.url)
131
142
  query_params = parse_qs(parsed_url.query)
132
143
 
133
- # Check specifically for 'b=1' in the query parameters
134
144
  if 'b' in query_params and query_params['b'] == ['1']:
135
145
  params['b'] = 1
136
146
 
137
- # Add authentication parameters (token and expiration)
138
147
  params.update({
139
148
  "token": self.window_parameter.token,
140
149
  "expires": self.window_parameter.expires
141
150
  })
142
151
 
143
- # Build the updated query string
144
152
  query_string = urlencode(params)
145
-
146
- # Construct the new URL with updated query parameters
147
153
  return urlunparse(parsed_url._replace(query=query_string))
148
154
 
149
155
 
@@ -173,7 +179,7 @@ class VideoSourceAnime(VideoSource):
173
179
  str: Parsed script content
174
180
  """
175
181
  try:
176
- response = httpx.get(f"{self.url}/embed-url/{episode_id}", headers=self.headers, timeout=MAX_TIMEOUT)
182
+ response = httpx.get(f"{self.url}/embed-url/{episode_id}", headers=self.headers, timeout=MAX_TIMEOUT, verify=REQUEST_VERIFY)
177
183
  response.raise_for_status()
178
184
 
179
185
  # Extract and clean embed URL
@@ -181,7 +187,7 @@ class VideoSourceAnime(VideoSource):
181
187
  self.iframe_src = embed_url
182
188
 
183
189
  # Fetch video content using embed URL
184
- video_response = httpx.get(embed_url)
190
+ video_response = httpx.get(embed_url, verify=REQUEST_VERIFY)
185
191
  video_response.raise_for_status()
186
192
 
187
193
  # Parse response with BeautifulSoup to get content of the script
@@ -193,4 +199,4 @@ class VideoSourceAnime(VideoSource):
193
199
 
194
200
  except Exception as e:
195
201
  logging.error(f"Error fetching embed URL: {e}")
196
- return None
202
+ return None
@@ -61,16 +61,22 @@ def download_film(select_title: MediaItem) -> str:
61
61
  # Extract mostraguarda URL
62
62
  try:
63
63
  response = httpx.get(select_title.url, headers=get_headers(), timeout=10)
64
+ response.raise_for_status()
65
+
64
66
  soup = BeautifulSoup(response.text, 'html.parser')
65
67
  iframes = soup.find_all('iframe')
66
68
  mostraguarda = iframes[0]['src']
67
69
 
68
70
  except Exception as e:
69
71
  console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get mostraguarda")
72
+ return None
70
73
 
71
74
  # Extract supervideo URL
75
+ supervideo_url = None
72
76
  try:
73
77
  response = httpx.get(mostraguarda, headers=get_headers(), timeout=10)
78
+ response.raise_for_status()
79
+
74
80
  soup = BeautifulSoup(response.text, 'html.parser')
75
81
  pattern = r'//supervideo\.[^/]+/[a-z]/[a-zA-Z0-9]+'
76
82
  supervideo_match = re.search(pattern, response.text)
@@ -78,7 +84,9 @@ def download_film(select_title: MediaItem) -> str:
78
84
 
79
85
  except Exception as e:
80
86
  console.print(f"[red]Site: {site_constant.SITE_NAME}, request error: {e}, get supervideo URL")
81
-
87
+ console.print("[yellow]This content will be available soon![/yellow]")
88
+ return None
89
+
82
90
  # Init class
83
91
  video_source = VideoSource(supervideo_url)
84
92
  master_playlist = video_source.get_playlist()
@@ -38,38 +38,52 @@ class GetSerieInfo:
38
38
  soup = BeautifulSoup(response.text, "html.parser")
39
39
  self.series_name = soup.find("title").get_text(strip=True).split(" - ")[0]
40
40
 
41
- # Process all seasons
42
- season_items = soup.find_all('div', class_='accordion-item')
43
-
44
- for season_idx, season_item in enumerate(season_items, 1):
45
- season_header = season_item.find('div', class_='accordion-header')
46
- if not season_header:
47
- continue
48
-
49
- season_name = season_header.get_text(strip=True)
41
+ # Find all season dropdowns
42
+ seasons_dropdown = soup.find('div', class_='dropdown seasons')
43
+ if not seasons_dropdown:
44
+ return
45
+
46
+ # Get all season items
47
+ season_items = seasons_dropdown.find_all('span', {'data-season': True})
48
+
49
+ for season_item in season_items:
50
+ season_num = int(season_item['data-season'])
51
+ season_name = season_item.get_text(strip=True)
50
52
 
51
- # Create a new season and get a reference to it
53
+ # Create a new season
52
54
  current_season = self.seasons_manager.add_season({
53
- 'number': season_idx,
55
+ 'number': season_num,
54
56
  'name': season_name
55
57
  })
56
58
 
57
- # Find episodes for this season
58
- episode_divs = season_item.find_all('div', class_='down-episode')
59
- for ep_idx, ep_div in enumerate(episode_divs, 1):
60
- episode_name_tag = ep_div.find('b')
61
- if not episode_name_tag:
59
+ # Find all episodes for this season
60
+ episodes_container = soup.find('div', {'class': 'dropdown mirrors', 'data-season': str(season_num)})
61
+ if not episodes_container:
62
+ continue
63
+
64
+ # Get all episode mirrors for this season
65
+ episode_mirrors = soup.find_all('div', {'class': 'dropdown mirrors',
66
+ 'data-season': str(season_num)})
67
+
68
+ for mirror in episode_mirrors:
69
+ episode_data = mirror.get('data-episode', '').split('-')
70
+ if len(episode_data) != 2:
71
+ continue
72
+
73
+ ep_num = int(episode_data[1])
74
+
75
+ # Find supervideo link
76
+ supervideo_span = mirror.find('span', {'data-id': 'supervideo'})
77
+ if not supervideo_span:
62
78
  continue
63
79
 
64
- episode_name = episode_name_tag.get_text(strip=True)
65
- link_tag = ep_div.find('a', string=lambda text: text and "Supervideo" in text)
66
- episode_url = link_tag['href'] if link_tag else None
80
+ episode_url = supervideo_span.get('data-link', '')
67
81
 
68
82
  # Add episode to the season
69
83
  if current_season:
70
84
  current_season.episodes.add({
71
- 'number': ep_idx,
72
- 'name': episode_name,
85
+ 'number': ep_num,
86
+ 'name': f"Episodio {ep_num}",
73
87
  'url': episode_url
74
88
  })
75
89