KekikStream 2.2.9-py3-none-any.whl → 2.3.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. KekikStream/Core/HTMLHelper.py +134 -0
  2. KekikStream/Core/Plugin/PluginBase.py +22 -4
  3. KekikStream/Core/Plugin/PluginLoader.py +3 -2
  4. KekikStream/Core/Plugin/PluginManager.py +2 -2
  5. KekikStream/Core/__init__.py +2 -0
  6. KekikStream/Extractors/CloseLoad.py +12 -13
  7. KekikStream/Extractors/ContentX.py +33 -31
  8. KekikStream/Extractors/DonilasPlay.py +10 -10
  9. KekikStream/Extractors/DzenRu.py +3 -3
  10. KekikStream/Extractors/ExPlay.py +10 -10
  11. KekikStream/Extractors/Filemoon.py +11 -16
  12. KekikStream/Extractors/JetTv.py +4 -4
  13. KekikStream/Extractors/MixPlayHD.py +10 -11
  14. KekikStream/Extractors/MolyStream.py +16 -9
  15. KekikStream/Extractors/Odnoklassniki.py +4 -4
  16. KekikStream/Extractors/PeaceMakerst.py +3 -3
  17. KekikStream/Extractors/PixelDrain.py +6 -5
  18. KekikStream/Extractors/PlayerFilmIzle.py +6 -10
  19. KekikStream/Extractors/RapidVid.py +8 -7
  20. KekikStream/Extractors/SetPlay.py +10 -10
  21. KekikStream/Extractors/SetPrime.py +3 -6
  22. KekikStream/Extractors/SibNet.py +4 -5
  23. KekikStream/Extractors/Sobreatsesuyp.py +5 -5
  24. KekikStream/Extractors/TRsTX.py +5 -5
  25. KekikStream/Extractors/TurboImgz.py +3 -4
  26. KekikStream/Extractors/TurkeyPlayer.py +5 -5
  27. KekikStream/Extractors/VidHide.py +4 -7
  28. KekikStream/Extractors/VidMoly.py +37 -25
  29. KekikStream/Extractors/VidMoxy.py +8 -9
  30. KekikStream/Extractors/VidPapi.py +5 -7
  31. KekikStream/Extractors/VideoSeyred.py +3 -3
  32. KekikStream/Plugins/BelgeselX.py +40 -51
  33. KekikStream/Plugins/DiziBox.py +53 -81
  34. KekikStream/Plugins/DiziPal.py +50 -72
  35. KekikStream/Plugins/DiziYou.py +96 -83
  36. KekikStream/Plugins/Dizilla.py +95 -107
  37. KekikStream/Plugins/FilmBip.py +29 -50
  38. KekikStream/Plugins/FilmMakinesi.py +84 -46
  39. KekikStream/Plugins/FilmModu.py +27 -41
  40. KekikStream/Plugins/FullHDFilm.py +57 -62
  41. KekikStream/Plugins/FullHDFilmizlesene.py +32 -57
  42. KekikStream/Plugins/HDFilmCehennemi.py +51 -65
  43. KekikStream/Plugins/JetFilmizle.py +38 -51
  44. KekikStream/Plugins/KultFilmler.py +43 -67
  45. KekikStream/Plugins/RecTV.py +34 -9
  46. KekikStream/Plugins/RoketDizi.py +89 -111
  47. KekikStream/Plugins/SelcukFlix.py +102 -93
  48. KekikStream/Plugins/SetFilmIzle.py +65 -75
  49. KekikStream/Plugins/SezonlukDizi.py +47 -65
  50. KekikStream/Plugins/Sinefy.py +70 -70
  51. KekikStream/Plugins/SinemaCX.py +31 -55
  52. KekikStream/Plugins/Sinezy.py +27 -54
  53. KekikStream/Plugins/SuperFilmGeldi.py +25 -44
  54. KekikStream/Plugins/UgurFilm.py +23 -48
  55. KekikStream/Plugins/YabanciDizi.py +285 -0
  56. {kekikstream-2.2.9.dist-info → kekikstream-2.3.9.dist-info}/METADATA +1 -1
  57. kekikstream-2.3.9.dist-info/RECORD +84 -0
  58. kekikstream-2.2.9.dist-info/RECORD +0 -82
  59. {kekikstream-2.2.9.dist-info → kekikstream-2.3.9.dist-info}/WHEEL +0 -0
  60. {kekikstream-2.2.9.dist-info → kekikstream-2.3.9.dist-info}/entry_points.txt +0 -0
  61. {kekikstream-2.2.9.dist-info → kekikstream-2.3.9.dist-info}/licenses/LICENSE +0 -0
  62. {kekikstream-2.2.9.dist-info → kekikstream-2.3.9.dist-info}/top_level.txt +0 -0
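The common thread across the plugin and extractor changes below is the new KekikStream/Core/HTMLHelper.py (+134 lines), which replaces direct selectolax HTMLParser usage and ad-hoc re.search calls. Its implementation is not part of the hunks shown here; the sketch that follows is only an approximation inferred from the call sites in this diff (select, select_text, select_attr, regex_first, regex_all, extract_season_episode, and the .html attribute), backed by BeautifulSoup purely for illustration — it is not the published code.

    # Hypothetical sketch of the HTMLHelper surface implied by the call sites in this diff.
    # Not the actual KekikStream implementation.
    import re
    from bs4 import BeautifulSoup

    class HTMLHelper:
        def __init__(self, html: str):
            self.html  = html                                   # raw text, used by the regex_* helpers
            self._soup = BeautifulSoup(html, "html.parser")

        def select(self, selector, root=None):
            # CSS select inside an optional sub-tree (matches select("script", node) usage)
            return (root or self._soup).select(selector) if selector else [root or self._soup]

        def select_text(self, selector, root=None):
            # Text of the first match; selector=None means "text of root itself"
            node = (root or self._soup) if selector is None else (root or self._soup).select_one(selector)
            return node.get_text(strip=True) if node else None

        def select_attr(self, selector, attr, root=None):
            node = (root or self._soup) if selector is None else (root or self._soup).select_one(selector)
            return node.get(attr) if node else None

        def regex_first(self, pattern, text=None):
            match = re.search(pattern, text if text is not None else self.html)
            return match.group(1) if match else None

        def regex_all(self, pattern, text=None):
            return re.findall(pattern, text if text is not None else self.html)

        @staticmethod
        def extract_season_episode(text):
            # "3. Sezon 7. Bölüm" -> (3, 7); either side may come back None
            season  = re.search(r"(\d+)\s*\.\s*Sezon",  text or "")
            episode = re.search(r"(\d+)\s*\.\s*Bölüm", text or "")
            return (int(season.group(1)) if season else None,
                    int(episode.group(1)) if episode else None)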
KekikStream/Plugins/DiziYou.py
@@ -1,8 +1,6 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
 
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, Subtitle, ExtractResult
- from selectolax.parser import HTMLParser
- import re
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, Subtitle, ExtractResult, HTMLHelper
 
  class DiziYou(PluginBase):
      name = "DiziYou"
@@ -31,16 +29,13 @@ class DiziYou(PluginBase):
 
      async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
          istek = await self.httpx.get(f"{url.replace('SAYFA', str(page))}")
-         secici = HTMLParser(istek.text)
+         secici = HTMLHelper(istek.text)
 
          results = []
-         for veri in secici.css("div.single-item"):
-             title_el = veri.css_first("div#categorytitle a")
-             img_el = veri.css_first("img")
-
-             title = title_el.text(strip=True) if title_el else None
-             href = title_el.attrs.get("href") if title_el else None
-             poster = img_el.attrs.get("src") if img_el else None
+         for veri in secici.select("div.single-item"):
+             title = secici.select_text("div#categorytitle a", veri)
+             href = secici.select_attr("div#categorytitle a", "href", veri)
+             poster = secici.select_attr("img", "src", veri)
 
              if title and href:
                  results.append(MainPageResult(
@@ -54,16 +49,13 @@ class DiziYou(PluginBase):
 
      async def search(self, query: str) -> list[SearchResult]:
          istek = await self.httpx.get(f"{self.main_url}/?s={query}")
-         secici = HTMLParser(istek.text)
+         secici = HTMLHelper(istek.text)
 
          results = []
-         for afis in secici.css("div.incontent div#list-series"):
-             title_el = afis.css_first("div#categorytitle a")
-             img_el = afis.css_first("img")
-
-             title = title_el.text(strip=True) if title_el else None
-             href = title_el.attrs.get("href") if title_el else None
-             poster = (img_el.attrs.get("src") or img_el.attrs.get("data-src")) if img_el else None
+         for afis in secici.select("div.incontent div#list-series"):
+             title = secici.select_text("div#categorytitle a", afis)
+             href = secici.select_attr("div#categorytitle a", "href", afis)
+             poster = (secici.select_attr("img", "src", afis) or secici.select_attr("img", "data-src", afis))
 
              if title and href:
                  results.append(SearchResult(
@@ -76,80 +68,88 @@ class DiziYou(PluginBase):
 
      async def load_item(self, url: str) -> SeriesInfo:
          istek = await self.httpx.get(url)
-         secici = HTMLParser(istek.text)
+         secici = HTMLHelper(istek.text)
          html_text = istek.text
 
          # Title - div.title h1 içinde
-         title_el = secici.css_first("div.title h1")
-         title = title_el.text(strip=True) if title_el else ""
-
+         title = (secici.select_text("div.title h1") or "").strip()
+
          # Fallback: Eğer title boşsa URL'den çıkar (telif kısıtlaması olan sayfalar için)
          if not title:
              # URL'den slug'ı al: https://www.diziyou.one/jasmine/ -> jasmine -> Jasmine
              slug = url.rstrip('/').split('/')[-1]
              title = slug.replace('-', ' ').title()
-
+
          # Poster
-         poster_el = secici.css_first("div.category_image img")
-         poster = self.fix_url(poster_el.attrs.get("src")) if poster_el else ""
+         poster_src = secici.select_attr("div.category_image img", "src") or secici.select_attr("meta[property='og:image']", "content")
+         poster = self.fix_url(poster_src) if poster_src else ""
 
          # Year - regex ile çıkarma (xpath yerine)
-         year = None
-         year_match = re.search(r"Yapım Yılı.*?(\d{4})", html_text, re.DOTALL | re.IGNORECASE)
-         if year_match:
-             year = year_match.group(1)
+         year = secici.regex_first(r"(?is)Yapım Yılı.*?(\d{4})", secici.html)
 
-         desc_el = secici.css_first("div.diziyou_desc")
-         description = desc_el.text(strip=True) if desc_el else None
+         description_el = secici.select("div.diziyou_desc") or secici.select("div#icerikcat")
+         description = ""
+         if description_el:
+             # Scriptleri temizle
+             for script in secici.select("script", description_el[0]):
+                 script.decompose()
+             description = secici.select_text(None, description_el[0])
 
-         tags = [a.text(strip=True) for a in secici.css("div.genres a") if a.text(strip=True)]
+         tags = [secici.select_text(None, a) for a in secici.select("div.genres a") if secici.select_text(None, a)]
 
-         # Rating - regex ile
-         rating = None
-         rating_match = re.search(r"IMDB.*?([0-9.]+)", html_text, re.DOTALL | re.IGNORECASE)
-         if rating_match:
-             rating = rating_match.group(1)
+         # Rating - daha spesifik regex ile
+         rating = secici.regex_first(r"(?is)IMDB\s*:\s*</span>([0-9.]+)", secici.html)
 
          # Actors - regex ile
-         actors = []
-         actors_match = re.search(r"Oyuncular.*?</span>([^<]+)", html_text, re.DOTALL | re.IGNORECASE)
-         if actors_match:
-             actors = [actor.strip() for actor in actors_match.group(1).split(",") if actor.strip()]
+         actors_raw = secici.regex_first(r"(?is)Oyuncular.*?</span>([^<]+)", secici.html)
+         actors = [actor.strip() for actor in actors_raw.split(",") if actor.strip()] if actors_raw else []
 
          episodes = []
-         # Episodes - bolumust div içeren a linklerini bul
-         for link in secici.css("a"):
-             bolumust = link.css_first("div.bolumust")
-             if not bolumust:
-                 continue
-
-             baslik_el = link.css_first("div.baslik")
-             if not baslik_el:
-                 continue
-
-             ep_name = baslik_el.text(strip=True)
-             ep_href = link.attrs.get("href")
+         # Episodes - div#scrollbar-container a (kısıtlı alan)
+         for link in secici.select("div#scrollbar-container a"):
+             ep_href = secici.select_attr(None, "href", link)
              if not ep_href:
                  continue
 
-             # Bölüm ismi varsa al
-             bolumismi_el = link.css_first("div.bolumismi")
-             ep_name_clean = bolumismi_el.text(strip=True).replace("(", "").replace(")", "").strip() if bolumismi_el else ep_name
-
-             ep_episode_match = re.search(r"(\d+)\. Bölüm", ep_name)
-             ep_season_match = re.search(r"(\d+)\. Sezon", ep_name)
-
-             ep_episode = ep_episode_match.group(1) if ep_episode_match else None
-             ep_season = ep_season_match.group(1) if ep_season_match else None
-
-             if ep_episode and ep_season:
-                 episode = Episode(
-                     season = ep_season,
-                     episode = ep_episode,
-                     title = ep_name_clean,
+             # Link metni veya alt başlık al
+             ep_name = (secici.select_text(None, link) or "").strip()
+             title_child = secici.select_text("div.baslik", link) or secici.select_text("div.bolumismi", link)
+             if title_child:
+                 ep_name = title_child
+
+             # Önce metin üzerinden sezon/bölüm çıkart
+             s_val, e_val = HTMLHelper.extract_season_episode(ep_name)
+
+             # URL bazlı kalıplar: -1-sezon-2-bolum gibi
+             if not (s_val or e_val):
+                 pairs = HTMLHelper(ep_href).regex_all(r"-(\d+)-sezon-(\d+)-bolum")
+                 if pairs:
+                     s_val, e_val = int(pairs[0][0]), int(pairs[0][1])
+                 else:
+                     pairs = HTMLHelper(ep_href).regex_all(r"(\d+)-sezon-(\d+)-bolum")
+                     if pairs:
+                         s_val, e_val = int(pairs[0][0]), int(pairs[0][1])
+                     else:
+                         e_val_str = HTMLHelper(ep_href).regex_first(r"(\d+)-bolum")
+                         if e_val_str:
+                             e_val = int(e_val_str)
+             # Metin üzerinden son bir deneme
+             if not e_val:
+                 e_str = HTMLHelper(ep_name).regex_first(r"(\d+)\s*\.\s*[Bb]ölüm")
+                 if e_str:
+                     e_val = int(e_str)
+             if not s_val:
+                 s_str = HTMLHelper(ep_name).regex_first(r"(\d+)\s*\.\s*[Ss]ezon")
+                 if s_str:
+                     s_val = int(s_str)
+
+             if e_val or HTMLHelper(ep_href).regex_first(r"-\d+-sezon-\d+-bolum"):
+                 episodes.append(Episode(
+                     season = s_val,
+                     episode = e_val,
+                     title = ep_name if ep_name else None,
                      url = self.fix_url(ep_href),
-                 )
-                 episodes.append(episode)
+                 ))
 
          return SeriesInfo(
              url = url,
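The rewritten episode loop above works through layered fallbacks: HTMLHelper.extract_season_episode on the link text first, then URL slugs of the form -1-sezon-2-bolum, then a bare (\d+)-bolum match, and finally loose "\d+. Bölüm" / "\d+. Sezon" text patterns. A minimal standalone illustration of the slug branch, using plain re rather than HTMLHelper (function name invented for the example):

    # Illustration of the "-<sezon>-sezon-<bolum>-bolum" slug fallback, with plain re.
    import re

    def season_episode_from_slug(href: str) -> tuple[int | None, int | None]:
        pairs = re.findall(r"-(\d+)-sezon-(\d+)-bolum", href)
        if pairs:
            return int(pairs[0][0]), int(pairs[0][1])
        bolum = re.search(r"(\d+)-bolum", href)      # episode-only slugs
        return (None, int(bolum.group(1))) if bolum else (None, None)

    print(season_episode_from_slug("/jasmine-1-sezon-2-bolum"))  # (1, 2)
    print(season_episode_from_slug("/jasmine-12-bolum"))         # (None, 12)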
@@ -165,29 +165,42 @@ class DiziYou(PluginBase):
 
      async def load_links(self, url: str) -> list[ExtractResult]:
          istek = await self.httpx.get(url)
-         secici = HTMLParser(istek.text)
+         secici = HTMLHelper(istek.text)
 
          # Title ve episode name - None kontrolü ekle
-         title_el = secici.css_first("div.title h1")
-         item_title = title_el.text(strip=True) if title_el else ""
-
-         ep_name_el = secici.css_first("div#bolum-ismi")
-         ep_name = ep_name_el.text(strip=True) if ep_name_el else ""
+         item_title = secici.select_text("div.title h1")
+         ep_name = secici.select_text("div#bolum-ismi")
 
          # Player src'den item_id çıkar
-         player_el = secici.css_first("iframe#diziyouPlayer")
-         player_src = player_el.attrs.get("src") if player_el else None
+         # Player src'den item_id çıkar - önce özel player seçicisini dene
+         player_src = None
+         # Yaygın locatorlar
+         for sel in ["iframe#diziyouPlayer", "div.player iframe", "iframe[src*='/player/']", "iframe[src*='/episodes/']", "iframe"]:
+             p = secici.select_attr(sel, "src")
+             if p and any(x in p.lower() for x in ["/player/", "/episodes/", "diziyou"]):
+                 player_src = p
+                 break
+
+         # Eğer hâlâ bulunamadıysa, varsa bir bölüm sayfasına git ve oradan player'ı çek
+         if not player_src:
+             for a in secici.select("a"):
+                 href = secici.select_attr("a", "href", a)
+                 if not href:
+                     continue
+                 if HTMLHelper(href).regex_first(r"(-\d+-sezon-\d+-bolum|/bolum|/episode|/episodes|/play)"):
+                     break
+
          if not player_src:
              return [] # Player bulunamadıysa boş liste döndür
-
+
          item_id = player_src.split("/")[-1].replace(".html", "")
 
          subtitles = []
          stream_urls = []
 
-         for secenek in secici.css("span.diziyouOption"):
-             opt_id = secenek.attrs.get("id")
-             op_name = secenek.text(strip=True)
+         for secenek in secici.select("span.diziyouOption"):
+             opt_id = secici.select_attr("span.diziyouOption", "id", secenek)
+             op_name = secici.select_text("span.diziyouOption", secenek)
 
              match opt_id:
                  case "turkceAltyazili":
KekikStream/Plugins/Dizilla.py
@@ -1,7 +1,6 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
 
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult
- from selectolax.parser import HTMLParser
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
  from json import loads
  from urllib.parse import urlparse, urlunparse
  from Crypto.Cipher import AES
@@ -45,51 +44,32 @@ class Dizilla(PluginBase):
                      category = category,
                      title = veri.get("original_title"),
                      url = self.fix_url(f"{self.main_url}/{veri.get('used_slug')}"),
-                     poster = self.fix_url(veri.get("object_poster_url")),
+                     poster = self.fix_poster_url(self.fix_url(veri.get("object_poster_url"))),
                  )
                  for veri in veriler
              ])
          else:
              istek = await self.httpx.get(url.replace("SAYFA", str(page)))
-             secici = HTMLParser(istek.text)
+             secici = HTMLHelper(istek.text)
 
-             for veri in secici.css("div.tab-content > div.grid a"):
-                 h2_el = veri.css_first("h2")
-                 name = h2_el.text(strip=True) if h2_el else None
-
-                 # opacity-80 div'den episode bilgisi - normalize-space yerine doğrudan text
-                 opacity_el = veri.css_first("div[class*='opacity-80']")
-                 ep_name = opacity_el.text(strip=True) if opacity_el else None
-                 if not ep_name:
+             # Genel olarak dizi sayfalarına giden linkleri al
+             for veri in secici.select('a[href*="/dizi/"]'):
+                 href = secici.select_attr('a', 'href', veri)
+                 title = secici.select_text(None, veri)
+                 if not href or not title:
                      continue
 
-                 ep_name = ep_name.replace(". Sezon", "x").replace(". Bölüm", "").replace("x ", "x")
-                 title = f"{name} - {ep_name}"
-
-                 href = veri.attrs.get("href")
-                 ep_req = await self.httpx.get(self.fix_url(href))
-                 ep_secici = HTMLParser(ep_req.text)
-
-                 # nav li'leri alıp 3. elemana erişme (nth-of-type yerine)
-                 nav_lis = ep_secici.css("nav li")
-                 if len(nav_lis) >= 3:
-                     link_el = nav_lis[2].css_first("a")
-                     href = link_el.attrs.get("href") if link_el else None
-                 else:
-                     href = None
-
-                 poster_el = ep_secici.css_first("img.imgt")
-                 poster = poster_el.attrs.get("src") if poster_el else None
-
-                 if href:
-                     ana_sayfa.append(
-                         MainPageResult(
-                             category = category,
-                             title = title,
-                             url = self.fix_url(href),
-                             poster = self.fix_url(poster) if poster else None
-                         )
-                     )
+                 # Detay sayfasından poster vb. bilgileri al
+                 ep_req = await self.httpx.get(self.fix_url(href))
+                 ep_secici = HTMLHelper(ep_req.text)
+                 poster = ep_secici.select_attr('img.imgt', 'src') or ep_secici.select_attr('img', 'src')
+
+                 ana_sayfa.append(MainPageResult(
+                     category = category,
+                     title = title,
+                     url = self.fix_url(href),
+                     poster = self.fix_url(poster) if poster else None
+                 ))
 
          return ana_sayfa
@@ -114,6 +94,21 @@ class Dizilla(PluginBase):
          # JSON decode
          return loads(decrypted.decode("utf-8"))
 
+     def fix_poster_url(self, url: str) -> str:
+         """AMP CDN URL'lerini düzelt."""
+         if not url:
+             return url
+         # AMP CDN URL'lerini orijinal URL'ye çevir
+         # https://images-macellan-online.cdn.ampproject.org/i/s/images.macellan.online/...
+         # -> https://images.macellan.online/...
+         if "cdn.ampproject.org" in url:
+             # /i/s/ veya /ii/s/ gibi AMP prefix'lerinden sonraki kısmı al
+             helper = HTMLHelper(url)
+             match = helper.regex_first(r"cdn\.ampproject\.org/[^/]+/s/(.+)$")
+             if match:
+                 return f"https://{match}"
+         return url
+
      async def search(self, query: str) -> list[SearchResult]:
          arama_istek = await self.httpx.post(f"{self.main_url}/api/bg/searchcontent?searchterm={query}")
          decrypted = await self.decrypt_response(arama_istek.json().get("response"))
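The new fix_poster_url rewrites AMP-cache poster URLs back to their origin host, as its own comment shows (images-macellan-online.cdn.ampproject.org/i/s/... → images.macellan.online/...). A quick standalone check of that regex with plain re; the poster path is made up for the example:

    # Standalone check of the AMP-cache rewrite regex used by fix_poster_url (illustrative path).
    import re

    url = "https://images-macellan-online.cdn.ampproject.org/i/s/images.macellan.online/poster.jpg"
    match = re.search(r"cdn\.ampproject\.org/[^/]+/s/(.+)$", url)
    if match:
        url = f"https://{match.group(1)}"
    print(url)  # https://images.macellan.online/poster.jpg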
@@ -123,7 +118,7 @@ class Dizilla(PluginBase):
              SearchResult(
                  title = veri.get("object_name"),
                  url = self.fix_url(f"{self.main_url}/{veri.get('used_slug')}"),
-                 poster = self.fix_url(veri.get("object_poster_url")),
+                 poster = self.fix_poster_url(self.fix_url(veri.get("object_poster_url"))),
              )
              for veri in arama_veri
          ]
@@ -140,72 +135,62 @@ class Dizilla(PluginBase):
 
      async def load_item(self, url: str) -> SeriesInfo:
          istek = await self.httpx.get(url)
-         secici = HTMLParser(istek.text)
+         secici = HTMLHelper(istek.text)
 
-         title = secici.css_first("div.poster.poster h2")
-         title = title.text(strip=True) if title else None
-         if not title:
+         next_data_text = secici.select_text("script#__NEXT_DATA__")
+         if not next_data_text:
              return None
 
-         poster_el = secici.css_first("div.w-full.page-top.relative img")
-         poster = self.fix_url(poster_el.attrs.get("src")) if poster_el else None
-
-         # Year extraction (Kotlin: [1] index for w-fit min-w-fit)
-         info_boxes = secici.css("div.w-fit.min-w-fit")
-         year = None
-         if len(info_boxes) > 1:
-             year_el = info_boxes[1].css_first("span.text-sm.opacity-60")
-             if year_el:
-                 year_text = year_el.text(strip=True)
-                 year = year_text.split(" ")[-1] if " " in year_text else year_text
-
-         description_el = secici.css_first("div.mt-2.text-sm")
-         description = description_el.text(strip=True) if description_el else None
-
-         tags_el = secici.css_first("div.poster.poster h3")
-         tags = [t.strip() for t in tags_el.text(strip=True).split(",")] if tags_el else []
-
-         actors = [h5.text(strip=True) for h5 in secici.css("div.global-box h5")]
-
-         episodeses = []
-         # Seasons links iteration
-         season_links = secici.css("div.flex.items-center.flex-wrap.gap-2.mb-4 a")
-         for sezon in season_links:
-             sezon_href = self.fix_url(sezon.attrs.get("href"))
-             sezon_req = await self.httpx.get(sezon_href)
-
-             season_num = None
-             try:
-                 # URL'den sezon numarasını çek: ...-sezon-X
-                 season_match = re.search(r"sezon-(\d+)", sezon_href)
-                 if season_match:
-                     season_num = int(season_match.group(1))
-             except:
-                 pass
-
-             sezon_secici = HTMLParser(sezon_req.text)
-             for bolum in sezon_secici.css("div.episodes div.cursor-pointer"):
-                 # Kotlin: bolum.select("a").last()
-                 links = bolum.css("a")
-                 if not links:
-                     continue
-
-                 ep_link = links[-1]
-                 ep_name = ep_link.text(strip=True)
-                 ep_href = self.fix_url(ep_link.attrs.get("href"))
+         next_data = loads(next_data_text)
+         secure_data = next_data.get("props", {}).get("pageProps", {}).get("secureData")
+         if not secure_data:
+             return None
+
+         decrypted = await self.decrypt_response(secure_data)
+         content = decrypted.get("contentItem", {})
+         if not content:
+             return None
+
+         title = content.get("original_title") or content.get("used_title")
+         description = content.get("description") or content.get("used_description")
+         rating = content.get("imdb_point") or content.get("local_vote_avg")
+         year = content.get("release_year")
+
+         # Poster and Backdrop - prefer backdrop if available for SeriesInfo
+         poster = self.fix_poster_url(self.fix_url(content.get("back_url") or content.get("poster_url")))
+
+         # Tags
+         tags = []
+         categories = decrypted.get("RelatedResults", {}).get("getSerieCategoriesById", {}).get("result", [])
+         for cat in categories:
+             tags.append(cat.get("name"))
+
+         # Actors
+         actors = []
+         casts = decrypted.get("RelatedResults", {}).get("getSerieCastsById", {}).get("result", [])
+         for cast in casts:
+             actors.append(cast.get("name"))
+
+         # Episodes
+         episodes = []
+         seasons_data = decrypted.get("RelatedResults", {}).get("getSerieSeasonAndEpisodes", {}).get("result", [])
+         for season_item in seasons_data:
+             season_num = season_item.get("season_no")
+             for ep_item in season_item.get("episodes", []):
+                 ep_num = ep_item.get("episode_no")
+                 ep_slug = ep_item.get("used_slug")
+                 ep_name = ep_item.get("episode_text") or ""
 
-                 # Episode number (first link's text usually)
-                 ep_num = None
-                 try:
-                     ep_num = int(links[0].text(strip=True))
-                 except:
-                     pass
-
-                 episodeses.append(Episode(
+                 # Filter out duplicate language entries if any (we just need one link per episode)
+                 # Usually they share the same slug for the episode page
+                 if any(e.season == season_num and e.episode == ep_num for e in episodes):
+                     continue
+
+                 episodes.append(Episode(
                      season = season_num,
                      episode = ep_num,
                      title = ep_name,
-                     url = ep_href
+                     url = self.fix_url(f"{self.main_url}/{ep_slug}")
                  ))
 
          return SeriesInfo(
@@ -214,20 +199,21 @@ class Dizilla(PluginBase):
              title = title,
              description = description,
              tags = tags,
+             rating = str(rating) if rating else None,
              year = str(year) if year else None,
-             episodes = episodeses,
+             episodes = episodes,
              actors = actors
          )
 
      async def load_links(self, url: str) -> list[ExtractResult]:
          istek = await self.httpx.get(url)
-         secici = HTMLParser(istek.text)
+         secici = HTMLHelper(istek.text)
 
-         next_data_el = secici.css_first("script#__NEXT_DATA__")
-         if not next_data_el:
+         next_data_text = secici.select_text("script#__NEXT_DATA__")
+         if not next_data_text:
              return []
 
-         next_data = loads(next_data_el.text(strip=True))
+         next_data = loads(next_data_text)
          secure_data = next_data.get("props", {}).get("pageProps", {}).get("secureData", {})
          decrypted = await self.decrypt_response(secure_data)
          results = decrypted.get("RelatedResults", {}).get("getEpisodeSources", {}).get("result", [])
@@ -243,8 +229,8 @@ class Dizilla(PluginBase):
              cleaned_source = source_content.replace('"', '').replace('\\', '')
 
              # Parse cleaned HTML
-             iframe_el = HTMLParser(cleaned_source).css_first("iframe")
-             iframe_src = iframe_el.attrs.get("src") if iframe_el else None
+             iframe_secici = HTMLHelper(cleaned_source)
+             iframe_src = iframe_secici.select_attr("iframe", "src")
 
              # Referer check (matching Kotlin: loadExtractor(iframe, "${mainUrl}/", ...))
              iframe_url = self.fix_url(iframe_src) if iframe_src else None
@@ -253,4 +239,6 @@ class Dizilla(PluginBase):
              return []
 
          data = await self.extract(iframe_url, referer=f"{self.main_url}/", prefix=first_result.get('language_name', 'Unknown'))
-         return [data] if data else []
+         if not data:
+             return []
+         return data if isinstance(data, list) else [data]
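With this version, Dizilla's load_item and load_links both stop scraping Tailwind-classed markup and instead read the encrypted secureData blob from the page's Next.js __NEXT_DATA__ script, handing it to decrypt_response (AES via Crypto.Cipher, per the imports above). A hedged sketch of just the locating step, with a dummy payload and no decryption:

    # Sketch: locating props.pageProps.secureData inside __NEXT_DATA__ the way the new Dizilla code does.
    # The real plugin then passes the blob to decrypt_response (AES); that step is omitted here.
    from json import loads
    from bs4 import BeautifulSoup

    def find_secure_data(page_html: str) -> str | None:
        soup = BeautifulSoup(page_html, "html.parser")
        script = soup.select_one("script#__NEXT_DATA__")
        if not script:
            return None
        next_data = loads(script.get_text())
        return next_data.get("props", {}).get("pageProps", {}).get("secureData")

    sample = '<script id="__NEXT_DATA__">{"props":{"pageProps":{"secureData":"ENCRYPTED"}}}</script>'
    print(find_secure_data(sample))  # ENCRYPTED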