KekikStream 1.7.1 → 2.2.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. KekikStream/Core/Extractor/ExtractorBase.py +20 -9
  2. KekikStream/Core/Extractor/ExtractorLoader.py +25 -17
  3. KekikStream/Core/Extractor/ExtractorManager.py +53 -9
  4. KekikStream/Core/Extractor/ExtractorModels.py +5 -7
  5. KekikStream/Core/Extractor/YTDLPCache.py +35 -0
  6. KekikStream/Core/Media/MediaHandler.py +44 -26
  7. KekikStream/Core/Media/MediaManager.py +0 -3
  8. KekikStream/Core/Plugin/PluginBase.py +82 -22
  9. KekikStream/Core/Plugin/PluginLoader.py +11 -7
  10. KekikStream/Core/Plugin/PluginModels.py +25 -26
  11. KekikStream/Core/__init__.py +1 -0
  12. KekikStream/Extractors/CloseLoad.py +21 -7
  13. KekikStream/Extractors/ContentX.py +21 -6
  14. KekikStream/Extractors/DonilasPlay.py +86 -0
  15. KekikStream/Extractors/DzenRu.py +38 -0
  16. KekikStream/Extractors/ExPlay.py +53 -0
  17. KekikStream/Extractors/Filemoon.py +78 -0
  18. KekikStream/Extractors/HDPlayerSystem.py +41 -0
  19. KekikStream/Extractors/JetTv.py +45 -0
  20. KekikStream/Extractors/MailRu.py +3 -4
  21. KekikStream/Extractors/MixPlayHD.py +2 -3
  22. KekikStream/Extractors/MixTiger.py +57 -0
  23. KekikStream/Extractors/MolyStream.py +5 -5
  24. KekikStream/Extractors/Odnoklassniki.py +13 -7
  25. KekikStream/Extractors/PeaceMakerst.py +10 -5
  26. KekikStream/Extractors/PixelDrain.py +1 -2
  27. KekikStream/Extractors/PlayerFilmIzle.py +65 -0
  28. KekikStream/Extractors/RapidVid.py +23 -8
  29. KekikStream/Extractors/SetPlay.py +66 -0
  30. KekikStream/Extractors/SetPrime.py +45 -0
  31. KekikStream/Extractors/SibNet.py +2 -3
  32. KekikStream/Extractors/Sobreatsesuyp.py +4 -5
  33. KekikStream/Extractors/TRsTX.py +4 -5
  34. KekikStream/Extractors/TauVideo.py +2 -3
  35. KekikStream/Extractors/TurboImgz.py +2 -3
  36. KekikStream/Extractors/TurkeyPlayer.py +34 -0
  37. KekikStream/Extractors/VCTPlay.py +41 -0
  38. KekikStream/Extractors/VidHide.py +81 -0
  39. KekikStream/Extractors/VidMoly.py +55 -34
  40. KekikStream/Extractors/VidMoxy.py +2 -3
  41. KekikStream/Extractors/VidPapi.py +89 -0
  42. KekikStream/Extractors/VideoSeyred.py +3 -4
  43. KekikStream/Extractors/YTDLP.py +211 -0
  44. KekikStream/Extractors/YildizKisaFilm.py +41 -0
  45. KekikStream/Plugins/BelgeselX.py +196 -0
  46. KekikStream/Plugins/DiziBox.py +25 -34
  47. KekikStream/Plugins/DiziPal.py +24 -35
  48. KekikStream/Plugins/DiziYou.py +54 -37
  49. KekikStream/Plugins/Dizilla.py +66 -46
  50. KekikStream/Plugins/FilmBip.py +142 -0
  51. KekikStream/Plugins/FilmMakinesi.py +36 -28
  52. KekikStream/Plugins/FilmModu.py +20 -24
  53. KekikStream/Plugins/FullHDFilm.py +220 -0
  54. KekikStream/Plugins/FullHDFilmizlesene.py +9 -15
  55. KekikStream/Plugins/HDFilmCehennemi.py +141 -69
  56. KekikStream/Plugins/JetFilmizle.py +85 -52
  57. KekikStream/Plugins/KultFilmler.py +217 -0
  58. KekikStream/Plugins/RecTV.py +22 -34
  59. KekikStream/Plugins/RoketDizi.py +222 -0
  60. KekikStream/Plugins/SelcukFlix.py +328 -0
  61. KekikStream/Plugins/SetFilmIzle.py +252 -0
  62. KekikStream/Plugins/SezonlukDizi.py +54 -21
  63. KekikStream/Plugins/SineWix.py +17 -29
  64. KekikStream/Plugins/Sinefy.py +241 -0
  65. KekikStream/Plugins/SinemaCX.py +154 -0
  66. KekikStream/Plugins/Sinezy.py +143 -0
  67. KekikStream/Plugins/SuperFilmGeldi.py +130 -0
  68. KekikStream/Plugins/UgurFilm.py +13 -19
  69. KekikStream/__init__.py +47 -56
  70. KekikStream/requirements.txt +3 -4
  71. kekikstream-2.2.0.dist-info/METADATA +312 -0
  72. kekikstream-2.2.0.dist-info/RECORD +81 -0
  73. KekikStream/Extractors/FourCX.py +0 -7
  74. KekikStream/Extractors/FourPichive.py +0 -7
  75. KekikStream/Extractors/FourPlayRu.py +0 -7
  76. KekikStream/Extractors/HDStreamAble.py +0 -7
  77. KekikStream/Extractors/Hotlinger.py +0 -7
  78. KekikStream/Extractors/OkRuHTTP.py +0 -7
  79. KekikStream/Extractors/OkRuSSL.py +0 -7
  80. KekikStream/Extractors/Pichive.py +0 -7
  81. KekikStream/Extractors/PlayRu.py +0 -7
  82. KekikStream/Extractors/VidMolyMe.py +0 -7
  83. kekikstream-1.7.1.dist-info/METADATA +0 -109
  84. kekikstream-1.7.1.dist-info/RECORD +0 -63
  85. {kekikstream-1.7.1.dist-info → kekikstream-2.2.0.dist-info}/WHEEL +0 -0
  86. {kekikstream-1.7.1.dist-info → kekikstream-2.2.0.dist-info}/entry_points.txt +0 -0
  87. {kekikstream-1.7.1.dist-info → kekikstream-2.2.0.dist-info}/licenses/LICENSE +0 -0
  88. {kekikstream-1.7.1.dist-info → kekikstream-2.2.0.dist-info}/top_level.txt +0 -0
KekikStream/Plugins/RoketDizi.py
@@ -0,0 +1,222 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, MovieInfo
+ from parsel import Selector
+ import re, base64, json
+
+ class RoketDizi(PluginBase):
+     name = "RoketDizi"
+     lang = "tr"
+     main_url = "https://roketdizi.to"
+     favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+     description = "Türkiye'nin en tatlış yabancı dizi izleme sitesi. Türkçe dublaj, altyazılı, eski ve yeni yabancı dizilerin yanı sıra kore (asya) dizileri izleyebilirsiniz."
+
+     main_page = {
+         f"{main_url}/dizi/tur/aksiyon" : "Aksiyon",
+         f"{main_url}/dizi/tur/bilim-kurgu" : "Bilim Kurgu",
+         f"{main_url}/dizi/tur/gerilim" : "Gerilim",
+         f"{main_url}/dizi/tur/fantastik" : "Fantastik",
+         f"{main_url}/dizi/tur/komedi" : "Komedi",
+         f"{main_url}/dizi/tur/korku" : "Korku",
+         f"{main_url}/dizi/tur/macera" : "Macera",
+         f"{main_url}/dizi/tur/suc" : "Suç"
+     }
+
+     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         istek = await self.httpx.get(f"{url}?&page={page}")
+         secici = Selector(istek.text)
+
+         results = []
+
+         for item in secici.css("div.w-full.p-4 span.bg-\\[\\#232323\\]"):
+             title = item.css("span.font-normal.line-clamp-1::text").get()
+             href = item.css("a::attr(href)").get()
+             poster = item.css("img::attr(src)").get()
+
+             if title and href:
+                 results.append(MainPageResult(
+                     category = category,
+                     title = self.clean_title(title),
+                     url = self.fix_url(href),
+                     poster = self.fix_url(poster)
+                 ))
+
+         return results
+
+     async def search(self, query: str) -> list[SearchResult]:
+         istek = await self.httpx.post(
+             url = f"{self.main_url}/api/bg/searchContent?searchterm={query}",
+             headers = {
+                 "Accept" : "application/json, text/javascript, */*; q=0.01",
+                 "X-Requested-With" : "XMLHttpRequest",
+                 "Referer" : f"{self.main_url}/",
+             }
+         )
+
+         try:
+             veri = istek.json()
+             encoded = veri.get("response", "")
+             if not encoded:
+                 return []
+
+             decoded = base64.b64decode(encoded).decode("utf-8")
+             veri = json.loads(decoded)
+
+             if not veri.get("state"):
+                 return []
+
+             results = []
+
+             for item in veri.get("result", []):
+                 title = item.get("object_name", "")
+                 slug = item.get("used_slug", "")
+                 poster = item.get("object_poster_url", "")
+
+                 if title and slug:
+                     results.append(SearchResult(
+                         title = self.clean_title(title.strip()),
+                         url = self.fix_url(f"{self.main_url}/{slug}"),
+                         poster = self.fix_url(poster) if poster else None
+                     ))
+
+             return results
+
+         except Exception:
+             return []
+
+     async def load_item(self, url: str) -> SeriesInfo:
+         # Note: Handling both Movie and Series logic in one, returning SeriesInfo generally or MovieInfo
+         resp = await self.httpx.get(url)
+         sel = Selector(resp.text)
+
+         title = sel.css("h1.text-white::text").get()
+         poster = sel.css("div.w-full.page-top img::attr(src)").get()
+         description = sel.css("div.mt-2.text-sm::text").get()
+
+         # Tags - genre info (from the Details section)
+         tags = []
+         genre_text = sel.css("h3.text-white.opacity-90::text").get()
+         if genre_text:
+             tags = [t.strip() for t in genre_text.split(",")]
+
+         # Rating
+         rating = sel.css("span.text-white.text-sm.font-bold::text").get()
+
+         # Year and Actors - from the Details section
+         year = None
+         actors = []
+
+         # Grab every flex-col div in the Details section
+         detail_items = sel.css("div.flex.flex-col")
+         for item in detail_items:
+             # Label/value structure: span.text-base and span.text-sm.opacity-90
+             label = item.css("span.text-base::text").get()
+             value = item.css("span.text-sm.opacity-90::text").get()
+
+             if label and value:
+                 label = label.strip()
+                 value = value.strip()
+
+                 # Release date (year)
+                 if label == "Yayın tarihi":
+                     # Extract the year from the "16 Ekim 2018" format
+                     year_match = re.search(r'\d{4}', value)
+                     if year_match:
+                         year = year_match.group()
+
+                 # Creators or Actors
+                 elif label in ["Yaratıcılar", "Oyuncular"]:
+                     if value:
+                         actors.append(value)
+
+         # Check urls for episodes
+         all_urls = re.findall(r'"url":"([^"]*)"', resp.text)
+         is_series = any("bolum-" in u for u in all_urls)
+
+         episodes = []
+         if is_series:
+             # Use a dict to avoid duplicates while keeping them ordered
+             episodes_dict = {}
+             for u in all_urls:
+                 if "bolum" in u and u not in episodes_dict:
+                     season_match = re.search(r'/sezon-(\d+)', u)
+                     ep_match = re.search(r'/bolum-(\d+)', u)
+
+                     season = int(season_match.group(1)) if season_match else 1
+                     episode_num = int(ep_match.group(1)) if ep_match else 1
+
+                     # Use the (season, episode) tuple as the key
+                     key = (season, episode_num)
+                     episodes_dict[key] = Episode(
+                         season = season,
+                         episode = episode_num,
+                         title = f"{season}. Sezon {episode_num}. Bölüm",
+                         url = self.fix_url(u)
+                     )
+
+             # Build the list in sorted order
+             episodes = [episodes_dict[key] for key in sorted(episodes_dict.keys())]
+
+         return SeriesInfo(
+             title = title,
+             url = url,
+             poster = self.fix_url(poster),
+             description = description,
+             tags = tags,
+             rating = rating,
+             actors = actors,
+             episodes = episodes,
+             year = year
+         )
+
+     async def load_links(self, url: str) -> list[ExtractResult]:
+         resp = await self.httpx.get(url)
+         sel = Selector(resp.text)
+
+         next_data = sel.css("script#__NEXT_DATA__::text").get()
+         if not next_data:
+             return []
+
+         try:
+             data = json.loads(next_data)
+             secure_data = data["props"]["pageProps"]["secureData"]
+             decoded_json = json.loads(base64.b64decode(secure_data).decode('utf-8'))
+
+             # Read the RelatedResults -> getEpisodeSources -> result array inside secureData
+             sources = decoded_json.get("RelatedResults", {}).get("getEpisodeSources", {}).get("result", [])
+
+             seen_urls = set()
+             results = []
+             for source in sources:
+                 source_content = source.get("source_content", "")
+
+                 # Extract the iframe URL from source_content
+                 iframe_match = re.search(r'<iframe[^>]*src=["\']([^"\']*)["\']', source_content)
+                 if not iframe_match:
+                     continue
+
+                 iframe_url = iframe_match.group(1)
+
+                 # Fix URL protocol
+                 if not iframe_url.startswith("http"):
+                     if iframe_url.startswith("//"):
+                         iframe_url = "https:" + iframe_url
+                     else:
+                         iframe_url = "https://" + iframe_url
+
+                 iframe_url = self.fix_url(iframe_url)
+
+                 # Deduplicate
+                 if iframe_url in seen_urls:
+                     continue
+                 seen_urls.add(iframe_url)
+
+                 # Extract with helper
+                 data = await self.extract(iframe_url)
+                 if data:
+                     results.append(data)
+
+             return results
+
+         except Exception:
+             return []
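
Both new plugins consume site APIs that wrap their JSON result in a base64-encoded "response" field, as seen in RoketDizi.search above. Below is a minimal, self-contained sketch of that decode step; the helper name and the sample payload are illustrative only and not part of the package.

import base64, json

def decode_search_response(payload: dict) -> list[dict]:
    # `payload` is the parsed JSON body of /api/bg/searchContent,
    # e.g. {"response": "<base64 of {'state': ..., 'result': [...]}>"}.
    encoded = payload.get("response", "")
    if not encoded:
        return []

    inner = json.loads(base64.b64decode(encoded).decode("utf-8"))
    if not inner.get("state"):
        return []

    # Each entry carries object_name, used_slug and object_poster_url,
    # which the plugin maps onto SearchResult objects.
    return inner.get("result", [])

# Round-trip check with a fabricated payload (not real site data):
sample  = {"state": True, "result": [{"object_name": "Example", "used_slug": "dizi/example"}]}
wrapped = {"response": base64.b64encode(json.dumps(sample).encode("utf-8")).decode("ascii")}
assert decode_search_response(wrapped)[0]["used_slug"] == "dizi/example"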
KekikStream/Plugins/SelcukFlix.py
@@ -0,0 +1,328 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult
+ from parsel import Selector
+ import re, base64, json, urllib.parse
+
+ class SelcukFlix(PluginBase):
+     name = "SelcukFlix"
+     lang = "tr"
+     main_url = "https://selcukflix.net"
+     favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+     description = "Selcukflix'te her türden en yeni ve en popüler dizi ve filmleri izlemenin keyfini çıkarın. Aksiyondan romantiğe, bilim kurgudan dramaya, geniş kütüphanemizde herkes için bir şey var."
+
+     main_page = {
+         f"{main_url}/tum-bolumler" : "Yeni Eklenen Bölümler",
+         "" : "Yeni Diziler",
+         "" : "Kore Dizileri",
+         "" : "Yerli Diziler",
+         "15" : "Aile",
+         "17" : "Animasyon",
+         "9" : "Aksiyon",
+         "5" : "Bilim Kurgu",
+         "2" : "Dram",
+         "12" : "Fantastik",
+         "18" : "Gerilim",
+         "3" : "Gizem",
+         "8" : "Korku",
+         "4" : "Komedi",
+         "7" : "Romantik"
+     }
+
+     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         results = []
+         if "tum-bolumler" in url:
+             try:
+                 resp = await self.httpx.get(url)
+                 sel = Selector(resp.text)
+
+                 for item in sel.css("div.col-span-3 a"):
+                     name = item.css("h2::text").get()
+                     ep_info = item.css("div.opacity-80::text").get()
+                     href = item.css("::attr(href)").get()
+                     poster = item.css("div.image img::attr(src)").get()
+
+                     if name and href:
+                         title = f"{name} - {ep_info}" if ep_info else name
+                         final_url = self.fix_url(href)
+
+                         if "/dizi/" in final_url and "/sezon-" in final_url:
+                             final_url = final_url.split("/sezon-")[0]
+
+                         results.append(MainPageResult(
+                             category = category,
+                             title = title,
+                             url = final_url,
+                             poster = self.fix_url(poster)
+                         ))
+             except Exception:
+                 pass
+             return results
+
+         base_api = f"{self.main_url}/api/bg/findSeries"
+
+         params = {
+             "releaseYearStart" : "1900",
+             "releaseYearEnd" : "2026",
+             "imdbPointMin" : "1",
+             "imdbPointMax" : "10",
+             "categoryIdsComma" : "",
+             "countryIdsComma" : "",
+             "orderType" : "date_desc",
+             "languageId" : "-1",
+             "currentPage" : page,
+             "currentPageCount" : "24",
+             "queryStr" : "",
+             "categorySlugsComma" : "",
+             "countryCodesComma" : ""
+         }
+
+         if "Yerli Diziler" in category:
+             params["imdbPointMin"] = "5"
+             params["countryIdsComma"] = "29"
+         elif "Kore Dizileri" in category:
+             params["countryIdsComma"] = "21"
+             params["countryCodesComma"] = "KR"
+         else:
+             params["categoryIdsComma"] = url
+
+         full_url = f"{base_api}?{urllib.parse.urlencode(params)}"
+
+         headers = {
+             "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0",
+             "Accept" : "application/json, text/plain, */*",
+             "Accept-Language" : "en-US,en;q=0.5",
+             "X-Requested-With" : "XMLHttpRequest",
+             "Sec-Fetch-Site" : "same-origin",
+             "Sec-Fetch-Mode" : "cors",
+             "Sec-Fetch-Dest" : "empty",
+             "Referer" : f"{self.main_url}/"
+         }
+
+         try:
+             post_resp = await self.httpx.post(full_url, headers=headers)
+             resp_json = post_resp.json()
+             response_data = resp_json.get("response")
+
+             raw_data = base64.b64decode(response_data)
+             try:
+                 decoded_str = raw_data.decode('utf-8')
+             except UnicodeDecodeError:
+                 decoded_str = raw_data.decode('iso-8859-1').encode('utf-8').decode('utf-8')
+
+             data = json.loads(decoded_str)
+
+             for item in data.get("result", []):
+                 title = item.get("title")
+                 slug = item.get("slug")
+                 poster = item.get("poster")
+
+                 if poster:
+                     poster = self.clean_image_url(poster)
+
+                 if slug:
+                     results.append(MainPageResult(
+                         category = category,
+                         title = title,
+                         url = self.fix_url(slug),
+                         poster = poster
+                     ))
+
+         except Exception:
+             pass
+
+         return results
+
+     async def search(self, query: str) -> list[SearchResult]:
+         search_url = f"{self.main_url}/api/bg/searchcontent?searchterm={query}"
+
+         headers = {
+             "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0",
+             "Accept" : "application/json, text/plain, */*",
+             "Accept-Language" : "en-US,en;q=0.5",
+             "X-Requested-With" : "XMLHttpRequest",
+             "Sec-Fetch-Site" : "same-origin",
+             "Sec-Fetch-Mode" : "cors",
+             "Sec-Fetch-Dest" : "empty",
+             "Referer" : f"{self.main_url}/"
+         }
+
+         post_resp = await self.httpx.post(search_url, headers=headers)
+
+         try:
+             resp_json = post_resp.json()
+             response_data = resp_json.get("response")
+             raw_data = base64.b64decode(response_data)
+             try:
+                 decoded_str = raw_data.decode('utf-8')
+             except UnicodeDecodeError:
+                 decoded_str = raw_data.decode('iso-8859-1')
+
+             search_data = json.loads(decoded_str)
+
+             results = []
+             for item in search_data.get("result", []):
+                 # API field names: object_name, used_slug, object_poster_url
+                 title = item.get("object_name") or item.get("title")
+                 slug = item.get("used_slug") or item.get("slug")
+                 poster = item.get("object_poster_url") or item.get("poster")
+
+                 if poster:
+                     poster = self.clean_image_url(poster)
+
+                 if slug and "/seri-filmler/" not in slug:
+                     results.append(SearchResult(
+                         title = title,
+                         url = self.fix_url(slug),
+                         poster = poster
+                     ))
+
+             return results
+
+         except Exception:
+             return []
+
+     async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
+         resp = await self.httpx.get(url)
+         sel = Selector(resp.text)
+
+         next_data = sel.css("script#__NEXT_DATA__::text").get()
+         if not next_data:
+             return None
+
+         data = json.loads(next_data)
+         secure_data = data["props"]["pageProps"]["secureData"]
+         raw_data = base64.b64decode(secure_data.replace('"', ''))
+         try:
+             decoded_str = raw_data.decode('utf-8')
+         except UnicodeDecodeError:
+             decoded_str = raw_data.decode('iso-8859-1')
+
+         content_details = json.loads(decoded_str)
+         item = content_details.get("contentItem", {})
+
+         title = item.get("original_title") or item.get("originalTitle") or ""
+         poster = self.clean_image_url(item.get("poster_url") or item.get("posterUrl"))
+         description = item.get("description") or item.get("used_description")
+         rating = str(item.get("imdb_point") or item.get("imdbPoint", ""))
+         year = item.get("release_year") or item.get("releaseYear")
+         duration = item.get("total_minutes") or item.get("totalMinutes")
+
+         series_data = content_details.get("relatedData", {}).get("seriesData")
+         if not series_data and "RelatedResults" in content_details:
+             series_data = content_details["RelatedResults"].get("getSerieSeasonAndEpisodes", {}).get("result")
+             if series_data and isinstance(series_data, list):
+                 pass
+
+         # Decide between series and movie (per the Kotlin reference)
+         if series_data:
+             episodes = []
+             seasons_list = []
+             if isinstance(series_data, dict):
+                 seasons_list = series_data.get("seasons", [])
+             elif isinstance(series_data, list):
+                 seasons_list = series_data
+
+             for season in seasons_list:
+                 if not isinstance(season, dict): continue
+                 s_no = season.get("season_no") or season.get("seasonNo")
+                 ep_list = season.get("episodes", [])
+                 for ep in ep_list:
+                     episodes.append(Episode(
+                         season = s_no,
+                         episode = ep.get("episode_no") or ep.get("episodeNo"),
+                         title = ep.get("ep_text") or ep.get("epText"),
+                         url = self.fix_url(ep.get("used_slug") or ep.get("usedSlug"))
+                     ))
+
+             return SeriesInfo(
+                 title = title,
+                 url = url,
+                 poster = poster,
+                 description = description,
+                 rating = rating,
+                 year = year,
+                 episodes = episodes
+             )
+         else:
+             # It is a movie, so return MovieInfo
+             return MovieInfo(
+                 title = title,
+                 url = url,
+                 poster = poster,
+                 description = description,
+                 rating = rating,
+                 year = year,
+                 duration = duration
+             )
+
+     async def load_links(self, url: str) -> list[ExtractResult]:
+         resp = await self.httpx.get(url)
+         sel = Selector(resp.text)
+
+         next_data = sel.css("script#__NEXT_DATA__::text").get()
+         if not next_data:
+             return []
+
+         try:
+             data = json.loads(next_data)
+             secure_data = data["props"]["pageProps"]["secureData"]
+             raw_data = base64.b64decode(secure_data.replace('"', ''))
+
+             try:
+                 decoded_str = raw_data.decode('utf-8')
+             except UnicodeDecodeError:
+                 decoded_str = raw_data.decode('iso-8859-1')
+
+             content_details = json.loads(decoded_str)
+             related_results = content_details.get("RelatedResults", {})
+
+             source_content = None
+
+             # Series (episode) pages
+             if "/dizi/" in url:
+                 episode_sources = related_results.get("getEpisodeSources", {})
+                 if episode_sources.get("state"):
+                     res = episode_sources.get("result", [])
+                     if res:
+                         source_content = res[0].get("source_content") or res[0].get("sourceContent")
+             else:
+                 # Movie pages
+                 movie_parts = related_results.get("getMoviePartsById", {})
+                 if movie_parts.get("state"):
+                     parts = movie_parts.get("result", [])
+                     if parts:
+                         first_part_id = parts[0].get("id")
+                         key = f"getMoviePartSourcesById_{first_part_id}"
+                         if key in related_results:
+                             res = related_results[key].get("result", [])
+                             if res:
+                                 source_content = res[0].get("source_content") or res[0].get("sourceContent")
+
+             if source_content:
+                 iframe_sel = Selector(source_content)
+                 iframe_src = iframe_sel.css("iframe::attr(src)").get()
+                 if iframe_src:
+                     iframe_src = self.fix_url(iframe_src)
+                     # Hotlinger domain swap (per the Kotlin reference)
+                     if "sn.dplayer74.site" in iframe_src:
+                         iframe_src = iframe_src.replace("sn.dplayer74.site", "sn.hotlinger.com")
+
+                     data = await self.extract(iframe_src)
+                     if data:
+                         return [data]
+
+             return []
+
+         except Exception:
+             return []
+
+     def clean_image_url(self, url: str) -> str:
+         if not url: return None
+         url = url.replace("images-macellan-online.cdn.ampproject.org/i/s/", "")
+         url = url.replace("file.dizilla.club", "file.macellan.online")
+         url = url.replace("images.dizilla.club", "images.macellan.online")
+         url = url.replace("images.dizimia4.com", "images.macellan.online")
+         url = url.replace("file.dizimia4.com", "file.macellan.online")
+         url = url.replace("/f/f/", "/630/910/")
+         return self.fix_url(url)
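
SelcukFlix (like RoketDizi.load_links) pulls its metadata out of the Next.js __NEXT_DATA__ script tag, whose pageProps.secureData field is again a base64-encoded JSON document. The sketch below mirrors the decode path in load_item/load_links above, including the iso-8859-1 fallback for payloads that are not valid UTF-8; the function name and the fabricated page skeleton are illustrative only and not part of the package.

import base64, json
from parsel import Selector

def read_secure_data(html: str) -> dict:
    # Locate the Next.js data island and pull out props.pageProps.secureData.
    sel = Selector(html)
    next_data = sel.css("script#__NEXT_DATA__::text").get()
    if not next_data:
        return {}

    data = json.loads(next_data)
    secure = data["props"]["pageProps"]["secureData"].replace('"', "")
    raw = base64.b64decode(secure)
    try:
        text = raw.decode("utf-8")
    except UnicodeDecodeError:
        # Same fallback the plugin uses for non-UTF-8 payloads.
        text = raw.decode("iso-8859-1")
    return json.loads(text)

# Fabricated page skeleton for a quick self-test (not a real response):
inner = {"contentItem": {"original_title": "Example", "release_year": 2024}}
blob = base64.b64encode(json.dumps(inner).encode("utf-8")).decode("ascii")
page = {"props": {"pageProps": {"secureData": blob}}}
html = f'<script id="__NEXT_DATA__" type="application/json">{json.dumps(page)}</script>'
assert read_secure_data(html)["contentItem"]["release_year"] == 2024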