KekikStream 1.7.1__py3-none-any.whl → 2.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- KekikStream/Core/Extractor/ExtractorBase.py +13 -7
- KekikStream/Core/Extractor/ExtractorLoader.py +25 -17
- KekikStream/Core/Extractor/ExtractorManager.py +53 -9
- KekikStream/Core/Extractor/ExtractorModels.py +5 -7
- KekikStream/Core/Extractor/YTDLPCache.py +35 -0
- KekikStream/Core/Media/MediaHandler.py +44 -26
- KekikStream/Core/Media/MediaManager.py +0 -3
- KekikStream/Core/Plugin/PluginBase.py +21 -9
- KekikStream/Core/Plugin/PluginLoader.py +11 -7
- KekikStream/Core/Plugin/PluginModels.py +25 -26
- KekikStream/Core/__init__.py +1 -0
- KekikStream/Extractors/CloseLoad.py +4 -5
- KekikStream/Extractors/ContentX.py +4 -6
- KekikStream/Extractors/ContentX_.py +40 -0
- KekikStream/Extractors/DzenRu.py +38 -0
- KekikStream/Extractors/ExPlay.py +53 -0
- KekikStream/Extractors/FirePlayer.py +60 -0
- KekikStream/Extractors/HDPlayerSystem.py +41 -0
- KekikStream/Extractors/JetTv.py +45 -0
- KekikStream/Extractors/MailRu.py +3 -4
- KekikStream/Extractors/MixPlayHD.py +2 -3
- KekikStream/Extractors/MixTiger.py +57 -0
- KekikStream/Extractors/MolyStream.py +5 -5
- KekikStream/Extractors/Odnoklassniki.py +7 -7
- KekikStream/Extractors/{OkRuHTTP.py → Odnoklassniki_.py} +5 -1
- KekikStream/Extractors/PeaceMakerst.py +4 -5
- KekikStream/Extractors/{HDStreamAble.py → PeaceMakerst_.py} +1 -1
- KekikStream/Extractors/PixelDrain.py +1 -2
- KekikStream/Extractors/PlayerFilmIzle.py +62 -0
- KekikStream/Extractors/RapidVid.py +2 -3
- KekikStream/Extractors/RapidVid_.py +7 -0
- KekikStream/Extractors/SetPlay.py +57 -0
- KekikStream/Extractors/SetPrime.py +45 -0
- KekikStream/Extractors/SibNet.py +2 -3
- KekikStream/Extractors/Sobreatsesuyp.py +4 -5
- KekikStream/Extractors/TRsTX.py +4 -5
- KekikStream/Extractors/TauVideo.py +2 -3
- KekikStream/Extractors/TurboImgz.py +2 -3
- KekikStream/Extractors/TurkeyPlayer.py +34 -0
- KekikStream/Extractors/VidHide.py +72 -0
- KekikStream/Extractors/VidMoly.py +4 -5
- KekikStream/Extractors/{VidMolyMe.py → VidMoly_.py} +1 -1
- KekikStream/Extractors/VidMoxy.py +2 -3
- KekikStream/Extractors/VidPapi.py +89 -0
- KekikStream/Extractors/VideoSeyred.py +3 -4
- KekikStream/Extractors/YTDLP.py +177 -0
- KekikStream/Extractors/YildizKisaFilm.py +41 -0
- KekikStream/Plugins/DiziBox.py +18 -23
- KekikStream/Plugins/DiziPal.py +16 -16
- KekikStream/Plugins/DiziYou.py +48 -23
- KekikStream/Plugins/Dizilla.py +47 -32
- KekikStream/Plugins/FilmBip.py +145 -0
- KekikStream/Plugins/FilmMakinesi.py +6 -8
- KekikStream/Plugins/FilmModu.py +9 -9
- KekikStream/Plugins/FullHDFilm.py +164 -0
- KekikStream/Plugins/FullHDFilmizlesene.py +4 -8
- KekikStream/Plugins/HDFilmCehennemi.py +15 -19
- KekikStream/Plugins/JetFilmizle.py +67 -49
- KekikStream/Plugins/KultFilmler.py +219 -0
- KekikStream/Plugins/RecTV.py +18 -22
- KekikStream/Plugins/RoketDizi.py +232 -0
- KekikStream/Plugins/SelcukFlix.py +309 -0
- KekikStream/Plugins/SezonlukDizi.py +12 -13
- KekikStream/Plugins/SineWix.py +8 -12
- KekikStream/Plugins/Sinefy.py +238 -0
- KekikStream/Plugins/SinemaCX.py +157 -0
- KekikStream/Plugins/Sinezy.py +146 -0
- KekikStream/Plugins/SuperFilmGeldi.py +121 -0
- KekikStream/Plugins/UgurFilm.py +7 -11
- KekikStream/__init__.py +34 -24
- KekikStream/requirements.txt +3 -4
- kekikstream-2.0.2.dist-info/METADATA +309 -0
- kekikstream-2.0.2.dist-info/RECORD +82 -0
- KekikStream/Extractors/FourCX.py +0 -7
- KekikStream/Extractors/FourPichive.py +0 -7
- KekikStream/Extractors/FourPlayRu.py +0 -7
- KekikStream/Extractors/Hotlinger.py +0 -7
- KekikStream/Extractors/OkRuSSL.py +0 -7
- KekikStream/Extractors/Pichive.py +0 -7
- KekikStream/Extractors/PlayRu.py +0 -7
- kekikstream-1.7.1.dist-info/METADATA +0 -109
- kekikstream-1.7.1.dist-info/RECORD +0 -63
- {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/WHEEL +0 -0
- {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/entry_points.txt +0 -0
- {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/licenses/LICENSE +0 -0
- {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
|
|
2
|
+
|
|
3
|
+
from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, MovieInfo
|
|
4
|
+
from parsel import Selector
|
|
5
|
+
import re, base64, json
|
|
6
|
+
|
|
7
|
+
class RoketDizi(PluginBase):
    """Site plugin for roketdizi.to.

    Provides category listings, search (via the site's base64-wrapped JSON
    search API), series metadata with episode discovery, and player iframe
    extraction from the Next.js ``__NEXT_DATA__`` payload.
    """

    name        = "RoketDizi"
    lang        = "tr"
    main_url    = "https://roketdizi.to"
    # Favicon resolved through Google's favicon service for the site domain.
    favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
    description = "Türkiye'nin en tatlış yabancı dizi izleme sitesi. Türkçe dublaj, altyazılı, eski ve yeni yabancı dizilerin yanı sıra kore (asya) dizileri izleyebilirsiniz."

    # Category listing URL -> human-readable category name.
    main_page   = {
        f"{main_url}/dizi/tur/aksiyon"     : "Aksiyon",
        f"{main_url}/dizi/tur/bilim-kurgu" : "Bilim Kurgu",
        f"{main_url}/dizi/tur/gerilim"     : "Gerilim",
        f"{main_url}/dizi/tur/fantastik"   : "Fantastik",
        f"{main_url}/dizi/tur/komedi"      : "Komedi",
        f"{main_url}/dizi/tur/korku"       : "Korku",
        f"{main_url}/dizi/tur/macera"      : "Macera",
        f"{main_url}/dizi/tur/suc"         : "Suç"
    }

    async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
        """Scrape one page of a category listing and return its entries."""
        istek  = await self.httpx.get(f"{url}?&page={page}")
        secici = Selector(istek.text)

        results = []

        # Each card is a span with the literal CSS class "bg-[#232323]"
        # (brackets and '#' escaped for the CSS selector).
        for item in secici.css("div.w-full.p-4 span.bg-\\[\\#232323\\]"):
            title  = item.css("span.font-normal.line-clamp-1::text").get()
            href   = item.css("a::attr(href)").get()
            poster = item.css("img::attr(src)").get()

            if title and href:
                results.append(MainPageResult(
                    category = category,
                    title    = self.clean_title(title),
                    url      = self.fix_url(href),
                    poster   = self.fix_url(poster)
                ))

        return results

    async def search(self, query: str) -> list[SearchResult]:
        """Query the site's search API.

        The API's ``response`` field carries base64-encoded JSON; it is
        decoded and parsed before results are extracted. Any failure is
        treated as "no results".
        """
        istek = await self.httpx.post(
            url     = f"{self.main_url}/api/bg/searchContent?searchterm={query}",
            headers = {
                "Accept"           : "application/json, text/javascript, */*; q=0.01",
                "X-Requested-With" : "XMLHttpRequest",
                "Referer"          : f"{self.main_url}/",
            }
        )

        try:
            veri    = istek.json()
            encoded = veri.get("response", "")
            if not encoded:
                return []

            # "response" is base64(JSON); decode, then parse.
            decoded = base64.b64decode(encoded).decode("utf-8")
            veri    = json.loads(decoded)

            # "state" flags whether the search succeeded server-side.
            if not veri.get("state"):
                return []

            results = []

            for item in veri.get("result", []):
                title  = item.get("object_name", "")
                slug   = item.get("used_slug", "")
                poster = item.get("object_poster_url", "")

                if title and slug:
                    results.append(SearchResult(
                        title  = self.clean_title(title.strip()),
                        url    = self.fix_url(f"{self.main_url}/{slug}"),
                        poster = self.fix_url(poster) if poster else None
                    ))

            return results

        except Exception:
            # Best-effort: any decoding/parsing failure yields an empty list.
            return []

    async def load_item(self, url: str) -> SeriesInfo:
        """Load a content page and build a SeriesInfo from it."""
        # Note: Handling both Movie and Series logic in one, returning SeriesInfo generally or MovieInfo
        resp = await self.httpx.get(url)
        sel  = Selector(resp.text)

        title       = sel.css("h1.text-white::text").get()
        poster      = sel.css("div.w-full.page-top img::attr(src)").get()
        description = sel.css("div.mt-2.text-sm::text").get()

        # Tags - genre info (listed in the "Detaylar" details section)
        tags = []
        genre_text = sel.css("h3.text-white.opacity-90::text").get()
        if genre_text:
            tags = [t.strip() for t in genre_text.split(",")]

        # Rating
        rating = sel.css("span.text-white.text-sm.font-bold::text").get()

        # Year and actors - taken from the "Detaylar" (Details) section
        year   = None
        actors = []

        # Collect every flex-col div in the details section
        detail_items = sel.css("div.flex.flex-col")
        for item in detail_items:
            # Label/value layout: span.text-base and span.text-sm.opacity-90
            label = item.css("span.text-base::text").get()
            value = item.css("span.text-sm.opacity-90::text").get()

            if label and value:
                label = label.strip()
                value = value.strip()

                # Release date (year)
                if label == "Yayın tarihi":
                    # Extract the year from a date like "16 Ekim 2018"
                    year_match = re.search(r'\d{4}', value)
                    if year_match:
                        year = year_match.group()

                # Creators or cast
                elif label in ["Yaratıcılar", "Oyuncular"]:
                    if value:
                        actors.append(value)

        # Check urls for episodes: scan all "url" fields embedded in the page JSON.
        all_urls = re.findall(r'"url":"([^"]*)"', resp.text)
        is_series = any("bolum-" in u for u in all_urls)

        episodes = []
        if is_series:
            # Use a dict to drop duplicates while keeping insertion order.
            episodes_dict = {}
            for u in all_urls:
                # NOTE(review): episodes_dict is keyed by (season, episode) tuples,
                # so `u not in episodes_dict` is always True for a URL string;
                # deduplication actually happens via the key overwrite below.
                if "bolum" in u and u not in episodes_dict:
                    season_match = re.search(r'/sezon-(\d+)', u)
                    ep_match = re.search(r'/bolum-(\d+)', u)

                    season = int(season_match.group(1)) if season_match else 1
                    episode_num = int(ep_match.group(1)) if ep_match else 1

                    # Key on the (season, episode) tuple
                    key = (season, episode_num)
                    episodes_dict[key] = Episode(
                        season  = season,
                        episode = episode_num,
                        title   = f"{season}. Sezon {episode_num}. Bölüm",
                        url     = self.fix_url(u)
                    )

            # Build the list ordered by (season, episode)
            episodes = [episodes_dict[key] for key in sorted(episodes_dict.keys())]

        return SeriesInfo(
            title       = title,
            url         = url,
            poster      = self.fix_url(poster),
            description = description,
            tags        = tags,
            rating      = rating,
            actors      = actors,
            episodes    = episodes,
            year        = year
        )

    async def load_links(self, url: str) -> list[dict]:
        """Extract player iframe URLs for an episode from the Next.js ``__NEXT_DATA__`` payload.

        Returns a list of ``{"url": ..., "name": ...}`` dicts; empty on any failure.
        """
        resp = await self.httpx.get(url)
        sel  = Selector(resp.text)

        next_data = sel.css("script#__NEXT_DATA__::text").get()
        if not next_data:
            return []

        try:
            data         = json.loads(next_data)
            secure_data  = data["props"]["pageProps"]["secureData"]
            # secureData is base64(JSON), same envelope as the search API.
            decoded_json = json.loads(base64.b64decode(secure_data).decode('utf-8'))

            # Take the RelatedResults -> getEpisodeSources -> result array from secureData
            sources = decoded_json.get("RelatedResults", {}).get("getEpisodeSources", {}).get("result", [])

            results = []
            for source in sources:
                source_content = source.get("source_content", "")

                # Extract the iframe URL from source_content (an HTML snippet)
                iframe_match = re.search(r'<iframe[^>]*src=["\']([^"\']*)["\']', source_content)
                if not iframe_match:
                    continue

                iframe_url = iframe_match.group(1)
                # Normalize protocol-relative or bare URLs to https.
                if "http" not in iframe_url:
                    if iframe_url.startswith("//"):
                        iframe_url = "https:" + iframe_url
                    else:
                        iframe_url = "https://" + iframe_url

                # Check whether a known extractor handles this host.
                extractor = self.ex_manager.find_extractor(iframe_url)
                ext_name  = extractor.name if extractor else ""

                # Read metadata fields
                source_name   = source.get("source_name", "")
                language_name = source.get("language_name", "")
                quality_name  = source.get("quality_name", "")

                # Build the display name from whichever parts are present.
                name_parts = []
                if source_name:
                    name_parts.append(source_name)
                if ext_name:
                    name_parts.append(ext_name)
                if language_name:
                    name_parts.append(language_name)
                if quality_name:
                    name_parts.append(quality_name)

                results.append({
                    "url"  : iframe_url,
                    "name" : " | ".join(name_parts)
                })

            return results

        except Exception:
            return []
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
|
|
2
|
+
|
|
3
|
+
from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
|
|
4
|
+
from parsel import Selector
|
|
5
|
+
import re, base64, json, urllib.parse
|
|
6
|
+
|
|
7
|
+
class SelcukFlix(PluginBase):
    """Site plugin for selcukflix.net.

    Covers the "new episodes" HTML listing, the findSeries/searchcontent
    APIs (both return base64-encoded JSON in a ``response`` field),
    series/movie metadata from the Next.js ``__NEXT_DATA__`` payload, and
    player iframe extraction.
    """

    name        = "SelcukFlix"
    lang        = "tr"
    main_url    = "https://selcukflix.net"
    # Favicon resolved through Google's favicon service for the site domain.
    favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
    description = "Selcukflix'te her türden en yeni ve en popüler dizi ve filmleri izlemenin keyfini çıkarın. Aksiyondan romantiğe, bilim kurgudan dramaya, geniş kütüphanemizde herkes için bir şey var."

    # Key is either a full URL ("tum-bolumler" page), a category id passed to
    # the findSeries API, or "" for categories handled by name in get_main_page.
    # NOTE(review): the three "" keys collapse in the dict literal — only the
    # last one ("Yerli Diziler") survives at runtime, so "Yeni Diziler" and
    # "Kore Dizileri" are unreachable as written; confirm intent.
    main_page   = {
        f"{main_url}/tum-bolumler" : "Yeni Eklenen Bölümler",
        ""   : "Yeni Diziler",
        ""   : "Kore Dizileri",
        ""   : "Yerli Diziler",
        "15" : "Aile",
        "17" : "Animasyon",
        "9"  : "Aksiyon",
        "5"  : "Bilim Kurgu",
        "2"  : "Dram",
        "12" : "Fantastik",
        "18" : "Gerilim",
        "3"  : "Gizem",
        "8"  : "Korku",
        "4"  : "Komedi",
        "7"  : "Romantik"
    }

    async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
        """Return one page of a category: HTML scrape for "tum-bolumler", findSeries API otherwise."""
        results = []
        # The "new episodes" page is plain HTML; everything else goes through the API.
        if "tum-bolumler" in url:
            try:
                resp = await self.httpx.get(url)
                sel  = Selector(resp.text)

                for item in sel.css("div.col-span-3 a"):
                    name    = item.css("h2::text").get()
                    ep_info = item.css("div.opacity-80::text").get()
                    href    = item.css("::attr(href)").get()
                    poster  = item.css("div.image img::attr(src)").get()

                    if name and href:
                        title     = f"{name} - {ep_info}" if ep_info else name
                        final_url = self.fix_url(href)

                        # Episode links point into a season; collapse to the series page.
                        if "/dizi/" in final_url and "/sezon-" in final_url:
                            final_url = final_url.split("/sezon-")[0]

                        results.append(MainPageResult(
                            category = category,
                            title    = title,
                            url      = final_url,
                            poster   = self.fix_url(poster)
                        ))
            except Exception:
                # Best-effort scrape; return whatever was collected so far.
                pass
            return results

        base_api = f"{self.main_url}/api/bg/findSeries"

        # Default (widest) filter set for the findSeries API.
        params = {
            "releaseYearStart"   : "1900",
            "releaseYearEnd"     : "2026",
            "imdbPointMin"       : "1",
            "imdbPointMax"       : "10",
            "categoryIdsComma"   : "",
            "countryIdsComma"    : "",
            "orderType"          : "date_desc",
            "languageId"         : "-1",
            "currentPage"        : page,
            "currentPageCount"   : "24",
            "queryStr"           : "",
            "categorySlugsComma" : "",
            "countryCodesComma"  : ""
        }

        # Special categories filter by country; otherwise `url` holds a category id.
        if "Yerli Diziler" in category:
            params["imdbPointMin"]    = "5"
            params["countryIdsComma"] = "29"
        elif "Kore Dizileri" in category:
            params["countryIdsComma"]   = "21"
            params["countryCodesComma"] = "KR"
        else:
            params["categoryIdsComma"] = url

        full_url = f"{base_api}?{urllib.parse.urlencode(params)}"

        # Browser-like headers; the API expects an XHR request.
        headers = {
            "User-Agent"       : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0",
            "Accept"           : "application/json, text/plain, */*",
            "Accept-Language"  : "en-US,en;q=0.5",
            "X-Requested-With" : "XMLHttpRequest",
            "Sec-Fetch-Site"   : "same-origin",
            "Sec-Fetch-Mode"   : "cors",
            "Sec-Fetch-Dest"   : "empty",
            "Referer"          : f"{self.main_url}/"
        }

        try:
            post_resp     = await self.httpx.post(full_url, headers=headers)
            resp_json     = post_resp.json()
            response_data = resp_json.get("response")

            # "response" is base64(JSON); decode with a latin-1 fallback.
            raw_data = base64.b64decode(response_data)
            try:
                decoded_str = raw_data.decode('utf-8')
            except UnicodeDecodeError:
                # NOTE(review): the .encode('utf-8').decode('utf-8') round-trip
                # is a no-op after the iso-8859-1 decode.
                decoded_str = raw_data.decode('iso-8859-1').encode('utf-8').decode('utf-8')

            data = json.loads(decoded_str)

            for item in data.get("result", []):
                title  = item.get("title")
                slug   = item.get("slug")
                poster = item.get("poster")

                if poster:
                    poster = self.clean_image_url(poster)

                if slug:
                    results.append(MainPageResult(
                        category = category,
                        title    = title,
                        url      = self.fix_url(slug),
                        poster   = poster
                    ))

        except Exception:
            # Best-effort: API or decode failure leaves `results` as-is.
            pass

        return results

    async def search(self, query: str) -> list[SearchResult]:
        """Query the site's search API; payload is base64-encoded JSON, like findSeries."""
        search_url = f"{self.main_url}/api/bg/searchcontent?searchterm={query}"

        headers = {
            "Accept"           : "application/json, text/plain, */*",
            "X-Requested-With" : "XMLHttpRequest",
            "Referer"          : f"{self.main_url}/"
        }

        post_resp = await self.httpx.post(search_url, headers=headers)

        try:
            resp_json     = post_resp.json()
            response_data = resp_json.get("response")
            raw_data      = base64.b64decode(response_data)
            try:
                decoded_str = raw_data.decode('utf-8')
            except UnicodeDecodeError:
                decoded_str = raw_data.decode('iso-8859-1').encode('utf-8').decode('utf-8')

            search_data = json.loads(decoded_str)

            results = []
            for item in search_data.get("result", []):
                title  = item.get("title")
                slug   = item.get("slug")
                poster = item.get("poster")

                if poster:
                    poster = self.clean_image_url(poster)

                # Skip movie-collection ("seri-filmler") entries.
                if slug and "/seri-filmler/" not in slug:
                    results.append(SearchResult(
                        title  = title,
                        url    = self.fix_url(slug),
                        poster = poster
                    ))

            return results

        except Exception:
            return []

    async def load_item(self, url: str) -> SeriesInfo:
        """Build a SeriesInfo from the page's ``__NEXT_DATA__`` secureData payload."""
        resp = await self.httpx.get(url)
        sel  = Selector(resp.text)

        next_data = sel.css("script#__NEXT_DATA__::text").get()
        if not next_data:
            return None

        data        = json.loads(next_data)
        secure_data = data["props"]["pageProps"]["secureData"]
        # secureData may arrive quoted; strip quotes before base64-decoding.
        raw_data    = base64.b64decode(secure_data.replace('"', ''))
        try:
            decoded_str = raw_data.decode('utf-8')
        except UnicodeDecodeError:
            decoded_str = raw_data.decode('iso-8859-1')  # .encode('utf-8').decode('utf-8') implied

        content_details = json.loads(decoded_str)
        item = content_details.get("contentItem", {})

        # The payload uses snake_case or camelCase depending on endpoint; try both.
        title       = item.get("original_title") or item.get("originalTitle")
        poster      = self.clean_image_url(item.get("poster_url") or item.get("posterUrl"))
        description = item.get("description") or item.get("used_description")
        rating      = str(item.get("imdb_point") or item.get("imdbPoint", ""))

        series_data = content_details.get("relatedData", {}).get("seriesData")
        if not series_data and "RelatedResults" in content_details:
            series_data = content_details["RelatedResults"].get("getSerieSeasonAndEpisodes", {}).get("result")
            # NOTE(review): this nested check is a no-op (`pass`); the list case
            # is handled below via isinstance.
            if series_data and isinstance(series_data, list):
                pass

        episodes = []
        if series_data:
            # seriesData is either {"seasons": [...]} or a bare list of seasons.
            seasons_list = []
            if isinstance(series_data, dict):
                seasons_list = series_data.get("seasons", [])
            elif isinstance(series_data, list):
                seasons_list = series_data

            for season in seasons_list:
                if not isinstance(season, dict): continue
                s_no    = season.get("season_no") or season.get("seasonNo")  # Try snake_case too
                ep_list = season.get("episodes", [])
                for ep in ep_list:
                    episodes.append(Episode(
                        season  = s_no,
                        episode = ep.get("episode_no") or ep.get("episodeNo"),
                        title   = ep.get("ep_text") or ep.get("epText"),
                        url     = self.fix_url(ep.get("used_slug") or ep.get("usedSlug"))
                    ))

        return SeriesInfo(
            title       = title,
            url         = url,
            poster      = poster,
            description = description,
            rating      = rating,
            episodes    = episodes
        )

    async def load_links(self, url: str) -> list[dict]:
        """Extract the player iframe for an episode or movie page.

        Returns a list of ``{"url": ..., "name": ...}`` dicts; empty on any failure.
        """
        resp = await self.httpx.get(url)
        sel  = Selector(resp.text)

        next_data = sel.css("script#__NEXT_DATA__::text").get()
        if not next_data: return []

        try:
            data        = json.loads(next_data)
            secure_data = data["props"]["pageProps"]["secureData"]
            raw_data    = base64.b64decode(secure_data.replace('"', ''))
            try:
                decoded_str = raw_data.decode('utf-8')
            except UnicodeDecodeError:
                decoded_str = raw_data.decode('iso-8859-1')

            content_details = json.loads(decoded_str)
            related_data    = content_details.get("relatedData", {})

            source_content = None

            # Check if Series (episode) or Movie
            if "/dizi/" in url:
                if related_data.get("episodeSources", {}).get("state"):
                    res = related_data["episodeSources"].get("result", [])
                    if res:
                        source_content = res[0].get("sourceContent")
            else:
                # Movie
                if related_data.get("movieParts", {}).get("state"):
                    # Looking for first part source
                    movie_parts = related_data["movieParts"].get("result", [])
                    if movie_parts:
                        first_part_id = movie_parts[0].get("id")
                        # RelatedResults -> getMoviePartSourcesById_ID
                        rr  = content_details.get("RelatedResults", {})
                        key = f"getMoviePartSourcesById_{first_part_id}"
                        if key in rr:
                            res = rr[key].get("result", [])
                            if res:
                                source_content = res[0].get("source_content")

            results = []
            if source_content:
                # source_content is an HTML snippet containing the iframe.
                iframe_sel = Selector(source_content)
                iframe_src = iframe_sel.css("iframe::attr(src)").get()
                if iframe_src:
                    iframe_src = self.fix_url(iframe_src)
                    # Domain replace: rewrite a dead player host to its mirror.
                    if "sn.dplayer74.site" in iframe_src:
                        iframe_src = iframe_src.replace("sn.dplayer74.site", "sn.hotlinger.com")

                    extractor = self.ex_manager.find_extractor(iframe_src)
                    results.append({
                        "url": iframe_src,
                        "name": extractor.name if extractor else "Iframe"
                    })

            return results

        except Exception:
            return []

    def clean_image_url(self, url: str) -> str:
        """Normalize poster URLs: strip the AMP CDN prefix and map legacy hosts
        to macellan.online; returns None for empty input."""
        if not url: return None
        url = url.replace("images-macellan-online.cdn.ampproject.org/i/s/", "")
        url = url.replace("file.dizilla.club", "file.macellan.online")
        url = url.replace("images.dizilla.club", "images.macellan.online")
        url = url.replace("images.dizimia4.com", "images.macellan.online")
        url = url.replace("file.dizimia4.com", "file.macellan.online")
        # "/f/f/" is the thumbnail size slot; request the 630x910 rendition.
        url = url.replace("/f/f/", "/630/910/")
        return self.fix_url(url)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
|
|
2
2
|
|
|
3
|
-
from KekikStream.Core import
|
|
3
|
+
from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
|
|
4
4
|
from parsel import Selector
|
|
5
5
|
|
|
6
6
|
class SezonlukDizi(PluginBase):
|
|
@@ -21,9 +21,8 @@ class SezonlukDizi(PluginBase):
|
|
|
21
21
|
f"{main_url}/diziler.asp?siralama_tipi=id&kat=6&s=" : "Belgeseller",
|
|
22
22
|
}
|
|
23
23
|
|
|
24
|
-
#@kekik_cache(ttl=60*60)
|
|
25
24
|
async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
|
|
26
|
-
istek = await self.
|
|
25
|
+
istek = await self.httpx.get(f"{url}{page}")
|
|
27
26
|
secici = Selector(istek.text)
|
|
28
27
|
|
|
29
28
|
return [
|
|
@@ -36,9 +35,8 @@ class SezonlukDizi(PluginBase):
|
|
|
36
35
|
for veri in secici.css("div.afis a") if veri.css("div.description::text").get()
|
|
37
36
|
]
|
|
38
37
|
|
|
39
|
-
#@kekik_cache(ttl=60*60)
|
|
40
38
|
async def search(self, query: str) -> list[SearchResult]:
|
|
41
|
-
istek = await self.
|
|
39
|
+
istek = await self.httpx.get(f"{self.main_url}/diziler.asp?adi={query}")
|
|
42
40
|
secici = Selector(istek.text)
|
|
43
41
|
|
|
44
42
|
return [
|
|
@@ -50,9 +48,8 @@ class SezonlukDizi(PluginBase):
|
|
|
50
48
|
for afis in secici.css("div.afis a.column")
|
|
51
49
|
]
|
|
52
50
|
|
|
53
|
-
#@kekik_cache(ttl=60*60)
|
|
54
51
|
async def load_item(self, url: str) -> SeriesInfo:
|
|
55
|
-
istek = await self.
|
|
52
|
+
istek = await self.httpx.get(url)
|
|
56
53
|
secici = Selector(istek.text)
|
|
57
54
|
|
|
58
55
|
title = secici.css("div.header::text").get().strip()
|
|
@@ -63,14 +60,14 @@ class SezonlukDizi(PluginBase):
|
|
|
63
60
|
rating = secici.css("div.dizipuani a div::text").re_first(r"[\d.,]+")
|
|
64
61
|
actors = []
|
|
65
62
|
|
|
66
|
-
actors_istek = await self.
|
|
63
|
+
actors_istek = await self.httpx.get(f"{self.main_url}/oyuncular/{url.split('/')[-1]}")
|
|
67
64
|
actors_secici = Selector(actors_istek.text)
|
|
68
65
|
actors = [
|
|
69
66
|
actor.css("div.header::text").get().strip()
|
|
70
67
|
for actor in actors_secici.css("div.doubling div.ui")
|
|
71
68
|
]
|
|
72
69
|
|
|
73
|
-
episodes_istek = await self.
|
|
70
|
+
episodes_istek = await self.httpx.get(f"{self.main_url}/bolumler/{url.split('/')[-1]}")
|
|
74
71
|
episodes_secici = Selector(episodes_istek.text)
|
|
75
72
|
episodes = []
|
|
76
73
|
|
|
@@ -102,9 +99,8 @@ class SezonlukDizi(PluginBase):
|
|
|
102
99
|
actors = actors
|
|
103
100
|
)
|
|
104
101
|
|
|
105
|
-
#@kekik_cache(ttl=15*60)
|
|
106
102
|
async def load_links(self, url: str) -> list[dict]:
|
|
107
|
-
istek = await self.
|
|
103
|
+
istek = await self.httpx.get(url)
|
|
108
104
|
secici = Selector(istek.text)
|
|
109
105
|
|
|
110
106
|
bid = secici.css("div#dilsec::attr(data-id)").get()
|
|
@@ -113,7 +109,7 @@ class SezonlukDizi(PluginBase):
|
|
|
113
109
|
|
|
114
110
|
results = []
|
|
115
111
|
for dil, label in [("1", "Altyazı"), ("0", "Dublaj")]:
|
|
116
|
-
dil_istek = await self.
|
|
112
|
+
dil_istek = await self.httpx.post(
|
|
117
113
|
url = f"{self.main_url}/ajax/dataAlternatif22.asp",
|
|
118
114
|
headers = {"X-Requested-With": "XMLHttpRequest"},
|
|
119
115
|
data = {"bid": bid, "dil": dil},
|
|
@@ -126,7 +122,7 @@ class SezonlukDizi(PluginBase):
|
|
|
126
122
|
|
|
127
123
|
if dil_json.get("status") == "success":
|
|
128
124
|
for idx, veri in enumerate(dil_json.get("data", [])):
|
|
129
|
-
veri_response = await self.
|
|
125
|
+
veri_response = await self.httpx.post(
|
|
130
126
|
url = f"{self.main_url}/ajax/dataEmbed22.asp",
|
|
131
127
|
headers = {"X-Requested-With": "XMLHttpRequest"},
|
|
132
128
|
data = {"id": veri.get("id")},
|
|
@@ -134,6 +130,9 @@ class SezonlukDizi(PluginBase):
|
|
|
134
130
|
secici = Selector(veri_response.text)
|
|
135
131
|
|
|
136
132
|
if iframe := secici.css("iframe::attr(src)").get():
|
|
133
|
+
if "link.asp" in iframe:
|
|
134
|
+
continue
|
|
135
|
+
|
|
137
136
|
extractor = self.ex_manager.find_extractor(self.fix_url(iframe))
|
|
138
137
|
results.append({
|
|
139
138
|
"url" : self.fix_url(iframe),
|