KekikStream 2.3.9__py3-none-any.whl → 2.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- KekikStream/Core/Extractor/ExtractorBase.py +3 -2
- KekikStream/Core/Extractor/ExtractorLoader.py +8 -14
- KekikStream/Core/HTMLHelper.py +120 -49
- KekikStream/Core/Plugin/PluginBase.py +35 -12
- KekikStream/Core/Plugin/PluginLoader.py +12 -14
- KekikStream/Core/Plugin/PluginManager.py +2 -2
- KekikStream/Core/Plugin/PluginModels.py +0 -3
- KekikStream/Extractors/Abstream.py +27 -0
- KekikStream/Extractors/CloseLoad.py +30 -54
- KekikStream/Extractors/ContentX.py +27 -72
- KekikStream/Extractors/DonilasPlay.py +33 -77
- KekikStream/Extractors/DzenRu.py +10 -24
- KekikStream/Extractors/ExPlay.py +20 -38
- KekikStream/Extractors/Filemoon.py +21 -46
- KekikStream/Extractors/HDMomPlayer.py +30 -0
- KekikStream/Extractors/HDPlayerSystem.py +13 -31
- KekikStream/Extractors/HotStream.py +27 -0
- KekikStream/Extractors/JFVid.py +3 -24
- KekikStream/Extractors/JetTv.py +21 -34
- KekikStream/Extractors/JetV.py +55 -0
- KekikStream/Extractors/MailRu.py +11 -29
- KekikStream/Extractors/MixPlayHD.py +15 -28
- KekikStream/Extractors/MixTiger.py +17 -40
- KekikStream/Extractors/MolyStream.py +17 -21
- KekikStream/Extractors/Odnoklassniki.py +40 -104
- KekikStream/Extractors/PeaceMakerst.py +18 -45
- KekikStream/Extractors/PixelDrain.py +8 -16
- KekikStream/Extractors/PlayerFilmIzle.py +22 -41
- KekikStream/Extractors/RapidVid.py +21 -35
- KekikStream/Extractors/SetPlay.py +18 -43
- KekikStream/Extractors/SibNet.py +7 -17
- KekikStream/Extractors/Sobreatsesuyp.py +23 -45
- KekikStream/Extractors/TRsTX.py +23 -53
- KekikStream/Extractors/TurboImgz.py +7 -14
- KekikStream/Extractors/VCTPlay.py +10 -28
- KekikStream/Extractors/Veev.py +145 -0
- KekikStream/Extractors/VidBiz.py +62 -0
- KekikStream/Extractors/VidHide.py +58 -30
- KekikStream/Extractors/VidMoly.py +65 -99
- KekikStream/Extractors/VidMoxy.py +16 -27
- KekikStream/Extractors/VidPapi.py +24 -54
- KekikStream/Extractors/VideoSeyred.py +19 -40
- KekikStream/Extractors/Videostr.py +58 -0
- KekikStream/Extractors/Vidoza.py +18 -0
- KekikStream/Extractors/Vtbe.py +38 -0
- KekikStream/Extractors/YTDLP.py +2 -2
- KekikStream/Extractors/YildizKisaFilm.py +13 -31
- KekikStream/Extractors/Zeus.py +61 -0
- KekikStream/Plugins/BelgeselX.py +97 -77
- KekikStream/Plugins/DiziBox.py +28 -45
- KekikStream/Plugins/DiziMom.py +179 -0
- KekikStream/Plugins/DiziPal.py +95 -161
- KekikStream/Plugins/DiziYou.py +51 -147
- KekikStream/Plugins/Dizilla.py +40 -61
- KekikStream/Plugins/FilmBip.py +90 -39
- KekikStream/Plugins/FilmEkseni.py +199 -0
- KekikStream/Plugins/FilmMakinesi.py +72 -73
- KekikStream/Plugins/FilmModu.py +25 -35
- KekikStream/Plugins/Filmatek.py +184 -0
- KekikStream/Plugins/FilmciBaba.py +155 -0
- KekikStream/Plugins/FullHDFilmizlesene.py +16 -37
- KekikStream/Plugins/HDFilm.py +243 -0
- KekikStream/Plugins/HDFilmCehennemi.py +242 -189
- KekikStream/Plugins/JetFilmizle.py +101 -69
- KekikStream/Plugins/KultFilmler.py +138 -104
- KekikStream/Plugins/RecTV.py +52 -73
- KekikStream/Plugins/RoketDizi.py +18 -27
- KekikStream/Plugins/SelcukFlix.py +30 -48
- KekikStream/Plugins/SetFilmIzle.py +76 -104
- KekikStream/Plugins/SezonlukDizi.py +90 -94
- KekikStream/Plugins/Sinefy.py +195 -167
- KekikStream/Plugins/SinemaCX.py +148 -78
- KekikStream/Plugins/Sinezy.py +29 -31
- KekikStream/Plugins/SuperFilmGeldi.py +12 -17
- KekikStream/Plugins/UgurFilm.py +85 -38
- KekikStream/Plugins/Watch32.py +160 -0
- KekikStream/Plugins/YabanciDizi.py +176 -211
- {kekikstream-2.3.9.dist-info → kekikstream-2.5.4.dist-info}/METADATA +1 -1
- kekikstream-2.5.4.dist-info/RECORD +99 -0
- {kekikstream-2.3.9.dist-info → kekikstream-2.5.4.dist-info}/WHEEL +1 -1
- KekikStream/Plugins/FullHDFilm.py +0 -249
- kekikstream-2.3.9.dist-info/RECORD +0 -84
- {kekikstream-2.3.9.dist-info → kekikstream-2.5.4.dist-info}/entry_points.txt +0 -0
- {kekikstream-2.3.9.dist-info → kekikstream-2.5.4.dist-info}/licenses/LICENSE +0 -0
- {kekikstream-2.3.9.dist-info → kekikstream-2.5.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
|
|
2
|
+
|
|
3
|
+
from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, HTMLHelper
|
|
4
|
+
|
|
5
|
+
class Watch32(PluginBase):
    """Plugin for watch32.sx — free movie & TV-show streaming (English content)."""

    name        = "Watch32"
    language    = "en"
    main_url    = "https://watch32.sx"
    favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
    description = "Watch Your Favorite Movies & TV Shows Online - Streaming For Free. With Movies & TV Shows Full HD. Find Your Movies & Watch NOW!"

    # Listing endpoints; the page number is appended directly to each key.
    main_page   = {
        # Main Categories
        f"{main_url}/movie?page="       : "Popular Movies",
        f"{main_url}/tv-show?page="     : "Popular TV Shows",
        f"{main_url}/coming-soon?page=" : "Coming Soon",
        f"{main_url}/top-imdb?page="    : "Top IMDB Rating",
        # Genre Categories
        f"{main_url}/genre/action?page="          : "Action",
        f"{main_url}/genre/adventure?page="       : "Adventure",
        f"{main_url}/genre/animation?page="       : "Animation",
        f"{main_url}/genre/biography?page="       : "Biography",
        f"{main_url}/genre/comedy?page="          : "Comedy",
        f"{main_url}/genre/crime?page="           : "Crime",
        f"{main_url}/genre/documentary?page="     : "Documentary",
        f"{main_url}/genre/drama?page="           : "Drama",
        f"{main_url}/genre/family?page="          : "Family",
        f"{main_url}/genre/fantasy?page="         : "Fantasy",
        f"{main_url}/genre/history?page="         : "History",
        f"{main_url}/genre/horror?page="          : "Horror",
        f"{main_url}/genre/music?page="           : "Music",
        f"{main_url}/genre/mystery?page="         : "Mystery",
        f"{main_url}/genre/romance?page="         : "Romance",
        f"{main_url}/genre/science-fiction?page=" : "Science Fiction",
        f"{main_url}/genre/thriller?page="        : "Thriller",
        f"{main_url}/genre/war?page="             : "War",
        f"{main_url}/genre/western?page="         : "Western",
    }

    async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
        """Fetch one listing page and map each poster card to a MainPageResult."""
        istek  = await self.httpx.get(f"{url}{page}")
        helper = HTMLHelper(istek.text)
        items  = helper.select("div.flw-item")

        return [
            MainPageResult(
                category = category,
                title    = helper.select_attr("h2.film-name a", "title", veri),
                url      = self.fix_url(helper.select_attr("h2.film-name a", "href", veri)),
                poster   = helper.select_attr("img.film-poster-img", "data-src", veri)
            )
            for veri in items
        ]

    async def search(self, query: str) -> list[SearchResult]:
        """Search by slugifying the query into the site's /search/<q> path."""
        istek  = await self.httpx.get(f"{self.main_url}/search/{query.replace(' ', '-')}")
        secici = HTMLHelper(istek.text)

        return [
            SearchResult(
                title  = secici.select_attr("h2.film-name a", "title", veri),
                url    = self.fix_url(secici.select_attr("h2.film-name a", "href", veri)),
                poster = secici.select_attr("img.film-poster-img", "data-src", veri)
            )
            for veri in secici.select("div.flw-item")
        ]

    async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
        """Load metadata for a detail page; series additionally get their episode list
        via the site's /ajax/season/* endpoints."""
        istek  = await self.httpx.get(url)
        secici = HTMLHelper(istek.text)

        content_id  = secici.select_attr("div.detail_page-watch", "data-id")
        details     = secici.select_first("div.detail_page-infor")
        name        = secici.select_text("h2.heading-name > a", details)
        poster      = secici.select_poster("div.film-poster > img", details)
        description = secici.select_text("div.description", details)

        # extract_year may find nothing; keep None instead of the literal string "None".
        yil  = secici.extract_year()
        year = str(yil) if yil is not None else None

        tags   = secici.meta_list("Genre", container_selector="div.row-line")
        # Button text looks like "IMDB: 7.1"; "N/A" means no rating.
        rating = imdb.replace("N/A", "").split(":")[-1].strip() if (imdb := secici.select_text("button.btn-imdb")) else None
        actors = secici.meta_list("Casts", container_selector="div.row-line")

        common_info = {
            "url"         : url,
            "poster"      : self.fix_url(poster),
            "title"       : name,
            "description" : description,
            "tags"        : tags,
            "rating"      : rating,
            "year"        : year,
            "actors"      : actors
        }

        if "movie" in url:
            return MovieInfo(**common_info)

        episodes     = []
        seasons_resp = await self.httpx.get(f"{self.main_url}/ajax/season/list/{content_id}")
        sh           = HTMLHelper(seasons_resp.text)

        for season in sh.select("a.dropdown-item"):
            season_id = season.attrs.get("data-id")
            s_val, _  = sh.extract_season_episode(season.text())

            e_resp = await self.httpx.get(f"{self.main_url}/ajax/season/episodes/{season_id}")
            eh     = HTMLHelper(e_resp.text)

            for ep in eh.select("a.eps-item"):
                ep_id    = ep.attrs.get("data-id")
                ep_title = ep.attrs.get("title", "")
                _, e_val = eh.extract_season_episode(ep_title)

                episodes.append(Episode(
                    season  = s_val or 1,
                    episode = e_val or 1,
                    title   = ep_title,
                    # Marker URL consumed by load_links below.
                    url     = f"servers/{ep_id}"
                ))

        return SeriesInfo(**common_info, episodes=episodes)

    async def load_links(self, url: str) -> list[ExtractResult]:
        """Resolve playable stream links.

        `url` is either a full movie page URL, "servers/<episodeId>" (emitted by
        load_item for episodes) or "list/<contentId>".
        """
        if "servers/" in url:
            servers_url = f"servers/{url.split('/')[-1]}"
        elif "list/" in url:
            servers_url = f"list/{url.split('/')[-1]}"
        else:
            # Re-fetch the page to recover the content id only when we got a plain page URL.
            istek      = await self.httpx.get(url)
            secici     = HTMLHelper(istek.text)
            content_id = secici.select_attr("div.detail_page-watch", "data-id")
            if not content_id:
                return []
            servers_url = f"list/{content_id}"

        servers_resp = await self.httpx.get(f"{self.main_url}/ajax/episode/{servers_url}")
        sh           = HTMLHelper(servers_resp.text)
        servers      = sh.select("a.link-item")

        results = []
        for server in servers:
            server_name = server.text(strip=True)
            link_id     = server.attrs.get("data-linkid") or server.attrs.get("data-id")
            if not link_id:
                # No usable id on this anchor; a request for ".../sources/None" would be pointless.
                continue

            source_resp = await self.httpx.get(f"{self.main_url}/ajax/episode/sources/{link_id}")
            source_data = source_resp.json()
            video_url   = source_data.get("link")
            if not video_url:
                continue

            extract_result = await self.extract(video_url, name_override=server_name)
            if extract_result:
                results.extend(extract_result if isinstance(extract_result, list) else [extract_result])
            else:
                # No matching extractor; expose the raw link as-is.
                results.append(ExtractResult(
                    url  = video_url,
                    name = f"{self.name} | {server_name}"
                ))

        return results
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
|
|
2
2
|
|
|
3
3
|
from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, MovieInfo, Episode, ExtractResult, HTMLHelper
|
|
4
|
-
import
|
|
4
|
+
import asyncio, time, json
|
|
5
5
|
|
|
6
6
|
class YabanciDizi(PluginBase):
|
|
7
7
|
name = "YabanciDizi"
|
|
@@ -19,52 +19,50 @@ class YabanciDizi(PluginBase):
|
|
|
19
19
|
}
|
|
20
20
|
|
|
21
21
|
async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
22
|
+
istek = await self.httpx.get(
|
|
23
|
+
url = url if page == 1 else f"{url}/{page}",
|
|
24
|
+
headers = {"Referer": f"{self.main_url}/"}
|
|
25
|
+
)
|
|
26
|
+
secici = HTMLHelper(istek.text)
|
|
26
27
|
|
|
27
28
|
results = []
|
|
28
|
-
for item in
|
|
29
|
-
title =
|
|
30
|
-
href =
|
|
31
|
-
poster =
|
|
32
|
-
score = sel.select_text("span.rating", item)
|
|
29
|
+
for item in secici.select("li.mb-lg, li.segment-poster"):
|
|
30
|
+
title = secici.select_text("h2", item)
|
|
31
|
+
href = secici.select_attr("a", "href", item)
|
|
32
|
+
poster = secici.select_attr("img", "src", item)
|
|
33
33
|
|
|
34
34
|
if title and href:
|
|
35
35
|
results.append(MainPageResult(
|
|
36
36
|
category = category,
|
|
37
37
|
title = title,
|
|
38
38
|
url = self.fix_url(href),
|
|
39
|
-
poster = self.fix_url(poster)
|
|
39
|
+
poster = self.fix_url(poster),
|
|
40
40
|
))
|
|
41
41
|
|
|
42
42
|
return results
|
|
43
43
|
|
|
44
44
|
async def search(self, query: str) -> list[SearchResult]:
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
resp = await self.httpx.post(search_url, headers=headers)
|
|
45
|
+
istek = await self.httpx.post(
|
|
46
|
+
url = f"{self.main_url}/search?qr={query}",
|
|
47
|
+
headers = {
|
|
48
|
+
"X-Requested-With" : "XMLHttpRequest",
|
|
49
|
+
"Referer" : f"{self.main_url}/"
|
|
50
|
+
}
|
|
51
|
+
)
|
|
53
52
|
|
|
54
53
|
try:
|
|
55
|
-
raw =
|
|
56
|
-
# Kotlin mapping: JsonResponse -> Data -> ResultItem
|
|
54
|
+
raw = istek.json()
|
|
57
55
|
res_array = raw.get("data", {}).get("result", [])
|
|
58
|
-
|
|
56
|
+
|
|
59
57
|
results = []
|
|
60
58
|
for item in res_array:
|
|
61
59
|
title = item.get("s_name")
|
|
62
60
|
image = item.get("s_image")
|
|
63
61
|
slug = item.get("s_link")
|
|
64
62
|
s_type = item.get("s_type") # 0: dizi, 1: film
|
|
65
|
-
|
|
63
|
+
|
|
66
64
|
poster = f"{self.main_url}/uploads/series/{image}" if image else None
|
|
67
|
-
|
|
65
|
+
|
|
68
66
|
if s_type == "1":
|
|
69
67
|
href = f"{self.main_url}/film/{slug}"
|
|
70
68
|
else:
|
|
@@ -74,212 +72,179 @@ class YabanciDizi(PluginBase):
|
|
|
74
72
|
results.append(SearchResult(
|
|
75
73
|
title = title,
|
|
76
74
|
url = self.fix_url(href),
|
|
77
|
-
poster = self.fix_url(poster)
|
|
75
|
+
poster = self.fix_url(poster)
|
|
78
76
|
))
|
|
79
77
|
return results
|
|
80
78
|
except Exception:
|
|
81
79
|
return []
|
|
82
80
|
|
|
83
81
|
async def load_item(self, url: str) -> SeriesInfo | MovieInfo:
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
tags
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
if "T\u00fcr\u00fc:" in text:
|
|
108
|
-
tags = [t.strip() for t in text.replace("T\u00fcr\u00fc:", "").split(",")]
|
|
109
|
-
elif "IMDb Puan\u0131" in text:
|
|
110
|
-
rating = text.replace("IMDb Puan\u0131", "").strip()
|
|
111
|
-
elif "Yap\u0131m Y\u0131l\u0131" in text:
|
|
112
|
-
year_match = sel.regex_first(r"(\d{4})", text)
|
|
113
|
-
if year_match:
|
|
114
|
-
year = year_match
|
|
115
|
-
elif "Takip\u00e7iler" in text:
|
|
116
|
-
continue
|
|
117
|
-
elif "S\u00fcre" in text:
|
|
118
|
-
dur_match = sel.regex_first(r"(\d+)", text)
|
|
119
|
-
if dur_match:
|
|
120
|
-
duration = dur_match
|
|
121
|
-
elif "Oyuncular:" in text:
|
|
122
|
-
actors = [a.text(strip=True) for a in sel.select("a", item)]
|
|
123
|
-
|
|
124
|
-
if not actors:
|
|
125
|
-
actors = [a.text(strip=True) for a in sel.select("div#common-cast-list div.item h5")]
|
|
126
|
-
|
|
127
|
-
trailer_match = sel.regex_first(r"embed\/(.*)\?rel", resp.text)
|
|
128
|
-
trailer = f"https://www.youtube.com/embed/{trailer_match}" if trailer_match else None
|
|
82
|
+
istek = await self.httpx.get(url, follow_redirects=True)
|
|
83
|
+
secici = HTMLHelper(istek.text)
|
|
84
|
+
|
|
85
|
+
title = (secici.select_attr("meta[property='og:title']", "content") or "").split("|")[0].strip() or secici.select_text("h1")
|
|
86
|
+
poster = secici.select_poster("meta[property='og:image']")
|
|
87
|
+
description = secici.select_text("p#tv-series-desc")
|
|
88
|
+
year = secici.extract_year("td div.truncate")
|
|
89
|
+
tags = secici.meta_list("Türü", container_selector="div.item")
|
|
90
|
+
rating = secici.meta_value("IMDb Puanı", container_selector="div.item")
|
|
91
|
+
duration = int(secici.regex_first(r"(\d+)", secici.meta_value("Süre", container_selector="div.item")) or 0)
|
|
92
|
+
actors = secici.meta_list("Oyuncular", container_selector="div.item") or secici.select_texts("div#common-cast-list div.item h5")
|
|
93
|
+
|
|
94
|
+
common_info = {
|
|
95
|
+
"url" : url,
|
|
96
|
+
"poster" : self.fix_url(poster),
|
|
97
|
+
"title" : title,
|
|
98
|
+
"description" : description,
|
|
99
|
+
"tags" : tags,
|
|
100
|
+
"rating" : rating,
|
|
101
|
+
"year" : year,
|
|
102
|
+
"actors" : actors,
|
|
103
|
+
"duration" : duration
|
|
104
|
+
}
|
|
129
105
|
|
|
130
106
|
if "/film/" in url:
|
|
131
|
-
return MovieInfo(
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
duration = int(duration) if duration and duration.isdigit() else None
|
|
141
|
-
)
|
|
142
|
-
else:
|
|
143
|
-
episodes = []
|
|
144
|
-
for bolum_item in sel.select("div.episodes-list div.ui td:has(h6)"):
|
|
145
|
-
link_el = sel.select_first("a", bolum_item)
|
|
146
|
-
if not link_el: continue
|
|
147
|
-
|
|
148
|
-
bolum_href = link_el.attrs.get("href")
|
|
149
|
-
bolum_name = sel.select_text("h6", bolum_item) or link_el.text(strip=True)
|
|
150
|
-
|
|
151
|
-
season = sel.regex_first(r"sezon-(\d+)", bolum_href)
|
|
152
|
-
episode = sel.regex_first(r"bolum-(\d+)", bolum_href)
|
|
153
|
-
|
|
154
|
-
ep_season = int(season) if season and season.isdigit() else None
|
|
155
|
-
ep_episode = int(episode) if episode and episode.isdigit() else None
|
|
156
|
-
|
|
107
|
+
return MovieInfo(**common_info)
|
|
108
|
+
|
|
109
|
+
episodes = []
|
|
110
|
+
for bolum in secici.select("div.episodes-list div.ui td:has(h6)"):
|
|
111
|
+
link = secici.select_first("a", bolum)
|
|
112
|
+
if link:
|
|
113
|
+
href = link.attrs.get("href")
|
|
114
|
+
name = secici.select_text("h6", bolum) or link.text(strip=True)
|
|
115
|
+
s, e = secici.extract_season_episode(href)
|
|
157
116
|
episodes.append(Episode(
|
|
158
|
-
season =
|
|
159
|
-
episode =
|
|
160
|
-
title =
|
|
161
|
-
url = self.fix_url(
|
|
117
|
+
season = s or 1,
|
|
118
|
+
episode = e or 1,
|
|
119
|
+
title = name,
|
|
120
|
+
url = self.fix_url(href)
|
|
162
121
|
))
|
|
163
122
|
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
title = title,
|
|
169
|
-
url = url,
|
|
170
|
-
poster = self.fix_url(poster) if poster else None,
|
|
171
|
-
description = description,
|
|
172
|
-
rating = rating,
|
|
173
|
-
tags = tags,
|
|
174
|
-
actors = actors,
|
|
175
|
-
year = year,
|
|
176
|
-
episodes = episodes
|
|
177
|
-
)
|
|
123
|
+
if episodes and (episodes[0].episode or 0) > (episodes[-1].episode or 0):
|
|
124
|
+
episodes.reverse()
|
|
125
|
+
|
|
126
|
+
return SeriesInfo(**common_info, episodes=episodes)
|
|
178
127
|
|
|
179
128
|
async def load_links(self, url: str) -> list[ExtractResult]:
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
129
|
+
loop = asyncio.get_event_loop()
|
|
130
|
+
|
|
131
|
+
# 1. Ana sayfayı çek
|
|
132
|
+
istek = await loop.run_in_executor(None, lambda: self.cloudscraper.get(url, headers={"Referer": f"{self.main_url}/"}))
|
|
133
|
+
secici = HTMLHelper(istek.text)
|
|
134
|
+
|
|
184
135
|
results = []
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
136
|
+
timestamp_ms = int(time.time() * 1000) - 50000
|
|
137
|
+
|
|
138
|
+
# 2. Dil Tablarını Bul
|
|
139
|
+
tabs = secici.select("div#series-tabs a")
|
|
140
|
+
|
|
141
|
+
async def process_tab(tab_el):
|
|
142
|
+
data_eid = tab_el.attrs.get("data-eid")
|
|
143
|
+
data_type = tab_el.attrs.get("data-type") # 1: Altyazı, 2: Dublaj
|
|
144
|
+
if not data_eid or not data_type:
|
|
145
|
+
return []
|
|
146
|
+
|
|
147
|
+
dil_adi = "Dublaj" if data_type == "2" else "Altyazı"
|
|
148
|
+
|
|
194
149
|
try:
|
|
195
|
-
post_resp = self.cloudscraper.post(
|
|
196
|
-
f"{self.main_url}/ajax/service",
|
|
197
|
-
data = {
|
|
198
|
-
"link" : data_link,
|
|
199
|
-
"hash" : data_hash,
|
|
200
|
-
"querytype" : q_type,
|
|
201
|
-
"type" : "videoGet"
|
|
202
|
-
},
|
|
150
|
+
post_resp = await loop.run_in_executor(None, lambda: self.cloudscraper.post(
|
|
151
|
+
url = f"{self.main_url}/ajax/service",
|
|
203
152
|
headers = {
|
|
204
153
|
"X-Requested-With" : "XMLHttpRequest",
|
|
205
|
-
"Referer" :
|
|
154
|
+
"Referer" : url
|
|
206
155
|
},
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
api_iframe = service_data.get("api_iframe")
|
|
212
|
-
if api_iframe:
|
|
213
|
-
extract_res = await self._fetch_and_extract(api_iframe, prefix="Alt")
|
|
214
|
-
if extract_res:
|
|
215
|
-
results.extend(extract_res if isinstance(extract_res, list) else [extract_res])
|
|
216
|
-
except Exception:
|
|
217
|
-
continue
|
|
218
|
-
|
|
219
|
-
# Method 2: pointing[data-eid]
|
|
220
|
-
for id_el in sel.select("a.ui.pointing[data-eid]"):
|
|
221
|
-
dil = id_el.text(strip=True)
|
|
222
|
-
v_lang = "tr" if "Dublaj" in dil else "en"
|
|
223
|
-
data_eid = id_el.attrs.get("data-eid")
|
|
224
|
-
|
|
225
|
-
try:
|
|
226
|
-
post_resp = self.cloudscraper.post(
|
|
227
|
-
f"{self.main_url}/ajax/service",
|
|
228
|
-
data = {
|
|
229
|
-
"e_id" : data_eid,
|
|
230
|
-
"v_lang" : v_lang,
|
|
231
|
-
"type" : "get_whatwehave"
|
|
232
|
-
},
|
|
233
|
-
headers = {
|
|
234
|
-
"X-Requested-With" : "XMLHttpRequest",
|
|
235
|
-
"Referer" : f"{self.main_url}/"
|
|
156
|
+
data = {
|
|
157
|
+
"lang" : data_type,
|
|
158
|
+
"episode" : data_eid,
|
|
159
|
+
"type" : "langTab"
|
|
236
160
|
},
|
|
237
|
-
cookies = {"udys":
|
|
238
|
-
)
|
|
239
|
-
|
|
240
|
-
service_data = post_resp.json()
|
|
241
|
-
api_iframe = service_data.get("api_iframe")
|
|
242
|
-
if api_iframe:
|
|
243
|
-
extract_res = await self._fetch_and_extract(api_iframe, prefix=dil)
|
|
244
|
-
if extract_res:
|
|
245
|
-
results.extend(extract_res if isinstance(extract_res, list) else [extract_res])
|
|
246
|
-
except Exception:
|
|
247
|
-
continue
|
|
161
|
+
cookies = {"udys": str(timestamp_ms)}
|
|
162
|
+
))
|
|
248
163
|
|
|
249
|
-
|
|
164
|
+
res_json = post_resp.json()
|
|
165
|
+
if not res_json.get("data"): return []
|
|
166
|
+
|
|
167
|
+
res_sel = HTMLHelper(res_json["data"])
|
|
168
|
+
sources = []
|
|
169
|
+
|
|
170
|
+
for item in res_sel.select("div.item"):
|
|
171
|
+
name = item.text(strip=True)
|
|
172
|
+
data_link = item.attrs.get("data-link")
|
|
173
|
+
if not data_link: continue
|
|
174
|
+
|
|
175
|
+
# Link normalizasyonu
|
|
176
|
+
safe_link = data_link.replace("/", "_").replace("+", "-")
|
|
177
|
+
|
|
178
|
+
# API Endpoint belirleme
|
|
179
|
+
api_path = None
|
|
180
|
+
if "VidMoly" in name:
|
|
181
|
+
api_path = "moly"
|
|
182
|
+
elif "Okru" in name:
|
|
183
|
+
api_path = "ruplay"
|
|
184
|
+
elif "Mac" in name:
|
|
185
|
+
api_path = "drive"
|
|
186
|
+
|
|
187
|
+
if api_path:
|
|
188
|
+
sources.append({
|
|
189
|
+
"name" : name,
|
|
190
|
+
"api_url" : f"{self.main_url}/api/{api_path}/{safe_link}",
|
|
191
|
+
"dil" : dil_adi
|
|
192
|
+
})
|
|
193
|
+
|
|
194
|
+
tab_results = []
|
|
195
|
+
for src in sources:
|
|
196
|
+
try:
|
|
197
|
+
# API sayfasını çekip içindeki iframe'i bulalım
|
|
198
|
+
api_resp = await loop.run_in_executor(None, lambda: self.cloudscraper.get(
|
|
199
|
+
src["api_url"],
|
|
200
|
+
headers={"Referer": f"{self.main_url}/"},
|
|
201
|
+
cookies={"udys": str(timestamp_ms)}
|
|
202
|
+
))
|
|
203
|
+
|
|
204
|
+
api_sel = HTMLHelper(api_resp.text)
|
|
205
|
+
iframe = api_sel.select_attr("iframe", "src")
|
|
206
|
+
|
|
207
|
+
if not iframe and "drive" in src["api_url"]:
|
|
208
|
+
t_sec = int(time.time())
|
|
209
|
+
drives_url = f"{src['api_url'].replace('/api/drive/', '/api/drives/')}?t={t_sec}"
|
|
210
|
+
api_resp = await loop.run_in_executor(None, lambda: self.cloudscraper.get(
|
|
211
|
+
drives_url,
|
|
212
|
+
headers={"Referer": src["api_url"]},
|
|
213
|
+
cookies={"udys": str(timestamp_ms)}
|
|
214
|
+
))
|
|
215
|
+
api_sel = HTMLHelper(api_resp.text)
|
|
216
|
+
iframe = api_sel.select_attr("iframe", "src")
|
|
217
|
+
|
|
218
|
+
if iframe:
|
|
219
|
+
prefix = f"{src['dil']} | {src['name']}"
|
|
220
|
+
extracted = await self.extract(self.fix_url(iframe), prefix=prefix)
|
|
221
|
+
if extracted:
|
|
222
|
+
tab_results.extend(extracted if isinstance(extracted, list) else [extracted])
|
|
223
|
+
except Exception:
|
|
224
|
+
continue
|
|
225
|
+
return tab_results
|
|
250
226
|
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
# Initial fetch
|
|
254
|
-
resp = self.cloudscraper.get(
|
|
255
|
-
iframe_url,
|
|
256
|
-
headers = {"Referer": f"{self.main_url}/"},
|
|
257
|
-
cookies = {"udys": "1760709729873", "level": "1"}
|
|
258
|
-
)
|
|
259
|
-
|
|
260
|
-
# Handle "Lütfen bekleyiniz" check from Kotlin
|
|
261
|
-
if "Lütfen bekleyiniz" in resp.text:
|
|
262
|
-
import time as time_module
|
|
263
|
-
time_module.sleep(1)
|
|
264
|
-
timestamp = int(time_module.time())
|
|
265
|
-
# Retry with t=timestamp as in Kotlin
|
|
266
|
-
sep = "&" if "?" in iframe_url else "?"
|
|
267
|
-
resp = self.cloudscraper.get(
|
|
268
|
-
f"{iframe_url}{sep}t={timestamp}",
|
|
269
|
-
headers = {"Referer": f"{self.main_url}/"},
|
|
270
|
-
cookies = resp.cookies # Use cookies from first response
|
|
271
|
-
)
|
|
272
|
-
|
|
273
|
-
sel = HTMLHelper(resp.text)
|
|
274
|
-
final_iframe = sel.select_attr("iframe", "src")
|
|
275
|
-
|
|
276
|
-
return final_iframe
|
|
227
|
+
except Exception:
|
|
228
|
+
return []
|
|
277
229
|
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
230
|
+
if tabs:
|
|
231
|
+
results_groups = await asyncio.gather(*(process_tab(tab) for tab in tabs))
|
|
232
|
+
for group in results_groups:
|
|
233
|
+
results.extend(group)
|
|
234
|
+
else:
|
|
235
|
+
# Tab yoksa mevcut sayfada iframe ara
|
|
236
|
+
iframe = secici.select_attr("iframe", "src")
|
|
237
|
+
if iframe:
|
|
238
|
+
extracted = await self.extract(self.fix_url(iframe), name_override="Main")
|
|
239
|
+
if extracted:
|
|
240
|
+
results.extend(extracted if isinstance(extracted, list) else [extracted])
|
|
241
|
+
|
|
242
|
+
# Duplicate kontrolü
|
|
243
|
+
unique_results = []
|
|
244
|
+
seen = set()
|
|
245
|
+
for res in results:
|
|
246
|
+
if res.url and res.url not in seen:
|
|
247
|
+
unique_results.append(res)
|
|
248
|
+
seen.add(res.url)
|
|
249
|
+
|
|
250
|
+
return unique_results
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: KekikStream
|
|
3
|
-
Version: 2.
|
|
3
|
+
Version: 2.5.4
|
|
4
4
|
Summary: terminal üzerinden medya içeriği aramanızı ve VLC/MPV gibi popüler medya oynatıcılar aracılığıyla doğrudan izlemenizi sağlayan modüler ve genişletilebilir bir bıdı bıdı
|
|
5
5
|
Home-page: https://github.com/keyiflerolsun/KekikStream
|
|
6
6
|
Author: keyiflerolsun
|