KekikStream 2.3.8-py3-none-any.whl → 2.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of KekikStream has been flagged as possibly problematic.
- KekikStream/Core/HTMLHelper.py +2 -2
- KekikStream/Core/Plugin/PluginModels.py +0 -3
- KekikStream/Extractors/HDMomPlayer.py +62 -0
- KekikStream/Extractors/MolyStream.py +8 -49
- KekikStream/Extractors/Videostr.py +115 -0
- KekikStream/Plugins/DiziBox.py +15 -25
- KekikStream/Plugins/DiziMom.py +248 -0
- KekikStream/Plugins/FilmEkseni.py +140 -0
- KekikStream/Plugins/Filmatek.py +188 -0
- KekikStream/Plugins/Full4kizle.py +190 -0
- KekikStream/Plugins/RecTV.py +32 -19
- KekikStream/Plugins/Watch32.py +185 -0
- {kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/METADATA +3 -3
- {kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/RECORD +18 -12
- {kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/WHEEL +1 -1
- KekikStream/Plugins/DiziWatch.py +0 -212
- {kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/entry_points.txt +0 -0
- {kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/licenses/LICENSE +0 -0
- {kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/top_level.txt +0 -0
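The file listing above can be reproduced locally. A minimal sketch, assuming both wheels have already been downloaded into the working directory (for example with `pip download kekikstream==2.3.8 --no-deps`); the filenames are assumptions about the local downloads and may differ in case from what the index serves:

```python
# Sketch: compare member lists and per-file contents of the two published wheels.
import difflib
import zipfile

OLD = "kekikstream-2.3.8-py3-none-any.whl"  # assumed local filename
NEW = "kekikstream-2.4.0-py3-none-any.whl"  # assumed local filename

with zipfile.ZipFile(OLD) as old_whl, zipfile.ZipFile(NEW) as new_whl:
    old_names, new_names = set(old_whl.namelist()), set(new_whl.namelist())

    # The .dist-info directory is versioned, so METADATA/RECORD/WHEEL show up
    # here as removed + added rather than as in-place changes.
    for name in sorted(new_names - old_names):
        print(f"added:   {name}")
    for name in sorted(old_names - new_names):
        print(f"removed: {name}")

    # Unified diff for files present in both wheels
    for name in sorted(old_names & new_names):
        old_text = old_whl.read(name).decode("utf-8", errors="replace").splitlines()
        new_text = new_whl.read(name).decode("utf-8", errors="replace").splitlines()
        for line in difflib.unified_diff(old_text, new_text, fromfile=name, tofile=name, lineterm=""):
            print(line)
```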
KekikStream/Plugins/Watch32.py
ADDED
@@ -0,0 +1,185 @@
+# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, HTMLHelper
+from json import dumps, loads
+import re
+
+class Watch32(PluginBase):
+    name = "Watch32"
+    language = "en"
+    main_url = "https://watch32.sx"
+    favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+    description = "Watch Your Favorite Movies & TV Shows Online - Streaming For Free. With Movies & TV Shows Full HD. Find Your Movies & Watch NOW!"
+
+    main_page = {
+        f"{main_url}/movie?page=" : "Popular Movies",
+        f"{main_url}/tv-show?page=" : "Popular TV Shows",
+        f"{main_url}/coming-soon?page=" : "Coming Soon",
+        f"{main_url}/top-imdb?page=" : "Top IMDB Rating",
+    }
+
+    async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+        istek  = await self.httpx.get(f"{url}{page}")
+        helper = HTMLHelper(istek.text)
+        items  = helper.select("div.flw-item")
+
+        return [
+            MainPageResult(
+                category = category,
+                title    = helper.select_attr("h2.film-name a", "title", veri),
+                url      = self.fix_url(helper.select_attr("h2.film-name a", "href", veri)),
+                poster   = helper.select_attr("img.film-poster-img", "data-src", veri)
+            )
+            for veri in items
+        ]
+
+    async def search(self, query: str) -> list[SearchResult]:
+        slug = query.replace(" ", "-")
+        url  = f"{self.main_url}/search/{slug}"
+
+        istek  = await self.httpx.get(url)
+        helper = HTMLHelper(istek.text)
+        items  = helper.select("div.flw-item")
+
+        return [
+            SearchResult(
+                title  = helper.select_attr("h2.film-name a", "title", veri),
+                url    = self.fix_url(helper.select_attr("h2.film-name a", "href", veri)),
+                poster = helper.select_attr("img.film-poster-img", "data-src", veri)
+            )
+            for veri in items
+        ]
+
+    async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
+        istek  = await self.httpx.get(url)
+        helper = HTMLHelper(istek.text)
+
+        content_id = helper.select_attr("div.detail_page-watch", "data-id")
+        details    = helper.select_first("div.detail_page-infor")
+        name       = helper.select_text("h2.heading-name > a", details)
+
+        poster      = helper.select_attr("div.film-poster > img", "src", details)
+        description = helper.select_text("div.description", details)
+
+        # Release year extraction
+        year_text = helper.regex_first(r"Released:\s*(\d{4})")
+        if not year_text:
+            # Fallback for series
+            year_text = helper.regex_first(r"Released:.+?(\d{4})")
+
+        # Tags/Genres
+        tags = helper.select_all_text("div.row-line:has(> span.type > strong:contains(Genre)) a")
+
+        # Rating
+        rating = helper.select_text("button.btn-imdb")
+        if rating:
+            rating = rating.replace("N/A", "").split(":")[-1].strip()
+
+        # Actors
+        actors = helper.select_all_text("div.row-line:has(> span.type > strong:contains(Casts)) a")
+
+        if "movie" in url:
+            return MovieInfo(
+                url         = url,
+                poster      = self.fix_url(poster),
+                title       = name,
+                description = description,
+                tags        = tags,
+                rating      = rating,
+                year        = year_text,
+                actors      = actors
+            )
+        else:
+            episodes     = []
+            seasons_resp = await self.httpx.get(f"{self.main_url}/ajax/season/list/{content_id}")
+            sh           = HTMLHelper(seasons_resp.text)
+            seasons      = sh.select("a.dropdown-item") # Relaxed selector from a.ss-item
+
+            for season in seasons:
+                season_id       = season.attrs.get("data-id")
+                season_num_text = season.text().replace("Season ", "").replace("Series", "").strip()
+                season_num      = int(season_num_text) if season_num_text.isdigit() else 1
+
+                episodes_resp = await self.httpx.get(f"{self.main_url}/ajax/season/episodes/{season_id}")
+                eh            = HTMLHelper(episodes_resp.text)
+                eps           = eh.select("a.eps-item")
+
+                for ep in eps:
+                    ep_id        = ep.attrs.get("data-id")
+                    ep_title_raw = ep.attrs.get("title", "")
+                    # Eps 1: Name
+                    m = re.search(r"Eps (\d+): (.+)", ep_title_raw)
+                    if m:
+                        ep_num  = int(m.group(1))
+                        ep_name = m.group(2)
+                    else:
+                        ep_num  = 1
+                        ep_name = ep_title_raw
+
+                    episodes.append(Episode(
+                        season  = season_num,
+                        episode = ep_num,
+                        title   = ep_name,
+                        url     = f"servers/{ep_id}"
+                    ))
+
+            return SeriesInfo(
+                url         = url,
+                poster      = self.fix_url(poster),
+                title       = name,
+                description = description,
+                tags        = tags,
+                rating      = rating,
+                year        = year_text,
+                actors      = actors,
+                episodes    = episodes
+            )
+
+    async def load_links(self, url: str) -> list[ExtractResult]:
+        # url in load_links might be the full page URL for movies or "servers/epId" for episodes
+        if "servers/" in url:
+            data = url.split("/")[-1]
+        if "servers/" in url:
+            data = url.split("/")[-1]
+            servers_url = f"servers/{data}"
+        elif "list/" in url:
+            data = url.split("/")[-1]
+            servers_url = f"list/{data}"
+        else:
+            # Re-fetch page to get contentId only if we don't have list/ or servers/
+            istek  = await self.httpx.get(url)
+            helper = HTMLHelper(istek.text)
+            content_id = helper.select_attr("div.detail_page-watch", "data-id")
+            if not content_id:
+                # Try to get id from url if direct parse fails, similar to Kotlin logic
+                # But Kotlin parses search first. Here we assume we are at the page.
+                # If no content_id found, maybe it's not a valid page or structure changed.
+                return []
+            servers_url = f"list/{content_id}"
+
+        servers_resp = await self.httpx.get(f"{self.main_url}/ajax/episode/{servers_url}")
+        sh           = HTMLHelper(servers_resp.text)
+        servers      = sh.select("a.link-item")
+
+        results = []
+        for server in servers:
+            link_id     = server.attrs.get("data-linkid") or server.attrs.get("data-id")
+            source_resp = await self.httpx.get(f"{self.main_url}/ajax/episode/sources/{link_id}")
+            source_data = source_resp.json()
+            video_url   = source_data.get("link")
+
+            if video_url:
+                # Use extractors if possible
+                extract_result = await self.extract(video_url)
+                if extract_result:
+                    if isinstance(extract_result, list):
+                        results.extend(extract_result)
+                    else:
+                        results.append(extract_result)
+                else:
+                    results.append(ExtractResult(
+                        url  = video_url,
+                        name = f"{self.name} | {server.text()}"
+                    ))
+
+        return results
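The new plugin follows the same coroutine flow as the other KekikStream plugins in this diff: get_main_page/search → load_item → load_links. A minimal, illustrative driver is sketched below; it only uses signatures visible in the hunk above and assumes that Watch32 can be constructed directly and that the 2.4.0 wheel is installed. The asyncio entry point is not part of the package:

```python
# Illustrative driver only: exercises the coroutines defined in the new Watch32 plugin.
import asyncio

from KekikStream.Plugins.Watch32 import Watch32  # module path per the RECORD entry

async def main():
    plugin = Watch32()  # assumes no constructor arguments are required

    results = await plugin.search("inception")      # -> list[SearchResult]
    if not results:
        return

    item = await plugin.load_item(results[0].url)   # MovieInfo or SeriesInfo
    print(item.title)

    # For movies the page URL is passed straight through; for series each
    # Episode.url is the "servers/<id>" form that load_links() special-cases.
    links = await plugin.load_links(results[0].url)
    for link in links:
        print(link.name, link.url)

asyncio.run(main())
```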
{kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: KekikStream
-Version: 2.3.8
+Version: 2.4.0
 Summary: terminal üzerinden medya içeriği aramanızı ve VLC/MPV gibi popüler medya oynatıcılar aracılığıyla doğrudan izlemenizi sağlayan modüler ve genişletilebilir bir bıdı bıdı
 Home-page: https://github.com/keyiflerolsun/KekikStream
 Author: keyiflerolsun
@@ -63,13 +63,13 @@ Terminal üzerinden içerik arayın, VLC/MPV ile doğrudan izleyin veya kendi AP
 
 ## 🚦 Ne Sunar?
 
-KekikStream, Türkçe medya kaynaklarını tek CLI arayüzünde toplayarak hızlı arama ve oynatma sunar. Plugin mimarisi sayesinde yeni kaynaklar eklemek ve [
+KekikStream, Türkçe medya kaynaklarını tek CLI arayüzünde toplayarak hızlı arama ve oynatma sunar. Plugin mimarisi sayesinde yeni kaynaklar eklemek ve [WatchBuddy](https://github.com/WatchBuddy-tv/Stream) ile web/API üzerinden yayın yapmak kolaydır.
 
 - 🎥 Çoklu kaynak desteği: Onlarca Türkçe medya sitesi
 - 🔌 Plugin mimarisi: Yeni kaynak eklemek dakikalar sürer
 - 🎬 Çoklu oynatıcı: VLC, MPV, MX Player
 - 🖥️ CLI & kütüphane: Terminalde veya kod içinde kullanın
-- 🌐 API/Web UI:
+- 🌐 API/Web UI: WatchBuddy üzerinden uzak erişim
 
 ---
 
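Apart from the WatchBuddy wording, the functional change in METADATA is the version bump. A quick, standard-library way to confirm which release is actually installed (nothing here is KekikStream-specific):

```python
# Check the installed KekikStream distribution version against the 2.4.0 release in this diff.
from importlib.metadata import version

installed = version("KekikStream")
print(installed)  # expected "2.4.0" after upgrading
assert installed == "2.4.0", f"still on {installed}"
```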
{kekikstream-2.3.8.dist-info → kekikstream-2.4.0.dist-info}/RECORD
@@ -3,7 +3,7 @@ KekikStream/__main__.py,sha256=B81dQoeGEb-T5Sycs3eNAmW7unvx0Mef0syCjs4nPds,137
 KekikStream/requirements.txt,sha256=V-Rk-4DnK8B-HRR6RtSKmTR3sHfaYgOrnBj8kmVz17w,80
 KekikStream/CLI/__init__.py,sha256=U6oLq_O7u5y2eHhBnmfhZNns_EqHHJXJmzl8jvZFUNY,230
 KekikStream/CLI/pypi_kontrol.py,sha256=q6fNs6EKJDc5VuUFig9DBzLzNPp_kMD1vOVgLElcii8,1487
-KekikStream/Core/HTMLHelper.py,sha256=
+KekikStream/Core/HTMLHelper.py,sha256=Rpj89vqL08ef_Q8bDUJuPQOBnSEQSOuFWWJJLli4oDQ,5434
 KekikStream/Core/__init__.py,sha256=sk2pWup1_jsGB43HJzbbqgQYFDZpf2TquEBYUhqOdN4,807
 KekikStream/Core/Extractor/ExtractorBase.py,sha256=CAdeel6zGlj_RHD0lwjyNW5hAaivo1XyAZbnmiVDaZo,2023
 KekikStream/Core/Extractor/ExtractorLoader.py,sha256=GPGCmgFpDBywR8CsNw43-ddseZhSKTjAUETp1Ohbi6E,4796
@@ -15,7 +15,7 @@ KekikStream/Core/Media/MediaManager.py,sha256=AaUq2D7JSJIphjoAj2fjLOJjswm7Qf5hjY
 KekikStream/Core/Plugin/PluginBase.py,sha256=DMuYf6fRZeRJWYh3UdGcV1FpmDjUnjO1pgIBMRqHjiY,6638
 KekikStream/Core/Plugin/PluginLoader.py,sha256=6LE5id0571bB-gJZxaLfd973XcG6oaGeMhLVcYYY7kw,3768
 KekikStream/Core/Plugin/PluginManager.py,sha256=6a0Q2mHtzIpx1ttdSTsVHg2HfLJIO0r_iHjK3Kui1Rw,939
-KekikStream/Core/Plugin/PluginModels.py,sha256=
+KekikStream/Core/Plugin/PluginModels.py,sha256=7g1uHjJstfnrdTabDgyrBnu1ojIQ025hsmw85cDXFS8,2353
 KekikStream/Core/UI/UIManager.py,sha256=T4V_kdTTWa-UDamgLSKa__dWJuzcvRK9NuwBlzU9Bzc,1693
 KekikStream/Extractors/CloseLoad.py,sha256=qRsiW5SloxWgm6MNUd4DF4vC7aSeyJqD3_0vZoFp7Jc,3176
 KekikStream/Extractors/ContentX.py,sha256=6-pzHBGrwJeGzeMaPZ5s82RCQZL9MEhHDyI3c4L-xMM,3975
@@ -23,13 +23,14 @@ KekikStream/Extractors/DonilasPlay.py,sha256=-Bhfpp0AN_wNYAnsaWdL--wo8DD2VPblTAl
 KekikStream/Extractors/DzenRu.py,sha256=WIUZUIixP4X6TweJHpY86fenRY150ucH2VNImvdxcRc,1213
 KekikStream/Extractors/ExPlay.py,sha256=G2ZmXGcsjpZ5ihtL0ZYkyVO8nPuzSC_8AR0zvED6ScQ,1746
 KekikStream/Extractors/Filemoon.py,sha256=Dls1Y0HhYX4j5zJm9RP-9XFq1pzJ4eadL5Cp0uES_qo,3365
+KekikStream/Extractors/HDMomPlayer.py,sha256=5uP3L5iZ4jIf1I9QcT_cfTGs7qsHIMARDvkUPyc8uEk,2190
 KekikStream/Extractors/HDPlayerSystem.py,sha256=EgnFzx5Q4PkuwAtuff5SYU9k59B-CyOdySl7lbCZ9hM,1312
 KekikStream/Extractors/JFVid.py,sha256=_6A0zmYrWZxIfkCCKAaNxMRLjU-_0Z0hCxCNSApcknk,1350
 KekikStream/Extractors/JetTv.py,sha256=2X1vYDQ0hxBTcpnE_XTcbw9tMS1aXFURcobnPdN8Zxg,1596
 KekikStream/Extractors/MailRu.py,sha256=xQVCWwYqNoG5T43VAW1_m0v4e80FbO-1pNPKkwhTccU,1218
 KekikStream/Extractors/MixPlayHD.py,sha256=u5fUePHfjOI3n7KlNsWhXIv7HA_NMj5bPw1ug-eiXLU,1557
 KekikStream/Extractors/MixTiger.py,sha256=4VbOYgE4s5H-BGVvJI0AI57M-WBWqnek_LGfCFHAucw,2116
-KekikStream/Extractors/MolyStream.py,sha256=
+KekikStream/Extractors/MolyStream.py,sha256=6PYwvQ-seYM_Vm42BZXICseYSvAQTaLUbwSMjp5GgFs,1450
 KekikStream/Extractors/Odnoklassniki.py,sha256=hajKPhWKiIuu_i441TXrWVORpLo2CdTcoJiyU3WQAuI,4038
 KekikStream/Extractors/PeaceMakerst.py,sha256=BJ5Cv5X2GEaMTwn_XFpAVVmts1h5xGno3l5rL7Ugob4,2335
 KekikStream/Extractors/PixelDrain.py,sha256=xPud8W_hqLUXJSU5O-MiCOblcmzrlDJpnEtuxr4ZdI4,1011
@@ -49,23 +50,27 @@ KekikStream/Extractors/VidMoly.py,sha256=4k5z68MrUASUuDMEWZ_Ynvp1Z7njjRcXPBZAnpb
 KekikStream/Extractors/VidMoxy.py,sha256=dM7yBfrXSESvYyqc2uP_gLSgV61gpIAY940NAQ58Mts,1843
 KekikStream/Extractors/VidPapi.py,sha256=9y8TN-o4C3JvRyr2V8Ox908tFE1I2BItQLHZlqs8AuI,3175
 KekikStream/Extractors/VideoSeyred.py,sha256=KJxbJkuupmn4wWBj_ejnoDvmjUXwEXkzStYha3EsSpA,1995
+KekikStream/Extractors/Videostr.py,sha256=epoWgLta1TpewK4opDnBXHI8Nu4pDupb5ehsqCLf4h8,4523
 KekikStream/Extractors/YTDLP.py,sha256=Hy8loCSFSquu2zaL3INord-Jm6T8CM6K2-VcDA2K79g,7390
 KekikStream/Extractors/YildizKisaFilm.py,sha256=R_JlrOVeMiDlXYcuTdItnKvidyx8_u3B14fSrxew2aE,1316
 KekikStream/Plugins/BelgeselX.py,sha256=smoLjEJTdptjb7h4m6LhG7ZUmJQtIhYyi0CUFBsk970,8696
-KekikStream/Plugins/DiziBox.py,sha256=
+KekikStream/Plugins/DiziBox.py,sha256=KZGWhs6p2-hUTsd-fjz2fsmGEkanL4At2PI8qHAoDm4,10541
+KekikStream/Plugins/DiziMom.py,sha256=NPK42RNT5GRdVl8LxQSlFJK-vJEwwIISaOaGpcTkhkw,10359
 KekikStream/Plugins/DiziPal.py,sha256=tHUqAN8UvvzBAkJaGS4hFvdLo-eRO4EdQ_C9HYkj_0U,10576
-KekikStream/Plugins/DiziWatch.py,sha256=NK9xccX4-HwWq67FFVfqDDzKwCDc_HvDJDW2QIvqjig,8900
 KekikStream/Plugins/DiziYou.py,sha256=4KOvxHg-84mUHuHWsXoYlIG2SX4DCV2dm6GblHQ5wGo,11162
 KekikStream/Plugins/Dizilla.py,sha256=PLN0pOkWB4IaGC7Toe-8f5rksmaNm_WfdSFMTAtt--0,13624
 KekikStream/Plugins/FilmBip.py,sha256=40eSECwMHSKTWoUmF90UXxTJkbx6f71J_98Ht4Hnoj8,6352
+KekikStream/Plugins/FilmEkseni.py,sha256=W4XvIUVNs98-JIfnt6KgYLrcJQ3_jLk9dYYX0CrqW0A,5808
 KekikStream/Plugins/FilmMakinesi.py,sha256=0bVN28aCEfrxrvXrGyL6XtgipzUKUD9vN2QkHie2gY0,7859
 KekikStream/Plugins/FilmModu.py,sha256=ou1BrFNR4RQaJdxVqPB5FI8vnQ0UmD-siVdwLnpp7x0,7147
+KekikStream/Plugins/Filmatek.py,sha256=0bMY1T0rMttwOUiI6tUp4gmzYHnK1MoiLSjHHLwp5no,7140
+KekikStream/Plugins/Full4kizle.py,sha256=9-d22cEmCg0BuHrhFx597zHmQ4Q_zaBHtI4-he318Fw,7566
 KekikStream/Plugins/FullHDFilm.py,sha256=08NF5qEydmxT0rGYDWpTOSIYSad8Uv1H1V8yCKG_568,10525
 KekikStream/Plugins/FullHDFilmizlesene.py,sha256=OpdndVQ7LjZ-sJdILGEqhYX-0D18yRqTS7Kpu-HrXmY,6870
 KekikStream/Plugins/HDFilmCehennemi.py,sha256=jntMKgE81k_jl3pFzJI3akqvi3g8U961dVx7bj5Pf2w,13140
 KekikStream/Plugins/JetFilmizle.py,sha256=9sH9Z3y4SP8vta9v-gJOQOxFWAQnbZomFea1_G5EbmM,8100
 KekikStream/Plugins/KultFilmler.py,sha256=rvIkd2OXRxuAXHMjiHCRmKrS5m09gy2JoMBgJh7ZIOk,9478
-KekikStream/Plugins/RecTV.py,sha256=
+KekikStream/Plugins/RecTV.py,sha256=a4VxEroCCH7LhTtEjrLAhrS2uLAKaX-JVBWZOK7ODi0,9102
 KekikStream/Plugins/RoketDizi.py,sha256=KiX7Xf5IyPPJ-CVcJLM9qc0M6Fi2dhg7zU3EgWkICXA,9318
 KekikStream/Plugins/SelcukFlix.py,sha256=nJ7I5e5vBkn9AbLC_2bSu9bSSgMQeDhCQZBZovK00bc,15299
 KekikStream/Plugins/SetFilmIzle.py,sha256=Z8A_Ivbe65i13RocGZXwmpwrVxNOwdj7Gh3CS2-Fslg,11437
@@ -76,10 +81,11 @@ KekikStream/Plugins/SinemaCX.py,sha256=11kzAZWgjkitIonDHHiFHMgnViBj-GjyvTXg7k28M
 KekikStream/Plugins/Sinezy.py,sha256=fUj-3WaJMEsKZRnDpHFPxl5Eq2RPLroY80DcftLqvjM,5743
 KekikStream/Plugins/SuperFilmGeldi.py,sha256=StW0ue4qDj8p7CiWy19Lfr2aWtfYvslPExZJuR-3xiY,6348
 KekikStream/Plugins/UgurFilm.py,sha256=H6AA2iTaM0fn6uN8_Dfvr-OqUtM9gDdkg0BKIcZEj7U,4930
+KekikStream/Plugins/Watch32.py,sha256=BrtdX_HJQVgxK2SRnu03Wb9ubGF5C5W_JECnOORhIzs,7930
 KekikStream/Plugins/YabanciDizi.py,sha256=QXzifSl2JMcVOwkwn2vafYIw1jqB5vBTrf-usvsyMBc,11947
-kekikstream-2.
-kekikstream-2.
-kekikstream-2.
-kekikstream-2.
-kekikstream-2.
-kekikstream-2.
+kekikstream-2.4.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+kekikstream-2.4.0.dist-info/METADATA,sha256=AdLUoeRQNujGkuwuF5CU-28zxWFfNH_58MdK83CY1m4,10745
+kekikstream-2.4.0.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+kekikstream-2.4.0.dist-info/entry_points.txt,sha256=dFwdiTx8djyehI0Gsz-rZwjAfZzUzoBSrmzRu9ubjJc,50
+kekikstream-2.4.0.dist-info/top_level.txt,sha256=DNmGJDXl27Drdfobrak8KYLmocW_uznVYFJOzcjUgmY,12
+kekikstream-2.4.0.dist-info/RECORD,,
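Each RECORD entry has the form `path,sha256=<digest>,size`, where the digest is an unpadded URL-safe base64 SHA-256 of the file, per the wheel spec. A minimal sketch that recomputes one of the new entries, the Watch32.py hash added above (the wheel filename is an assumption about the local download):

```python
# Recompute a RECORD-style digest for a member of the 2.4.0 wheel and compare it
# against the value listed in the RECORD diff above.
import base64
import hashlib
import zipfile

WHEEL    = "kekikstream-2.4.0-py3-none-any.whl"            # assumed local filename
MEMBER   = "KekikStream/Plugins/Watch32.py"
EXPECTED = "BrtdX_HJQVgxK2SRnu03Wb9ubGF5C5W_JECnOORhIzs"    # from the RECORD diff

with zipfile.ZipFile(WHEEL) as whl:
    data = whl.read(MEMBER)

digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
print(digest, len(data))
print("matches RECORD:", digest == EXPECTED and len(data) == 7930)
```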
KekikStream/Plugins/DiziWatch.py
DELETED
@@ -1,212 +0,0 @@
-# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
-
-from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
-import urllib.parse
-
-class DiziWatch(PluginBase):
-    name = "DiziWatch"
-    language = "tr"
-    main_url = "https://diziwatch.to"
-    favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
-    description = "Diziwatch; en güncel yabancı dizileri ve animeleri, Türkçe altyazılı ve dublaj seçenekleriyle izleyebileceğiniz platform."
-
-    main_page = {
-        f"{main_url}/episodes" : "Yeni Bölümler",
-        "9"  : "Aksiyon",
-        "17" : "Animasyon",
-        "5"  : "Bilim Kurgu",
-        "2"  : "Dram",
-        "12" : "Fantastik",
-        "3"  : "Gizem",
-        "4"  : "Komedi",
-        "8"  : "Korku",
-        "24" : "Macera",
-        "14" : "Müzik",
-        "7"  : "Romantik",
-        "23" : "Spor",
-        "1"  : "Suç",
-    }
-
-    c_key   = ""
-    c_value = ""
-
-    async def _init_session(self):
-        # Fetch anime-arsivi to get CSRF tokens
-        resp = await self.httpx.get(f"{self.main_url}/anime-arsivi")
-        sel  = HTMLHelper(resp.text)
-
-        # form.bg-[rgba(255,255,255,.15)] > input
-        # We can just look for the first two inputs in that specific form
-        inputs = sel.select("form.bg-\\[rgba\\(255\\,255\\,255\\,\\.15\\)\\] input")
-        if len(inputs) >= 2:
-            self.c_key   = inputs[0].attrs.get("value")
-            self.c_value = inputs[1].attrs.get("value")
-
-    async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
-        await self._init_session()
-
-        if url.startswith("https://"):
-            full_url = f"{url}?page={page}"
-            resp     = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
-            sel      = HTMLHelper(resp.text)
-            items    = sel.select("div.swiper-slide a")
-        else:
-            # Category ID based
-            full_url = f"{self.main_url}/anime-arsivi?category={url}&minImdb=&name=&release_year=&sort=date_desc&page={page}"
-            resp     = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
-            sel      = HTMLHelper(resp.text)
-            items    = sel.select("div.content-inner a")
-
-        results = []
-        for item in items:
-            title  = sel.select_text("h2", item)
-            href   = item.attrs.get("href") if item.tag == "a" else sel.select_attr("a", "href", item)
-            poster = sel.select_attr("img", "src", item) or sel.select_attr("img", "data-src", item)
-
-            if title and href:
-                # If it's an episode link, clean it to get show link
-                # Regex in Kotlin: /sezon-\d+/bolum-\d+/?$
-                clean_href = HTMLHelper(href).regex_replace(r"/sezon-\d+/bolum-\d+/?$", "")
-
-                # If cleaning changed something, it was an episode link, maybe add it to title
-                if clean_href != href:
-                    se_info = sel.select_text("div.flex.gap-1.items-center", item)
-                    if se_info:
-                        title = f"{title} - {se_info}"
-
-                results.append(MainPageResult(
-                    category = category,
-                    title    = title,
-                    url      = self.fix_url(clean_href),
-                    poster   = self.fix_url(poster) if poster else None
-                ))
-
-        return results
-
-    async def search(self, query: str) -> list[SearchResult]:
-        await self._init_session()
-
-        post_url = f"{self.main_url}/bg/searchcontent"
-        data = {
-            "cKey"       : self.c_key,
-            "cValue"     : self.c_value,
-            "searchterm" : query
-        }
-
-        headers = {
-            "X-Requested-With" : "XMLHttpRequest",
-            "Accept"           : "application/json, text/javascript, */*; q=0.01",
-            "Referer"          : f"{self.main_url}/"
-        }
-
-        resp = await self.httpx.post(post_url, data=data, headers=headers)
-
-        try:
-            raw = resp.json()
-            # Kotlin maps this to ApiResponse -> DataWrapper -> Icerikler
-            res_array = raw.get("data", {}).get("result", [])
-
-            results = []
-            for item in res_array:
-                title  = item.get("object_name", "").replace("\\", "")
-                slug   = item.get("used_slug", "").replace("\\", "")
-                poster = item.get("object_poster_url", "")
-
-                # Cleanup poster URL as in Kotlin
-                if poster:
-                    poster = poster.replace("images-macellan-online.cdn.ampproject.org/i/s/", "") \
-                        .replace("file.dizilla.club", "file.macellan.online") \
-                        .replace("images.dizilla.club", "images.macellan.online") \
-                        .replace("images.dizimia4.com", "images.macellan.online") \
-                        .replace("file.dizimia4.com", "file.macellan.online")
-                    poster = HTMLHelper(poster).regex_replace(r"(file\.)[\w\.]+\/?", r"\1macellan.online/")
-                    poster = HTMLHelper(poster).regex_replace(r"(images\.)[\w\.]+\/?", r"\1macellan.online/")
-                    poster = poster.replace("/f/f/", "/630/910/")
-
-                if title and slug:
-                    results.append(SearchResult(
-                        title  = title,
-                        url    = self.fix_url(slug),
-                        poster = self.fix_url(poster) if poster else None
-                    ))
-            return results
-        except Exception:
-            return []
-
-    async def load_item(self, url: str) -> SeriesInfo:
-        resp = await self.httpx.get(url)
-        sel  = HTMLHelper(resp.text)
-
-        title       = sel.select_text("h2") or sel.select_text("h1")
-        poster      = sel.select_attr("img.rounded-md", "src") or sel.select_attr("meta[property='og:image']", "content")
-        description = sel.select_text("div.text-sm") or sel.select_text("div.summary")
-
-        year = sel.regex_first(r"Yap\u0131m Y\u0131l\u0131\s*:\s*(\d+)", resp.text)
-
-        tags = []
-        tags_raw = sel.regex_first(r"T\u00fcr\s*:\s*([^<]+)", resp.text)
-        if tags_raw:
-            tags = [t.strip() for t in tags_raw.split(",")]
-
-        rating = sel.select_text(".font-semibold.text-white")
-        if rating:
-            rating = rating.replace(",", ".").strip()
-
-        actors = [a.text(strip=True) for a in sel.select("span.valor a")]
-
-        trailer_match = sel.regex_first(r"embed\/(.*)\?rel", resp.text)
-        trailer = f"https://www.youtube.com/embed/{trailer_match}" if trailer_match else None
-
-        duration_text = sel.select_text("span.runtime")
-        duration = duration_text.split(" ")[0] if duration_text else None
-
-        episodes = []
-        # ul a handles episodes
-        for ep_link in sel.select("ul a"):
-            href = ep_link.attrs.get("href")
-            if not href or "/sezon-" not in href:
-                continue
-
-            ep_name = sel.select_text("span.hidden.sm\\:block", ep_link)
-
-            season_match  = sel.regex_first(r"sezon-(\d+)", href)
-            episode_match = sel.regex_first(r"bolum-(\d+)", href)
-
-            season      = season_match if season_match else None
-            episode_num = episode_match if episode_match else None
-
-            episodes.append(Episode(
-                season  = int(season) if season and season.isdigit() else None,
-                episode = int(episode_num) if episode_num and episode_num.isdigit() else None,
-                title   = ep_name if ep_name else f"{season}x{episode_num}",
-                url     = self.fix_url(href)
-            ))
-
-        return SeriesInfo(
-            title       = title,
-            url         = url,
-            poster      = self.fix_url(poster) if poster else None,
-            description = description,
-            rating      = rating,
-            tags        = tags,
-            actors      = actors,
-            year        = year,
-            episodes    = episodes,
-            duration    = int(duration) if duration and str(duration).isdigit() else None
-        )
-
-    async def load_links(self, url: str) -> list[ExtractResult]:
-        resp = await self.httpx.get(url)
-        sel  = HTMLHelper(resp.text)
-
-        iframe = sel.select_attr("iframe", "src")
-        if not iframe:
-            return []
-
-        iframe_url = self.fix_url(iframe)
-        data = await self.extract(iframe_url, referer=f"{self.main_url}/")
-
-        if not data:
-            return []
-
-        return data if isinstance(data, list) else [data]