KekikStream 2.4.2__py3-none-any.whl → 2.4.3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- KekikStream/Core/Extractor/ExtractorBase.py +3 -2
- KekikStream/Core/HTMLHelper.py +134 -40
- KekikStream/Core/Plugin/PluginBase.py +3 -2
- KekikStream/Extractors/CloseLoad.py +30 -54
- KekikStream/Extractors/ContentX.py +27 -72
- KekikStream/Extractors/DonilasPlay.py +33 -77
- KekikStream/Extractors/DzenRu.py +10 -24
- KekikStream/Extractors/ExPlay.py +20 -38
- KekikStream/Extractors/Filemoon.py +19 -45
- KekikStream/Extractors/HDMomPlayer.py +24 -56
- KekikStream/Extractors/HDPlayerSystem.py +13 -31
- KekikStream/Extractors/HotStream.py +14 -32
- KekikStream/Extractors/JFVid.py +3 -24
- KekikStream/Extractors/JetTv.py +21 -34
- KekikStream/Extractors/MailRu.py +11 -29
- KekikStream/Extractors/MixPlayHD.py +15 -28
- KekikStream/Extractors/MixTiger.py +17 -40
- KekikStream/Extractors/MolyStream.py +17 -21
- KekikStream/Extractors/Odnoklassniki.py +28 -104
- KekikStream/Extractors/PeaceMakerst.py +18 -45
- KekikStream/Extractors/PixelDrain.py +8 -16
- KekikStream/Extractors/PlayerFilmIzle.py +22 -41
- KekikStream/Extractors/RapidVid.py +21 -35
- KekikStream/Extractors/SetPlay.py +18 -43
- KekikStream/Extractors/SibNet.py +7 -17
- KekikStream/Extractors/Sobreatsesuyp.py +23 -45
- KekikStream/Extractors/TRsTX.py +23 -53
- KekikStream/Extractors/TurboImgz.py +7 -14
- KekikStream/Extractors/VCTPlay.py +10 -28
- KekikStream/Extractors/VidHide.py +10 -31
- KekikStream/Extractors/VidMoly.py +65 -99
- KekikStream/Extractors/VidMoxy.py +16 -27
- KekikStream/Extractors/VidPapi.py +24 -54
- KekikStream/Extractors/VideoSeyred.py +19 -40
- KekikStream/Extractors/Videostr.py +42 -99
- KekikStream/Extractors/Vidoza.py +8 -15
- KekikStream/Extractors/YildizKisaFilm.py +13 -31
- KekikStream/Plugins/BelgeselX.py +63 -69
- KekikStream/Plugins/DiziBox.py +16 -36
- KekikStream/Plugins/DiziMom.py +37 -129
- KekikStream/Plugins/DiziPal.py +26 -75
- KekikStream/Plugins/DiziYou.py +44 -152
- KekikStream/Plugins/Dizilla.py +18 -44
- KekikStream/Plugins/FilmBip.py +10 -24
- KekikStream/Plugins/FilmEkseni.py +12 -32
- KekikStream/Plugins/FilmMakinesi.py +24 -77
- KekikStream/Plugins/FilmModu.py +11 -18
- KekikStream/Plugins/Filmatek.py +13 -39
- KekikStream/Plugins/Full4kizle.py +33 -133
- KekikStream/Plugins/FullHDFilm.py +23 -93
- KekikStream/Plugins/FullHDFilmizlesene.py +10 -29
- KekikStream/Plugins/HDFilmCehennemi.py +27 -66
- KekikStream/Plugins/JetFilmizle.py +19 -20
- KekikStream/Plugins/KultFilmler.py +16 -50
- KekikStream/Plugins/RecTV.py +47 -85
- KekikStream/Plugins/SelcukFlix.py +29 -47
- KekikStream/Plugins/SetFilmIzle.py +28 -84
- KekikStream/Plugins/SezonlukDizi.py +27 -59
- KekikStream/Plugins/Sinefy.py +37 -100
- KekikStream/Plugins/SinemaCX.py +12 -18
- KekikStream/Plugins/Sinezy.py +11 -12
- KekikStream/Plugins/SuperFilmGeldi.py +8 -13
- KekikStream/Plugins/UgurFilm.py +14 -14
- KekikStream/Plugins/Watch32.py +42 -74
- KekikStream/Plugins/YabanciDizi.py +33 -87
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/METADATA +1 -1
- kekikstream-2.4.3.dist-info/RECORD +93 -0
- kekikstream-2.4.2.dist-info/RECORD +0 -93
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/WHEEL +0 -0
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/entry_points.txt +0 -0
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/licenses/LICENSE +0 -0
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/top_level.txt +0 -0
KekikStream/Extractors/VidPapi.py
CHANGED

@@ -4,84 +4,54 @@ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, HTMLHelper


 class VidPapi(ExtractorBase):
-    name = "
+    name = "VidPapi"
     main_url = "https://vidpapi.xyz"

-    async def extract(self, url, referer=None) -> ExtractResult:
-
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        ref = referer or self.main_url

-        #
+        # ID tespiti
         if "video/" in url:
             vid_id = url.split("video/")[-1]
         else:
             vid_id = url.split("?data=")[-1]

-
-        sub_url = f"{self.main_url}/player/index.php?data={vid_id}"
-        sub_headers = {
+        headers = {
             "Content-Type" : "application/x-www-form-urlencoded; charset=UTF-8",
             "X-Requested-With" : "XMLHttpRequest",
-            "
-            "Referer" : ext_ref or "https://kultfilmler.pro/"
+            "Referer" : ref
         }

+        # 1. Altyazıları çek
         subtitles = []
         try:
-
-
-            headers =
-            data = {"hash": vid_id, "r":
+            sub_resp = await self.httpx.post(
+                f"{self.main_url}/player/index.php?data={vid_id}",
+                headers = headers,
+                data = {"hash": vid_id, "r": ref}
             )
-
-            raw_subs
-
-
-
-                lang = lang.strip()
-                if "Türkçe" in lang:
-                    lang_code = "tr"
-                    lang_name = "Turkish"
-                elif "İngilizce" in lang:
-                    lang_code = "en"
-                    lang_name = "English"
-                else:
-                    lang_code = lang[:2].lower()
-                    lang_name = lang
-
-                subtitles.append(Subtitle(
-                    name = lang_name,
-                    url = sub_link.strip()
-                ))
-
-        except Exception as e:
+            sel = HTMLHelper(sub_resp.text)
+            if raw_subs := sel.regex_first(r'var playerjsSubtitle\s*=\s*"([^"]*)"'):
+                for lang, link in HTMLHelper(raw_subs).regex_all(r'\[(.*?)\](https?://[^\s\",]+)'):
+                    subtitles.append(Subtitle(name=lang.strip(), url=link.strip()))
+        except:
             pass

         # 2. Videoyu çek
-
-
-
-
-            url = video_url,
-            headers = video_headers,
-            data = {"hash": vid_id, "r": "https://kultfilmler.pro/"}
+        resp = await self.httpx.post(
+            f"{self.main_url}/player/index.php?data={vid_id}&do=getVideo",
+            headers = headers,
+            data = {"hash": vid_id, "r": ref}
         )
-
-
-        try:
-            video_data = response.json()
-        except Exception:
-            return None
+        data = resp.json()

-        stream_url =
-        if not stream_url or not stream_url.strip():
-            stream_url = video_data.get("videoSource")
-
+        stream_url = data.get("securedLink") or data.get("videoSource")
         if not stream_url:
-            raise ValueError("
+            raise ValueError(f"VidPapi: Video URL bulunamadı. {url}")

         return ExtractResult(
             name = self.name,
             url = stream_url,
-            referer =
+            referer = ref,
             subtitles = subtitles
         )
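The rewritten VidPapi extractor (and YildizKisaFilm further below) drives the same `player/index.php` endpoint: a form-encoded POST carrying the video hash and a referer, answered by JSON with `securedLink` / `videoSource`. A minimal standalone sketch of that request flow, assuming only `httpx` and a reachable player host; the helper function is illustrative and not part of the package:

```python
# Illustrative sketch of the player/index.php flow used by the updated extractors.
# Endpoint shape and field names come from the diff above; everything else is assumed.
import asyncio
import httpx

async def fetch_secured_link(main_url: str, vid_id: str, ref: str) -> str | None:
    headers = {
        "Content-Type"     : "application/x-www-form-urlencoded; charset=UTF-8",
        "X-Requested-With" : "XMLHttpRequest",
        "Referer"          : ref,
    }
    async with httpx.AsyncClient() as client:
        resp = await client.post(
            f"{main_url}/player/index.php?data={vid_id}&do=getVideo",
            headers = headers,
            data    = {"hash": vid_id, "r": ref},
        )
        data = resp.json()
    # The new extractors fall back from securedLink to videoSource.
    return data.get("securedLink") or data.get("videoSource")

# asyncio.run(fetch_secured_link("https://vidpapi.xyz", "abc123", "https://vidpapi.xyz"))
```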
KekikStream/Extractors/VideoSeyred.py
CHANGED

@@ -1,4 +1,4 @@
-#
+# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

 from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, HTMLHelper
 import json
@@ -7,47 +7,26 @@ class VideoSeyred(ExtractorBase):
     name = "VideoSeyred"
     main_url = "https://videoseyred.in"

-    async def extract(self, url, referer=None) -> ExtractResult:
-
-
+    async def extract(self, url: str, referer: str = None) -> list[ExtractResult] | ExtractResult:
+        v_id = url.split("embed/")[1].split("?")[0]
+        if len(v_id) > 10:
+            resp = await self.httpx.get(url)
+            v_id = HTMLHelper(resp.text).regex_first(r"playlist\/(.*)\.json")

-
-
-        kontrol = await self.httpx.get(url)
-        kontrol.raise_for_status()
-
-        video_id = HTMLHelper(kontrol.text).regex_first(r"playlist\/(.*)\.json")
-
-        video_url = f"{self.main_url}/playlist/{video_id}.json"
-
-        response = await self.httpx.get(video_url)
-        response.raise_for_status()
-
-        try:
-            if response_list := json.loads(response.text):
-                response_data = response_list[0]
-            else:
-                raise ValueError("Empty response from VideoSeyred.")
-
-        except (json.JSONDecodeError, IndexError) as hata:
-            raise RuntimeError(f"Failed to parse response: {hata}") from hata
+        json_resp = await self.httpx.get(f"{self.main_url}/playlist/{v_id}.json")
+        data = json_resp.json()[0]

         subtitles = [
-            Subtitle(name=
-
-
+            Subtitle(name=t["label"], url=self.fix_url(t["file"]))
+            for t in data.get("tracks", []) if t.get("kind") == "captions"
+        ]
+
+        results = [
+            ExtractResult(name=self.name, url=self.fix_url(s["file"]), referer=self.main_url, subtitles=subtitles)
+            for s in data.get("sources", [])
         ]

-        if
-
-
-
-                referer = self.main_url,
-                subtitles = subtitles,
-            )
-            for source in response_data.get("sources", [])
-        ]:
-            # En yüksek kaliteli videoyu döndür (varsayılan olarak ilk video)
-            return video_links[0] if len(video_links) == 1 else video_links
-        else:
-            raise ValueError("No video links found in the response.")
+        if not results:
+            raise ValueError(f"VideoSeyred: Video bulunamadı. {url}")
+
+        return results[0] if len(results) == 1 else results
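VideoSeyred now resolves everything from a single playlist JSON document: the embed id maps to `{main_url}/playlist/{id}.json`, whose first element carries `sources` (video files) and `tracks` (caption entries with `kind == "captions"`). A rough standalone sketch of parsing that payload, assuming the JSON shape shown in the diff; the helper names are illustrative:

```python
# Sketch of the playlist-JSON parsing VideoSeyred now relies on.
# The payload shape (a list whose first dict holds "sources"/"tracks") is inferred from the diff.
import httpx

def parse_playlist(payload: list[dict]) -> tuple[list[str], list[dict]]:
    data = payload[0]
    subtitles = [
        {"name": t["label"], "url": t["file"]}
        for t in data.get("tracks", []) if t.get("kind") == "captions"
    ]
    videos = [s["file"] for s in data.get("sources", [])]
    return videos, subtitles

async def load(main_url: str, v_id: str):
    async with httpx.AsyncClient() as client:
        resp = await client.get(f"{main_url}/playlist/{v_id}.json")
    return parse_playlist(resp.json())
```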
KekikStream/Extractors/Videostr.py
CHANGED

@@ -2,114 +2,57 @@

 from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper, Subtitle
 from urllib.parse import quote
-
+import re

 class Videostr(ExtractorBase):
     name = "Videostr"
-    main_url = "videostr.net"
+    main_url = "https://videostr.net"

-    async def extract(self, url: str, referer: str = None) -> ExtractResult
-
-
-            "X-Requested-With": "XMLHttpRequest",
-            "Referer": "https://videostr.net",
-        }
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        v_id = url.split("?")[0].split("/")[-1]
+        headers = {"Referer": self.main_url, "X-Requested-With": "XMLHttpRequest"}

-
-
-
-
-
-        if istek.status_code != 200:
-            return None
-
-        responsenonce = istek.text
-
-        # Find nonce
-        # Regex: \b[a-zA-Z0-9]{48}\b
-        # Or 3 blocks of 16 chars
-        helper = HTMLHelper(responsenonce)
-        nonce = helper.regex_first(r"\b[a-zA-Z0-9]{48}\b")
+        resp = await self.httpx.get(url, headers=headers)
+        sel = HTMLHelper(resp.text)
+
+        # Nonce Bulma
+        nonce = sel.regex_first(r"\b[a-zA-Z0-9]{48}\b")
         if not nonce:
-
-
-
-            m = re.search(r"\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b", responsenonce, re.DOTALL)
-            if m:
-                nonce = m.group(1) + m.group(2) + m.group(3)
-
+            m = re.search(r"\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b", resp.text, re.DOTALL)
+            if m: nonce = m.group(1) + m.group(2) + m.group(3)
+
         if not nonce:
-
-
-            #
-
+            raise ValueError(f"Videostr: Nonce bulunamadı. {url}")
+
+        # Kaynakları Çek
+        api_resp = await self.httpx.get(f"{self.main_url}/embed-1/v3/e-1/getSources?id={v_id}&_k={nonce}", headers=headers)
+        data = api_resp.json()

-
-        if
-
-
-        # Parse JSON
-        try:
-            data = api_resp.json()
-        except:
-            return None
-
-        sources = data.get("sources", [])
-        if not sources:
-            return None
-
-        encrypted_file = sources[0].get("file")
-        if not encrypted_file:
-            return None
-
+        enc_file = data.get("sources", [{}])[0].get("file")
+        if not enc_file:
+            raise ValueError("Videostr: Kaynak bulunamadı.")
+
         m3u8_url = None
-
-
-            m3u8_url = encrypted_file
+        if ".m3u8" in enc_file:
+            m3u8_url = enc_file
         else:
-            #
-
-
-
-
-
-
-
-
-            if vidstr_key:
-                # Use Google Script to decrypt
-                decode_url = "https://script.google.com/macros/s/AKfycbxHbYHbrGMXYD2-bC-C43D3njIbU-wGiYQuJL61H4vyy6YVXkybMNNEPJNPPuZrD1gRVA/exec"
-
-                full_url = f"{decode_url}?encrypted_data={quote(encrypted_file)}&nonce={quote(nonce)}&secret={quote(vidstr_key)}"
-
-                decrypted_resp = await self.httpx.get(full_url)
-                if decrypted_resp.status_code == 200:
-                    # Response is JSON {"file": "..."} usually or text?
-                    # Kotlin says: Regex("\"file\":\"(.*?)\"").find(decryptedResponse)
-                    m_file = re.search(r'"file":"(.*?)"', decrypted_resp.text)
-                    if m_file:
-                        m3u8_url = m_file.group(1).replace(r"\/", "/")
-        except Exception as e:
-            # print(f"Decryption error: {e}")
-            pass
+            # Decryption Flow (External Keys)
+            with contextlib.suppress(Exception):
+                key_resp = await self.httpx.get("https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json")
+                v_key = key_resp.json().get("vidstr")
+                if v_key:
+                    decode_api = "https://script.google.com/macros/s/AKfycbxHbYHbrGMXYD2-bC-C43D3njIbU-wGiYQuJL61H4vyy6YVXkybMNNEPJNPPuZrD1gRVA/exec"
+                    dec_resp = await self.httpx.get(f"{decode_api}?encrypted_data={quote(enc_file)}&nonce={quote(nonce)}&secret={quote(v_key)}")
+                    m3u8_url = re.search(r'"file":"(.*?)"', dec_resp.text).group(1).replace("\\/", "/")

         if not m3u8_url:
-
-
-
-
-
-
-
-
-
-
-            url = t.get("file")
-            ))
-
-        return ExtractResult(
-            name = "Videostr",
-            url = m3u8_url,
-            referer = "https://videostr.net/",
-            subtitles= subtitles
-        )
+            raise ValueError(f"Videostr: Video URL bulunamadı. {url}")
+
+        subtitles = [
+            Subtitle(name=t.get("label", "Altyazı"), url=t.get("file"))
+            for t in data.get("tracks", []) if t.get("kind") in ["captions", "subtitles"]
+        ]
+
+        return ExtractResult(name=self.name, url=m3u8_url, referer=f"{self.main_url}/", subtitles=subtitles)
+
+import contextlib
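The nonce hunting in Videostr is the interesting part of that hunk: the embed page is first searched for a single 48-character token and, failing that, three 16-character blocks are stitched together in order of appearance. A self-contained sketch of just that fallback, reusing the regexes from the diff:

```python
# Nonce extraction as done in the new Videostr.extract(): one 48-char token,
# otherwise three 16-char blocks concatenated in the order they appear.
import re

def find_nonce(page: str) -> str | None:
    if m := re.search(r"\b[a-zA-Z0-9]{48}\b", page):
        return m.group(0)
    m = re.search(
        r"\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b",
        page,
        re.DOTALL,
    )
    return m.group(1) + m.group(2) + m.group(3) if m else None

assert find_nonce("x" * 48) == "x" * 48
```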
KekikStream/Extractors/Vidoza.py
CHANGED
@@ -6,20 +6,13 @@ class Vidoza(ExtractorBase):
     name = "Vidoza"
     main_url = "https://vidoza.net"

-    async def extract(self, url: str, referer: str = None) -> ExtractResult
-
-        self.httpx.headers.update({"Referer": referer})
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        self.httpx.headers.update({"Referer": referer or url})

-
-
+        resp = await self.httpx.get(url)
+        v_url = HTMLHelper(resp.text).select_attr("source", "src")

-
-
-
-
-            name = self.name,
-            url = video_url,
-            referer = url
-        )
-
-        return None
+        if not v_url:
+            raise ValueError(f"Vidoza: Video bulunamadı. {url}")
+
+        return ExtractResult(name=self.name, url=v_url, referer=url)
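Vidoza's new extractor reduces to a single `<source src="…">` lookup via `HTMLHelper.select_attr`. An equivalent standalone scrape, sketched with `httpx` plus a CSS selector from `parsel` as an assumed stand-in for the package's own helper:

```python
# Standalone equivalent of the new Vidoza flow: fetch the page, pull <source src="...">.
# parsel is assumed as the selector backend here; HTMLHelper itself is not part of this sketch.
import httpx
from parsel import Selector

async def extract_vidoza(url: str, referer: str | None = None) -> str:
    async with httpx.AsyncClient(headers={"Referer": referer or url}) as client:
        resp = await client.get(url)

    video_url = Selector(text=resp.text).css("source::attr(src)").get()
    if not video_url:
        raise ValueError(f"Vidoza: Video bulunamadı. {url}")

    return video_url
```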
KekikStream/Extractors/YildizKisaFilm.py
CHANGED

@@ -6,36 +6,18 @@ class YildizKisaFilm(ExtractorBase):
     name = "YildizKisaFilm"
     main_url = "https://yildizkisafilm.org"

-    async def extract(self, url, referer=None) -> ExtractResult:
-
-
-
-
-
-
-
-        post_url = f"{self.main_url}/player/index.php?data={vid_id}&do=getVideo"
-
-        response = await self.httpx.post(
-            url = post_url,
-            data = {"hash": vid_id, "r": ext_ref},
-            headers = {
-                "Referer" : ext_ref,
-                "Content-Type" : "application/x-www-form-urlencoded; charset=UTF-8",
-                "X-Requested-With" : "XMLHttpRequest"
-            }
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        ref = referer or self.main_url
+        v_id = url.split("video/")[-1] if "video/" in url else url.split("?data=")[-1]
+
+        resp = await self.httpx.post(
+            f"{self.main_url}/player/index.php?data={v_id}&do=getVideo",
+            data = {"hash": v_id, "r": ref},
+            headers = {"Referer": ref, "X-Requested-With": "XMLHttpRequest"}
         )
-
+
+        m3u8_url = resp.json().get("securedLink")
+        if not m3u8_url:
+            raise ValueError(f"YildizKisaFilm: Video URL bulunamadı. {url}")

-
-        m3u_link = video_data.get("securedLink")
-
-        if not m3u_link:
-            raise ValueError("securedLink not found in response")
-
-        return ExtractResult(
-            name = self.name,
-            url = m3u_link,
-            referer = ext_ref,
-            subtitles = []
-        )
+        return ExtractResult(name=self.name, url=m3u8_url, referer=ref)
KekikStream/Plugins/BelgeselX.py
CHANGED
@@ -1,6 +1,6 @@
 # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

-from KekikStream.Core
+from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper

 class BelgeselX(PluginBase):
     name = "BelgeselX"
@@ -70,7 +70,7 @@ class BelgeselX(PluginBase):
         token_resp = self.cloudscraper.get(f"https://cse.google.com/cse.js?cx={cx}")
         token_text = token_resp.text

-        secici
+        secici = HTMLHelper(token_text)
         cse_lib = secici.regex_first(r'cselibVersion": "(.*)"')
         cse_tok = secici.regex_first(r'cse_token": "(.*)"')

@@ -84,13 +84,13 @@
             f"&callback=google.search.cse.api9969&rurl=https%3A%2F%2Fbelgeselx.com%2F"
         )

-        resp
+        resp = self.cloudscraper.get(search_url)
         resp_text = resp.text

         secici2 = HTMLHelper(resp_text)
-        titles
-        urls
-        images
+        titles = secici2.regex_all(r'"titleNoFormatting": "(.*?)"')
+        urls = secici2.regex_all(r'"url": "(.*?)"')
+        images = secici2.regex_all(r'"ogImage": "(.*?)"')

         results = []
         for i, title in enumerate(titles):
@@ -101,8 +101,7 @@
             # URL'den belgesel linkini oluştur
             if poster and "diziresimleri" in poster:
                 file_name = poster.rsplit("/", 1)[-1]
-
-                file_name = secici3.regex_replace(r"\.(jpe?g|png|webp)$", "")
+                file_name = HTMLHelper(file_name).regex_replace(r"\.(jpe?g|png|webp)$", "")
                 url_val = f"{self.main_url}/belgeseldizi/{file_name}"
             else:
                 continue
@@ -120,85 +119,80 @@
         istek = await self.httpx.get(url)
         secici = HTMLHelper(istek.text)

-        title
-
-        poster = secici.select_attr("div.gen-tv-show-top img", "src")
-
+        title = self._to_title_case(secici.select_text("h2.gen-title"))
+        poster = secici.select_poster("div.gen-tv-show-top img")
         description = secici.select_text("div.gen-single-tv-show-info p")
-
-
-
-
-
-
-
+        tags = [self._to_title_case(t.rsplit("/", 1)[-1].replace("-", " ")) for t in secici.select_attrs("div.gen-socail-share a[href*='belgeselkanali']", "href")]
+
+        # Meta bilgilerinden yıl ve puanı çıkar
+        meta_items = secici.select_texts("div.gen-single-meta-holder ul li")
+        year = None
+        rating = None
+        for item in meta_items:
+            if not year:
+                if y_match := secici.regex_first(r"\b((?:19|20)\d{2})\b", item):
+                    year = int(y_match)
+            if not rating:
+                if r_match := secici.regex_first(r"%\s*(\d+)\s*Puan", item):
+                    rating = float(r_match) / 10

         episodes = []
-
-
-
-
-
-
-
-
-
-
-
-
-
-            ep_episode = int(episode_num) if episode_num else counter
-            ep_season = int(season_num) if season_num else 1
-
-            counter += 1
-
-            episodes.append(Episode(
-                season = ep_season,
-                episode = ep_episode,
-                title = ep_name,
-                url = self.fix_url(ep_href)
-            ))
+        for i, ep in enumerate(secici.select("div.gen-movie-contain")):
+            name = secici.select_text("div.gen-movie-info h3 a", ep)
+            href = secici.select_attr("div.gen-movie-info h3 a", "href", ep)
+            item_id = secici.select_attr("div.gen-movie-info h3 a", "id", ep)
+            if name and href:
+                s, e = secici.extract_season_episode(secici.select_text("div.gen-single-meta-holder ul li", ep))
+                # ID'yi URL'ye ekle ki load_links doğru bölümü çekebilsin
+                final_url = self.fix_url(href)
+                if item_id:
+                    final_url = f"{final_url}?id={item_id}"
+
+                episodes.append(Episode(season=s or 1, episode=e or (i + 1), title=name, url=final_url))

         return SeriesInfo(
-            url
-
-            title = self._to_title_case(title) if title else None,
-            description = description,
-            tags = tags,
-            episodes = episodes
+            url=url, poster=self.fix_url(poster) if poster else None, title=title or "Bilinmiyor",
+            description=description, tags=tags, year=year, rating=rating, episodes=episodes
         )

     async def load_links(self, url: str) -> list[ExtractResult]:
-
-
+        # URL'den ID'yi ayıkla
+        params = dict([x.split('=') for x in url.split('?')[-1].split('&')]) if '?' in url else {}
+        episode_id = params.get('id')
+        main_url = url.split('?')[0]
+
+        istek = await self.httpx.get(main_url)
+        secici = HTMLHelper(istek.text)

-        secici = HTMLHelper(text)
-        # fnc_addWatch div'inden data-episode ID'sini al
-        episode_id = secici.regex_first(r'<div[^>]*class=["\'][^"\']*fnc_addWatch[^"\']*["\'][^>]*data-episode=["\'](\d+)["\']')
         if not episode_id:
-
+            episode_id = secici.regex_first(r'data-episode=["\'](\d+)["\']')

-
+        if not episode_id:
+            return []

-        iframe_resp = await self.httpx.get(
-
+        iframe_resp = await self.httpx.get(f"{self.main_url}/video/data/new4.php?id={episode_id}", headers={"Referer": main_url})
+        secici = HTMLHelper(iframe_resp.text)

-
-
-
-        label_matches = secici2.regex_all(r'label: "([^"]+)"')
+        links = []
+        files = secici.regex_all(r'file:"([^"]+)"')
+        labels = secici.regex_all(r'label: "([^"]+)"')

-
-
-        quality = label_matches[i] if i < len(label_matches) else "Unknown"
+        for i, video_url in enumerate(files):
+            quality = labels[i] if i < len(labels) else "HD"

-
-
+            # belgeselx.php redirect'ini çöz
+            if "belgeselx.php" in video_url or "belgeselx2.php" in video_url:
+                try:
+                    # HEAD isteği ile lokasyonu alalım
+                    resp = await self.httpx.head(video_url, headers={"Referer": main_url}, follow_redirects=True)
+                    video_url = str(resp.url)
+                except:
+                    pass

             links.append(ExtractResult(
                 url = video_url,
-                name = f"{
-                referer =
+                name = f"{'Google' if 'google' in video_url.lower() or 'blogspot' in video_url.lower() or quality == 'FULL' else self.name} | {'1080p' if quality == 'FULL' else quality}",
+                referer = main_url
             ))

         return links