KekikStream 2.4.2-py3-none-any.whl → 2.4.3-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- KekikStream/Core/Extractor/ExtractorBase.py +3 -2
- KekikStream/Core/HTMLHelper.py +134 -40
- KekikStream/Core/Plugin/PluginBase.py +3 -2
- KekikStream/Extractors/CloseLoad.py +30 -54
- KekikStream/Extractors/ContentX.py +27 -72
- KekikStream/Extractors/DonilasPlay.py +33 -77
- KekikStream/Extractors/DzenRu.py +10 -24
- KekikStream/Extractors/ExPlay.py +20 -38
- KekikStream/Extractors/Filemoon.py +19 -45
- KekikStream/Extractors/HDMomPlayer.py +24 -56
- KekikStream/Extractors/HDPlayerSystem.py +13 -31
- KekikStream/Extractors/HotStream.py +14 -32
- KekikStream/Extractors/JFVid.py +3 -24
- KekikStream/Extractors/JetTv.py +21 -34
- KekikStream/Extractors/MailRu.py +11 -29
- KekikStream/Extractors/MixPlayHD.py +15 -28
- KekikStream/Extractors/MixTiger.py +17 -40
- KekikStream/Extractors/MolyStream.py +17 -21
- KekikStream/Extractors/Odnoklassniki.py +28 -104
- KekikStream/Extractors/PeaceMakerst.py +18 -45
- KekikStream/Extractors/PixelDrain.py +8 -16
- KekikStream/Extractors/PlayerFilmIzle.py +22 -41
- KekikStream/Extractors/RapidVid.py +21 -35
- KekikStream/Extractors/SetPlay.py +18 -43
- KekikStream/Extractors/SibNet.py +7 -17
- KekikStream/Extractors/Sobreatsesuyp.py +23 -45
- KekikStream/Extractors/TRsTX.py +23 -53
- KekikStream/Extractors/TurboImgz.py +7 -14
- KekikStream/Extractors/VCTPlay.py +10 -28
- KekikStream/Extractors/VidHide.py +10 -31
- KekikStream/Extractors/VidMoly.py +65 -99
- KekikStream/Extractors/VidMoxy.py +16 -27
- KekikStream/Extractors/VidPapi.py +24 -54
- KekikStream/Extractors/VideoSeyred.py +19 -40
- KekikStream/Extractors/Videostr.py +42 -99
- KekikStream/Extractors/Vidoza.py +8 -15
- KekikStream/Extractors/YildizKisaFilm.py +13 -31
- KekikStream/Plugins/BelgeselX.py +63 -69
- KekikStream/Plugins/DiziBox.py +16 -36
- KekikStream/Plugins/DiziMom.py +37 -129
- KekikStream/Plugins/DiziPal.py +26 -75
- KekikStream/Plugins/DiziYou.py +44 -152
- KekikStream/Plugins/Dizilla.py +18 -44
- KekikStream/Plugins/FilmBip.py +10 -24
- KekikStream/Plugins/FilmEkseni.py +12 -32
- KekikStream/Plugins/FilmMakinesi.py +24 -77
- KekikStream/Plugins/FilmModu.py +11 -18
- KekikStream/Plugins/Filmatek.py +13 -39
- KekikStream/Plugins/Full4kizle.py +33 -133
- KekikStream/Plugins/FullHDFilm.py +23 -93
- KekikStream/Plugins/FullHDFilmizlesene.py +10 -29
- KekikStream/Plugins/HDFilmCehennemi.py +27 -66
- KekikStream/Plugins/JetFilmizle.py +19 -20
- KekikStream/Plugins/KultFilmler.py +16 -50
- KekikStream/Plugins/RecTV.py +47 -85
- KekikStream/Plugins/SelcukFlix.py +29 -47
- KekikStream/Plugins/SetFilmIzle.py +28 -84
- KekikStream/Plugins/SezonlukDizi.py +27 -59
- KekikStream/Plugins/Sinefy.py +37 -100
- KekikStream/Plugins/SinemaCX.py +12 -18
- KekikStream/Plugins/Sinezy.py +11 -12
- KekikStream/Plugins/SuperFilmGeldi.py +8 -13
- KekikStream/Plugins/UgurFilm.py +14 -14
- KekikStream/Plugins/Watch32.py +42 -74
- KekikStream/Plugins/YabanciDizi.py +33 -87
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/METADATA +1 -1
- kekikstream-2.4.3.dist-info/RECORD +93 -0
- kekikstream-2.4.2.dist-info/RECORD +0 -93
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/WHEEL +0 -0
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/entry_points.txt +0 -0
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/licenses/LICENSE +0 -0
- {kekikstream-2.4.2.dist-info → kekikstream-2.4.3.dist-info}/top_level.txt +0 -0

KekikStream/Extractors/MixTiger.py
@@ -6,52 +6,29 @@ class MixTiger(ExtractorBase):
     name = "MixTiger"
     main_url = "https://www.mixtiger.com"
 
-    async def extract(self, url, referer=None) -> ExtractResult:
-
-
-        vid_id = url.split("video/")[-1] if "video/" in url else ""
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        ref = referer or self.main_url
+        v_id = url.split("video/")[-1] if "video/" in url else ""
 
-
-            url
-            data = {"hash":
+        resp = await self.httpx.post(
+            f"{url}?do=getVideo",
+            data = {"hash": v_id, "r": ref, "s": ""},
             headers = {
-                "Referer"
-                "
-                "X-Requested-With" : "XMLHttpRequest"
+                "Referer": ref,
+                "X-Requested-With": "XMLHttpRequest"
             }
         )
-
+        data = resp.json()
 
-
+        m3u8_url = data.get("videoSrc")
+        if not m3u8_url and data.get("videoSources"):
+            m3u8_url = data["videoSources"][-1].get("file")
 
-
-
-            m3u_link = video_data["videoSrc"]
-        # videoSources listesi varsa son elemanı al
-        elif video_data.get("videoSources"):
-            sources = video_data["videoSources"]
-            m3u_link = sources[-1].get("file") if sources else None
-        else:
-            m3u_link = None
-
-        if not m3u_link:
-            raise ValueError("Video URL not found in response")
-
-        # Recursive extraction check - başka extractor kullanılabilir mi?
-        try:
-            from KekikStream.Core.Extractor.ExtractorManager import ExtractorManager
-            manager = ExtractorManager()
-            if nested_extractor := manager.find_extractor(m3u_link):
-                # Nested extractor ile çıkar
-                return await nested_extractor.extract(m3u_link, referer=ext_ref)
-        except Exception:
-            # Recursive extraction başarısız olursa standart sonucu döndür
-            pass
+        if not m3u8_url:
+            raise ValueError(f"MixTiger: Video linki bulunamadı. {url}")
 
         return ExtractResult(
-            name
-            url
-            referer
-            subtitles = []
+            name = self.name,
+            url = m3u8_url,
+            referer = None if "disk.yandex" in m3u8_url else ref
         )
-
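
The rewritten MixTiger.extract() posts the video hash to "{url}?do=getVideo" and then prefers the JSON field videoSrc, falling back to the last videoSources entry. A minimal sketch of that selection step (the sample payload below is invented for illustration):

    def pick_source(data: dict):
        # Prefer the direct "videoSrc" field; otherwise take the last "videoSources" entry.
        m3u8_url = data.get("videoSrc")
        if not m3u8_url and data.get("videoSources"):
            m3u8_url = data["videoSources"][-1].get("file")
        return m3u8_url

    sample = {"videoSources": [{"file": "//cdn.example/480.m3u8"}, {"file": "//cdn.example/1080.m3u8"}]}
    print(pick_source(sample))  # //cdn.example/1080.m3u8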

KekikStream/Extractors/MolyStream.py
@@ -1,7 +1,6 @@
 # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
 
 from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, HTMLHelper
-import re
 
 class MolyStream(ExtractorBase):
     name = "MolyStream"
@@ -9,34 +8,31 @@ class MolyStream(ExtractorBase):
 
     # Birden fazla domain destekle
     supported_domains = [
-        "dbx.molystream.org",
-        "
-        "yd.sheila.stream",
-        "ydf.popcornvakti.net",
+        "dbx.molystream.org", "ydx.molystream.org",
+        "yd.sheila.stream", "ydf.popcornvakti.net",
     ]
 
     def can_handle_url(self, url: str) -> bool:
         return any(domain in url for domain in self.supported_domains)
 
-    async def extract(self, url, referer=None) -> ExtractResult:
-
-        secici = HTMLHelper(url)
-            video = secici.select_attr("video#sheplayer source", "src")
-        else:
-            video = url
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        self.httpx.headers.update({"Referer": referer or self.main_url})
 
-
-
+        # Eğer url zaten bir HTML kaynağıysa (doctype html içeriyorsa)
+        if "doctype html" in url.lower():
+            sel = HTMLHelper(url)
+            v_url = sel.select_attr("video#sheplayer source", "src")
+        else:
+            v_url = url
 
-        subtitles = [
-
-
-        ]
+        subtitles = []
+        for s_url, s_name in HTMLHelper(url).regex_all(r"addSrtFile\(['\"]([^'\"]+\.srt)['\"]\s*,\s*['\"][a-z]{2}['\"]\s*,\s*['\"]([^'\"]+)['\"]"):
+            subtitles.append(Subtitle(name=s_name, url=self.fix_url(s_url)))
 
         return ExtractResult(
-            name
-            url
-            referer
+            name = self.name,
+            url = v_url,
+            referer = v_url.replace("/sheila", "") if v_url else None,
             user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0",
-            subtitles
+            subtitles = subtitles
         )
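
MolyStream now harvests subtitles by regexing addSrtFile(...) calls out of the embed markup. A self-contained sketch of that pattern against a made-up snippet:

    import re

    # Same pattern as the new MolyStream subtitle loop; the HTML line is fabricated.
    PATTERN = r"addSrtFile\(['\"]([^'\"]+\.srt)['\"]\s*,\s*['\"][a-z]{2}['\"]\s*,\s*['\"]([^'\"]+)['\"]"

    html = 'addSrtFile("/srt/film.tr.srt", "tr", "Türkçe");'
    for s_url, s_name in re.findall(PATTERN, html):
        print(s_name, "->", s_url)  # Türkçe -> /srt/film.tr.srt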

KekikStream/Extractors/Odnoklassniki.py
@@ -1,117 +1,41 @@
 # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
 
 from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper
-import
+import json
 
 class Odnoklassniki(ExtractorBase):
     name = "Odnoklassniki"
     main_url = "https://odnoklassniki.ru"
 
-    # Birden fazla domain destekle
     supported_domains = ["odnoklassniki.ru", "ok.ru"]
 
     def can_handle_url(self, url: str) -> bool:
         return any(domain in url for domain in self.supported_domains)
 
-    async def extract(self, url, referer=None) -> ExtractResult:
-        if "/video/" in url:
-
-
-
-
-
-
-            "
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            r"\\u([0-9A-Fa-f]{4})",
-            lambda match: chr(int(match[1], 16)),
-            response_text
-        )
-
-        videos_json = HTMLHelper(response_text).regex_first(r'"videos":(\[.*?\])')
-        if not videos_json:
-            raise ValueError("No video data found in the response.")
-
-        try:
-            videos = json.loads(videos_json)
-        except json.JSONDecodeError as hata:
-            raise ValueError("Failed to parse video data.") from hata
-
-        quality_order = {
-            "ULTRA": 6,  # 4K veya daha yüksek
-            "QUAD": 5,   # 1440p
-            "FULL": 4,   # 1080p
-            "HD": 3,     # 720p
-            "SD": 2,     # 480p
-            "LOW": 1,    # 360p
-            "MOBILE": 0  # 144p
-        }
-
-        # Kaliteye göre en iyi videoyu seçme
-        best_video = None
-        best_quality_score = -1
-
-        for video in videos:
-            video_url = video.get("url")
-            quality_name = video.get("name", "").upper()
-
-            if not video_url or not quality_name:
-                continue
-
-            # Kalite sıralamasına göre puanla
-            quality_score = quality_order.get(quality_name, -1)
-            if quality_score > best_quality_score:
-                best_quality_score = quality_score
-                best_video = video_url
-
-        if not best_video:
-            raise ValueError("No valid video URLs found.")
-
-        if best_video.startswith("//"):
-            best_video = f"https:{best_video}"
-
-        return ExtractResult(
-            name = self.name,
-            url = best_video,
-            referer = referer,
-            user_agent = headers.get("User-Agent", None),
-            subtitles = []
-        )
-
-    async def fetch_with_redirects(self, url, max_redirects=5):
-        """Yönlendirmeleri takip eden bir fonksiyon"""
-        redirects = 0
-        while redirects < max_redirects:
-            istek = await self.httpx.get(url, follow_redirects=False)
-
-            if istek.status_code not in [301, 302]:
-                break  # Yönlendirme yoksa çık
-
-            redirected_url = istek.headers.get("Location")
-            if not redirected_url:
-                raise ValueError("Redirect location not found.")
-
-            url = redirected_url if redirected_url.startswith("http") else f"https://{redirected_url}"
-            redirects += 1
-
-        if redirects == max_redirects:
-            raise RuntimeError(f"Max redirects ({max_redirects}) reached.")
-
-        return istek
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        if "/video/" in url: url = url.replace("/video/", "/videoembed/")
+        self.httpx.headers.update({"Origin": self.main_url})
+
+        resp = await self.httpx.get(url, follow_redirects=True)
+        sel = HTMLHelper(resp.text)
+
+        v_data = sel.regex_first(r'"videos":(\[.*?\])')
+        if not v_data:
+            raise ValueError(f"Odnoklassniki: Video verisi bulunamadı. {url}")
+
+        # Kalite sıralaması (En yüksekten düşüğe)
+        order = ["ULTRA", "QUAD", "FULL", "HD", "SD", "LOW", "MOBILE"]
+        videos = json.loads(v_data)
+
+        best_url = None
+        for q in order:
+            best_url = next((v.get("url") for v in videos if v.get("name", "").upper() == q), None)
+            if best_url: break
+
+        if not best_url:
+            best_url = videos[0].get("url") if videos else None
+
+        if not best_url:
+            raise ValueError("Odnoklassniki: Geçerli video URL'si bulunamadı.")
+
+        return ExtractResult(name=self.name, url=self.fix_url(best_url), referer=referer)
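
Odnoklassniki swaps the old score-based quality selection for a fixed preference list walked with next(). A short sketch with an illustrative video list:

    # Ordered from highest to lowest quality, as in the new extractor.
    order  = ["ULTRA", "QUAD", "FULL", "HD", "SD", "LOW", "MOBILE"]
    videos = [
        {"name": "mobile", "url": "https://ok.example/144"},
        {"name": "hd",     "url": "https://ok.example/720"},
    ]

    best_url = None
    for q in order:
        best_url = next((v.get("url") for v in videos if v.get("name", "").upper() == q), None)
        if best_url:
            break

    print(best_url)  # https://ok.example/720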

KekikStream/Extractors/PeaceMakerst.py
@@ -7,57 +7,30 @@ class PeaceMakerst(ExtractorBase):
     name = "PeaceMakerst"
     main_url = "https://peacemakerst.com"
 
-    # Birden fazla domain destekle
     supported_domains = ["peacemakerst.com", "hdstreamable.com"]
 
     def can_handle_url(self, url: str) -> bool:
         return any(domain in url for domain in self.supported_domains)
 
-    async def extract(self, url, referer=None) -> ExtractResult:
-        if referer:
-            self.httpx.headers.update({"Referer": referer})
-
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
         self.httpx.headers.update({
-            "
+            "Referer" : referer or url,
             "X-Requested-With" : "XMLHttpRequest"
         })
 
-
-
-
-
-
-
-        }
-
-
-
-
-
-
-
-
-
-
-            teve2_response = await self.httpx.get(teve2_url, headers={"Referer": f"https://www.teve2.com.tr/embed/{teve2_id}"})
-            teve2_response.raise_for_status()
-            teve2_json = teve2_response.json()
-
-            m3u_link = f"{teve2_json['Media']['Link']['ServiceUrl']}//{teve2_json['Media']['Link']['SecurePath']}"
-        else:
-            try:
-                video_response = response.json()
-                if video_sources := video_response.get("videoSources", []):
-                    m3u_link = video_sources[-1]["file"]
-            except (json.JSONDecodeError, KeyError) as hata:
-                raise ValueError("Peace response is invalid or null.") from hata
-
-        if not m3u_link:
-            raise ValueError("m3u link not found.")
-
-        return ExtractResult(
-            name = self.name,
-            url = m3u_link,
-            referer = url,
-            subtitles = []
-        )
+        resp = await self.httpx.post(f"{url}?do=getVideo", data={"hash": url.split("video/")[-1], "r": referer or "", "s": ""})
+        data = resp.json()
+
+        m3u8_url = None
+        if "teve2.com.tr" in resp.text:
+            v_id = HTMLHelper(resp.text).regex_first(r"teve2\.com\.tr\\\/embed\\\/(\d+)")
+            t_resp = await self.httpx.get(f"https://www.teve2.com.tr/action/media/{v_id}")
+            t_data = t_resp.json()
+            m3u8_url = f"{t_data['Media']['Link']['ServiceUrl']}//{t_data['Media']['Link']['SecurePath']}"
+        elif sources := data.get("videoSources"):
+            m3u8_url = sources[-1]["file"]
+
+        if not m3u8_url:
+            raise ValueError(f"PeaceMakerst: Video linki bulunamadı. {url}")
+
+        return ExtractResult(name=self.name, url=m3u8_url, referer=url)
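
PeaceMakerst keeps the teve2.com.tr fallback: when the getVideo response references a teve2 embed, the embed id is captured and the media endpoint's ServiceUrl/SecurePath fields are joined into the stream URL. A sketch of the capture and join; the response bodies here are invented, only the regex and the field names come from the diff:

    import re

    resp_text = r'{"html": "<iframe src=\"https:\/\/www.teve2.com.tr\/embed\/12345\">"}'
    if v_id := re.search(r"teve2\.com\.tr\\\/embed\\\/(\d+)", resp_text):
        # A real request would hit https://www.teve2.com.tr/action/media/<id>; the reply is faked here.
        t_data = {"Media": {"Link": {"ServiceUrl": "https://cdn.example", "SecurePath": "stream.m3u8?st=x"}}}
        m3u8_url = f"{t_data['Media']['Link']['ServiceUrl']}//{t_data['Media']['Link']['SecurePath']}"
        print(v_id[1], m3u8_url)  # 12345 https://cdn.example//stream.m3u8?st=x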

KekikStream/Extractors/PixelDrain.py
@@ -6,23 +6,15 @@ class PixelDrain(ExtractorBase):
     name = "PixelDrain"
     main_url = "https://pixeldrain.com"
 
-    async def extract(self, url, referer=None) -> ExtractResult:
-        if referer:
-            self.httpx.headers.update({"Referer": referer})
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        if referer: self.httpx.headers.update({"Referer": referer})
 
-
-
-
-            raise ValueError("PixelDrain bağlantısından ID çıkarılamadı.")
-
-        m = matches[0]
-        pixel_id = next((g for g in m if g), None)
-        download_link = f"{self.main_url}/api/file/{pixel_id}?download"
-        referer_link = f"{self.main_url}/u/{pixel_id}?download"
+        p_id = HTMLHelper(url).regex_first(r"/u/([^/?]+)|([^\/]+)(?=\?download)")
+        if not p_id:
+            raise ValueError(f"PixelDrain: ID bulunamadı. {url}")
 
         return ExtractResult(
-            name
-            url
-            referer
-            subtitles = []
+            name = f"{self.name} - {p_id}",
+            url = f"{self.main_url}/api/file/{p_id}?download",
+            referer = f"{self.main_url}/u/{p_id}?download"
         )
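
The new PixelDrain code derives the file id from either a /u/<id> share link or an <id>?download API link with a single alternation, then rebuilds the API download URL. A stdlib-only sketch (example URLs only):

    import re

    PATTERN = r"/u/([^/?]+)|([^\/]+)(?=\?download)"

    for url in ("https://pixeldrain.com/u/AbC123", "https://pixeldrain.com/api/file/AbC123?download"):
        m    = re.search(PATTERN, url)
        p_id = next((g for g in m.groups() if g), None) if m else None
        print(f"https://pixeldrain.com/api/file/{p_id}?download")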

KekikStream/Extractors/PlayerFilmIzle.py
@@ -11,55 +11,36 @@ class PlayerFilmIzle(ExtractorBase):
         return "filmizle.in" in url or "fireplayer" in url.lower()
 
     async def extract(self, url: str, referer: str = None) -> ExtractResult:
-
-
-        self.httpx.headers.update({"Referer": ext_ref})
+        ref = referer or self.main_url
+        self.httpx.headers.update({"Referer": ref})
 
-
-
+        resp = await self.httpx.get(url)
+        sel = HTMLHelper(resp.text)
 
         subtitles = []
-
-
-
-        # Format örneği: [dil]url
-        if "]" in sub_yakala:
-            sub_lang_raw, sub_url = sub_yakala.split("]", 1)
-            sub_lang = sub_lang_raw.replace("[", "")
-            subtitles.append(Subtitle(name=sub_lang, url=sub_url))
+        if raw_subs := sel.regex_first(r'playerjsSubtitle\s*=\s*"([^"]*)"'):
+            for lang, link in HTMLHelper(raw_subs).regex_all(r'\[(.*?)\](https?://[^\s\",]+)'):
+                subtitles.append(Subtitle(name=lang.strip(), url=link.strip()))
 
-
-
-
-        # Data yakalama: FirePlayer("DATA", ...) formatından
-        data_val = HTMLHelper(unpacked).regex_first(r'(?i)FirePlayer\s*\(\s*["\']([a-f0-9]+)["\']')
+        content = Packer.unpack(resp.text) if Packer.detect_packed(resp.text) else resp.text
+        data_val = HTMLHelper(content).regex_first(r'FirePlayer\s*\(\s*["\']([a-f0-9]+)["\']')
 
         if not data_val:
-            raise ValueError("PlayerFilmIzle: Data
-
-        url_post = f"{self.main_url}/player/index.php?data={data_val}&do=getVideo"
-
-        post_headers = {
-            "Referer": ext_ref,
-            "X-Requested-With": "XMLHttpRequest"
-        }
-
-        # Kotlin'de post data: "hash" -> data, "r" -> ""
-        post_data = {"hash": data_val, "r": ""}
-
-        response = await self.httpx.post(url_post, data=post_data, headers=post_headers)
-        get_url = response.text.replace("\\", "")
-
-        m3u8_url = ""
-        m3u8_url = HTMLHelper(get_url).regex_first(r'(?i)"securedLink":"([^\\"]*)"') or m3u8_url
+            raise ValueError(f"PlayerFilmIzle: Data bulunamadı. {url}")
 
+        resp_vid = await self.httpx.post(
+            f"{self.main_url}/player/index.php?data={data_val}&do=getVideo",
+            data = {"hash": data_val, "r": ""},
+            headers = {"X-Requested-With": "XMLHttpRequest"}
+        )
+
+        m3u8_url = HTMLHelper(resp_vid.text).regex_first(r'"securedLink":"([^"]+)"')
         if not m3u8_url:
-            raise ValueError("PlayerFilmIzle:
+            raise ValueError(f"PlayerFilmIzle: Video URL bulunamadı. {url}")
 
         return ExtractResult(
-            name
-            url
-            referer
-
-            subtitles = subtitles
+            name = self.name,
+            url = m3u8_url.replace("\\", ""),
+            referer = ref,
+            subtitles = subtitles
         )
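
PlayerFilmIzle now unpacks the page when it is p.a.c.k.e.d, captures the FirePlayer hash, posts it back to /player/index.php?data=<hash>&do=getVideo, and reads securedLink from the reply. A sketch of the two capture steps; both sample strings are fabricated:

    import re

    page = "FirePlayer('9f86d081884c7d65', {autoplay: true});"
    if data_val := re.search(r'FirePlayer\s*\(\s*["\']([a-f0-9]+)["\']', page):
        print(data_val[1])  # 9f86d081884c7d65

    video_json = r'{"securedLink":"https:\/\/cdn.example\/hls\/master.m3u8"}'
    if secured := re.search(r'"securedLink":"([^"]+)"', video_json):
        print(secured[1].replace("\\", ""))  # https://cdn.example/hls/master.m3u8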

KekikStream/Extractors/RapidVid.py
@@ -14,57 +14,43 @@ class RapidVid(ExtractorBase):
     def can_handle_url(self, url: str) -> bool:
         return any(domain in url for domain in self.supported_domains)
 
-    async def extract(self, url, referer=None) -> ExtractResult:
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
         if referer:
             self.httpx.headers.update({"Referer": referer})
 
-
-
+        resp = await self.httpx.get(url)
+        sel = HTMLHelper(resp.text)
 
-        subtitles
-
-
-
-
-        for sub_url, sub_lang in subtitle_matches:
-            if sub_url in seen_subtitles:
-                continue
-
-            seen_subtitles.add(sub_url)
-            decoded_lang = (
-                sub_lang.replace("\\u0131", "ı")
-                .replace("\\u0130", "İ")
-                .replace("\\u00fc", "ü")
-                .replace("\\u00e7", "ç")
-            )
-            subtitles.append(Subtitle(name=decoded_lang, url=sub_url.replace("\\", "")))
+        subtitles = []
+        for s_url, s_lang in sel.regex_all(r'captions","file":"([^\"]+)","label":"([^\"]+)"'):
+            decoded_lang = s_lang.encode().decode('unicode_escape')
+            subtitles.append(Subtitle(name=decoded_lang, url=s_url.replace("\\", "")))
 
         try:
-
+            video_url = None
 
-            # Method 1:
-            if
-
-                decoded_url = HexCodec.decode(escaped_hex)
+            # Method 1: HexCodec pattern
+            if hex_data := sel.regex_first(r'file": "(.*)",'):
+                video_url = HexCodec.decode(hex_data)
 
             # Method 2: av('...') pattern
-            elif
-
+            elif av_data := sel.regex_first(r"av\('([^']+)'\)"):
+                video_url = self.decode_secret(av_data)
 
-            # Method 3: Packed
-            elif Packer.detect_packed(
-                unpacked
-
+            # Method 3: Packed dc_*
+            elif Packer.detect_packed(resp.text):
+                unpacked = Packer.unpack(resp.text)
+                video_url = StreamDecoder.extract_stream_url(unpacked)
 
-            if not
-                raise ValueError("
+            if not video_url:
+                raise ValueError(f"RapidVid: Video URL bulunamadı. {url}")
 
         except Exception as hata:
-            raise RuntimeError(f"Extraction failed: {hata}") from hata
+            raise RuntimeError(f"RapidVid: Extraction failed: {hata}") from hata
 
         return ExtractResult(
             name = self.name,
-            url =
+            url = video_url,
             referer = self.main_url,
             subtitles = subtitles
         )
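
RapidVid replaces the hand-written \uXXXX substitutions for subtitle labels with a single unicode_escape round-trip, matching the decoded_lang line in the hunk. For example:

    label = "T\\u00fcrk\\u00e7e Altyaz\\u0131"        # escaped label as it appears in the page source
    print(label.encode().decode("unicode_escape"))    # Türkçe Altyazı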

KekikStream/Extractors/SetPlay.py
@@ -7,60 +7,35 @@ class SetPlay(ExtractorBase):
     name = "SetPlay"
     main_url = "https://setplay.shop"
 
-    # Birden fazla domain destekle
     supported_domains = ["setplay.cfd", "setplay.shop", "setplay.site"]
 
     def can_handle_url(self, url: str) -> bool:
         return any(domain in url for domain in self.supported_domains)
 
-    async def extract(self, url, referer=None) -> ExtractResult:
-
-
-        if referer:
-            self.httpx.headers.update({"Referer": referer})
-
-        # Dinamik base URL kullan
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        self.httpx.headers.update({"Referer": referer or url})
         base_url = self.get_base_url(url)
 
-
-
-
-        hp = HTMLHelper(istek.text)
+        resp = await self.httpx.get(url)
+        sel = HTMLHelper(resp.text)
 
-
-
-        if not
-            raise ValueError("
-        video_url = video_url.replace("\\", "")
+        v_url = sel.regex_first(r'videoUrl":"([^",]+)"')
+        v_srv = sel.regex_first(r'videoServer":"([^",]+)"')
+        if not v_url or not v_srv:
+            raise ValueError(f"SetPlay: Video url/server bulunamadı. {url}")
 
-
-
-        if not video_server:
-            raise ValueError("videoServer not found")
-
-        # title çıkar (opsiyonel)
-        title_base = hp.regex_first(r'title":"([^",]+)"')
-        title_base = title_base.split(".")[-1] if title_base else "Unknown"
-
-        # partKey logic
-        parsed = urlparse(url)
-        params = parse_qs(parsed.query)
-        part_key = params.get("partKey", [""])[0]
+        params = parse_qs(urlparse(url).query)
+        part_key = params.get("partKey", [""])[0].lower()
 
-
-        if "turkcedublaj" in part_key
-
-        elif "turkcealtyazi" in part_key.lower():
-            name_suffix = "Altyazı"
+        suffix = "Bilinmiyor"
+        if "turkcedublaj" in part_key: suffix = "Dublaj"
+        elif "turkcealtyazi" in part_key: suffix = "Altyazı"
         else:
-
-
-        # M3U8 link oluştur - base_url kullan (main_url yerine)
-        m3u_link = f"{base_url}{video_url}?s={video_server}"
+            title = sel.regex_first(r'title":"([^",]+)"')
+            if title: suffix = title.split(".")[-1]
 
         return ExtractResult(
-            name = f"{self.name} - {
-            url =
-            referer = url
-            subtitles = []
+            name = f"{self.name} - {suffix}",
+            url = f"{base_url}{v_url.replace('\\', '')}?s={v_srv}",
+            referer = url
         )
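
SetPlay now labels the stream from the partKey query parameter, falling back to the embed title. A sketch of that labelling step (the embed URL is a made-up example):

    from urllib.parse import urlparse, parse_qs

    url      = "https://setplay.shop/embed/abc?partKey=TurkceDublaj-1"
    part_key = parse_qs(urlparse(url).query).get("partKey", [""])[0].lower()

    suffix = "Bilinmiyor"
    if "turkcedublaj" in part_key:
        suffix = "Dublaj"
    elif "turkcealtyazi" in part_key:
        suffix = "Altyazı"

    print(f"SetPlay - {suffix}")  # SetPlay - Dublaj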