KekikStream 1.7.1__py3-none-any.whl → 2.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. KekikStream/Core/Extractor/ExtractorBase.py +13 -7
  2. KekikStream/Core/Extractor/ExtractorLoader.py +25 -17
  3. KekikStream/Core/Extractor/ExtractorManager.py +53 -9
  4. KekikStream/Core/Extractor/ExtractorModels.py +5 -7
  5. KekikStream/Core/Extractor/YTDLPCache.py +35 -0
  6. KekikStream/Core/Media/MediaHandler.py +44 -26
  7. KekikStream/Core/Media/MediaManager.py +0 -3
  8. KekikStream/Core/Plugin/PluginBase.py +21 -9
  9. KekikStream/Core/Plugin/PluginLoader.py +11 -7
  10. KekikStream/Core/Plugin/PluginModels.py +25 -26
  11. KekikStream/Core/__init__.py +1 -0
  12. KekikStream/Extractors/CloseLoad.py +4 -5
  13. KekikStream/Extractors/ContentX.py +4 -6
  14. KekikStream/Extractors/ContentX_.py +40 -0
  15. KekikStream/Extractors/DzenRu.py +38 -0
  16. KekikStream/Extractors/ExPlay.py +53 -0
  17. KekikStream/Extractors/FirePlayer.py +60 -0
  18. KekikStream/Extractors/HDPlayerSystem.py +41 -0
  19. KekikStream/Extractors/JetTv.py +45 -0
  20. KekikStream/Extractors/MailRu.py +3 -4
  21. KekikStream/Extractors/MixPlayHD.py +2 -3
  22. KekikStream/Extractors/MixTiger.py +57 -0
  23. KekikStream/Extractors/MolyStream.py +5 -5
  24. KekikStream/Extractors/Odnoklassniki.py +7 -7
  25. KekikStream/Extractors/{OkRuHTTP.py → Odnoklassniki_.py} +5 -1
  26. KekikStream/Extractors/PeaceMakerst.py +4 -5
  27. KekikStream/Extractors/{HDStreamAble.py → PeaceMakerst_.py} +1 -1
  28. KekikStream/Extractors/PixelDrain.py +1 -2
  29. KekikStream/Extractors/PlayerFilmIzle.py +62 -0
  30. KekikStream/Extractors/RapidVid.py +2 -3
  31. KekikStream/Extractors/RapidVid_.py +7 -0
  32. KekikStream/Extractors/SetPlay.py +57 -0
  33. KekikStream/Extractors/SetPrime.py +45 -0
  34. KekikStream/Extractors/SibNet.py +2 -3
  35. KekikStream/Extractors/Sobreatsesuyp.py +4 -5
  36. KekikStream/Extractors/TRsTX.py +4 -5
  37. KekikStream/Extractors/TauVideo.py +2 -3
  38. KekikStream/Extractors/TurboImgz.py +2 -3
  39. KekikStream/Extractors/TurkeyPlayer.py +34 -0
  40. KekikStream/Extractors/VidHide.py +72 -0
  41. KekikStream/Extractors/VidMoly.py +4 -5
  42. KekikStream/Extractors/{VidMolyMe.py → VidMoly_.py} +1 -1
  43. KekikStream/Extractors/VidMoxy.py +2 -3
  44. KekikStream/Extractors/VidPapi.py +89 -0
  45. KekikStream/Extractors/VideoSeyred.py +3 -4
  46. KekikStream/Extractors/YTDLP.py +177 -0
  47. KekikStream/Extractors/YildizKisaFilm.py +41 -0
  48. KekikStream/Plugins/DiziBox.py +18 -23
  49. KekikStream/Plugins/DiziPal.py +16 -16
  50. KekikStream/Plugins/DiziYou.py +48 -23
  51. KekikStream/Plugins/Dizilla.py +47 -32
  52. KekikStream/Plugins/FilmBip.py +145 -0
  53. KekikStream/Plugins/FilmMakinesi.py +6 -8
  54. KekikStream/Plugins/FilmModu.py +9 -9
  55. KekikStream/Plugins/FullHDFilm.py +164 -0
  56. KekikStream/Plugins/FullHDFilmizlesene.py +4 -8
  57. KekikStream/Plugins/HDFilmCehennemi.py +15 -19
  58. KekikStream/Plugins/JetFilmizle.py +67 -49
  59. KekikStream/Plugins/KultFilmler.py +219 -0
  60. KekikStream/Plugins/RecTV.py +18 -22
  61. KekikStream/Plugins/RoketDizi.py +232 -0
  62. KekikStream/Plugins/SelcukFlix.py +309 -0
  63. KekikStream/Plugins/SezonlukDizi.py +12 -13
  64. KekikStream/Plugins/SineWix.py +8 -12
  65. KekikStream/Plugins/Sinefy.py +238 -0
  66. KekikStream/Plugins/SinemaCX.py +157 -0
  67. KekikStream/Plugins/Sinezy.py +146 -0
  68. KekikStream/Plugins/SuperFilmGeldi.py +121 -0
  69. KekikStream/Plugins/UgurFilm.py +7 -11
  70. KekikStream/__init__.py +34 -24
  71. KekikStream/requirements.txt +3 -4
  72. kekikstream-2.0.2.dist-info/METADATA +309 -0
  73. kekikstream-2.0.2.dist-info/RECORD +82 -0
  74. KekikStream/Extractors/FourCX.py +0 -7
  75. KekikStream/Extractors/FourPichive.py +0 -7
  76. KekikStream/Extractors/FourPlayRu.py +0 -7
  77. KekikStream/Extractors/Hotlinger.py +0 -7
  78. KekikStream/Extractors/OkRuSSL.py +0 -7
  79. KekikStream/Extractors/Pichive.py +0 -7
  80. KekikStream/Extractors/PlayRu.py +0 -7
  81. kekikstream-1.7.1.dist-info/METADATA +0 -109
  82. kekikstream-1.7.1.dist-info/RECORD +0 -63
  83. {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/WHEEL +0 -0
  84. {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/entry_points.txt +0 -0
  85. {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/licenses/LICENSE +0 -0
  86. {kekikstream-1.7.1.dist-info → kekikstream-2.0.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,45 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult
+ from urllib.parse import urlparse, parse_qs
+ import re
+
+ class SetPrime(ExtractorBase):
+     name = "SetPrime"
+     main_url = "https://setplay.site"
+
+     async def extract(self, url, referer=None) -> ExtractResult:
+         # URL parsing
+         parsed = urlparse(url)
+         params = parse_qs(parsed.query)
+         part_key = params.get("partKey", [""])[0].upper()
+         clean_url = url.split("?partKey=")[0]
+
+         # POST URL: embed?i= -> embed/get?i=
+         post_url = clean_url.replace("embed?i=", "embed/get?i=")
+
+         response = await self.httpx.post(
+             url = post_url,
+             headers = {"Referer": clean_url}
+         )
+         response.raise_for_status()
+
+         # Links parse
+         links_match = re.search(r'Links":\["([^"\]]+)"', response.text)
+         if not links_match:
+             raise ValueError("Links not found in SetPrime response")
+
+         link_suffix = links_match.group(1)
+         if not link_suffix.startswith("/"):
+             raise ValueError("Links not valid (must start with /)")
+
+         m3u_link = f"{self.main_url}{link_suffix}"
+
+         display_name = f"{self.name} - {part_key}" if part_key else self.name
+
+         return ExtractResult(
+             name = display_name,
+             url = m3u_link,
+             referer = clean_url,
+             subtitles = []
+         )
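
The partKey query parameter above only affects the display name. A minimal standalone sketch of that parsing, using a made-up embed URL (the real link format is whatever the calling plugin passes in):

    from urllib.parse import urlparse, parse_qs

    # Hypothetical embed URL, for illustration only.
    url = "https://setplay.site/embed?i=abc123&partKey=tr2"

    params = parse_qs(urlparse(url).query)
    part_key = params.get("partKey", [""])[0].upper()     # -> "TR2"

    name = "SetPrime"
    display_name = f"{name} - {part_key}" if part_key else name
    print(display_name)                                   # SetPrime - TR2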
@@ -9,9 +9,9 @@ class SibNet(ExtractorBase):

      async def extract(self, url, referer=None) -> ExtractResult:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

-         response = await self.cffi.get(url)
+         response = await self.httpx.get(url)
          response.raise_for_status()

          match = re.search(r'player\.src\(\[\{src: \"([^\"]+)\"', response.text)
@@ -24,6 +24,5 @@ class SibNet(ExtractorBase):
              name = self.name,
              url = m3u_link,
              referer = url,
-             headers = {},
              subtitles = []
          )
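
The SibNet hunks above are representative of the 2.0.x extractor rewrite: every self.cffi call becomes self.httpx, and the removed headers = {} argument reflects the slimmed-down ExtractResult model (ExtractorModels.py, +5 -7 in the list above). A rough sketch of the recurring extractor shape, assuming only what these hunks show about ExtractorBase and ExtractResult:

    import re
    from KekikStream.Core import ExtractorBase, ExtractResult

    class ExampleExtractor(ExtractorBase):   # hypothetical extractor, illustration only
        name = "Example"
        main_url = "https://example.invalid"

        async def extract(self, url, referer=None) -> ExtractResult:
            if referer:
                # self.httpx is the shared async client the hunks above rely on
                self.httpx.headers.update({"Referer": referer})

            istek = await self.httpx.get(url)
            istek.raise_for_status()

            match = re.search(r'file: "(.*)"', istek.text)
            if not match:
                raise ValueError(f"{self.name}: no video link found")

            return ExtractResult(
                name = self.name,
                url = match[1],
                referer = referer or self.main_url,
                subtitles = []   # 2.0.x ExtractResult no longer takes a headers={} argument
            )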
@@ -9,9 +9,9 @@ class Sobreatsesuyp(ExtractorBase):

      async def extract(self, url, referer=None) -> ExtractResult:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

-         istek = await self.cffi.get(url)
+         istek = await self.httpx.get(url)
          istek.raise_for_status()

          file_match = re.search(r'file\":\"([^\"]+)', istek.text)
@@ -21,7 +21,7 @@ class Sobreatsesuyp(ExtractorBase):
          file_path = file_match[1].replace("\\", "")
          post_link = f"{self.main_url}/{file_path}"

-         post_istek = await self.cffi.post(post_link)
+         post_istek = await self.httpx.post(post_link)
          post_istek.raise_for_status()

          try:
@@ -41,7 +41,7 @@ class Sobreatsesuyp(ExtractorBase):
                  continue

              playlist_url = f"{self.main_url}/playlist/{file.lstrip('/')}.txt"
-             playlist_request = await self.cffi.post(playlist_url, headers={"Referer": referer or self.main_url})
+             playlist_request = await self.httpx.post(playlist_url, headers={"Referer": referer or self.main_url})
              playlist_request.raise_for_status()

              all_results.append(
@@ -49,7 +49,6 @@ class Sobreatsesuyp(ExtractorBase):
                      name = f"{self.name} - {title}",
                      url = playlist_request.text,
                      referer = self.main_url,
-                     headers = {},
                      subtitles = []
                  )
              )
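
Sobreatsesuyp (and TRsTX below, which shares the same flow) resolves streams in three steps: scrape a file":"..." path from the embed page, POST it back to get a JSON list of variants, then POST each variant to /playlist/<id>.txt. A sketch of just the parsing side, fed with fabricated response bodies:

    import json, re

    # Fabricated stand-ins for the embed page and the follow-up POST response.
    embed_page = 'player setup: "file":"video\\/abc123"'
    post_body  = '[{"title": "Kaynak 1", "file": "pl/abc123"}, {"title": "Kaynak 2", "file": "pl/def456"}]'
    main_url   = "https://example.invalid"   # hypothetical

    file_path = re.search(r'file\":\"([^\"]+)', embed_page)[1].replace("\\", "")
    post_link = f"{main_url}/{file_path}"    # POSTed to obtain the JSON variant list

    for item in json.loads(post_body):
        title, file = item.get("title"), item.get("file")
        if not file:
            continue
        playlist_url = f"{main_url}/playlist/{file.lstrip('/')}.txt"
        print(title, playlist_url)           # the .txt response body is used as the stream URL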
@@ -9,9 +9,9 @@ class TRsTX(ExtractorBase):

      async def extract(self, url, referer=None) -> list[ExtractResult]:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

-         istek = await self.cffi.get(url)
+         istek = await self.httpx.get(url)
          istek.raise_for_status()

          file_match = re.search(r'file\":\"([^\"]+)', istek.text)
@@ -21,7 +21,7 @@ class TRsTX(ExtractorBase):
          file_path = file_match[1].replace("\\", "")
          post_link = f"{self.main_url}/{file_path}"

-         post_istek = await self.cffi.post(post_link)
+         post_istek = await self.httpx.post(post_link)
          post_istek.raise_for_status()

          try:
@@ -42,7 +42,7 @@ class TRsTX(ExtractorBase):
                  continue

              playlist_url = f"{self.main_url}/playlist/{file.lstrip('/')}.txt"
-             playlist_request = await self.cffi.post(playlist_url, headers={"Referer": referer or self.main_url})
+             playlist_request = await self.httpx.post(playlist_url, headers={"Referer": referer or self.main_url})
              playlist_request.raise_for_status()

              video_data = playlist_request.text
@@ -57,7 +57,6 @@ class TRsTX(ExtractorBase):
                      name = f"{self.name} - {title}",
                      url = video_data,
                      referer = self.main_url,
-                     headers = {},
                      subtitles = []
                  )
              )
@@ -8,12 +8,12 @@ class TauVideo(ExtractorBase):

      async def extract(self, url, referer=None) -> list[ExtractResult]:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

          video_key = url.split("/")[-1]
          api_url = f"{self.main_url}/api/video/{video_key}"

-         response = await self.cffi.get(api_url)
+         response = await self.httpx.get(api_url)
          response.raise_for_status()

          api_data = response.json()
@@ -26,7 +26,6 @@ class TauVideo(ExtractorBase):
                  name = f"{self.name} - {video['label']}",
                  url = video["url"],
                  referer = referer or self.main_url,
-                 headers = {},
                  subtitles = []
              )
              for video in api_data["urls"]
@@ -9,9 +9,9 @@ class TurboImgz(ExtractorBase):

      async def extract(self, url, referer=None) -> ExtractResult:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

-         istek = await self.cffi.get(url)
+         istek = await self.httpx.get(url)
          istek.raise_for_status()

          if video_match := re.search(r'file: "(.*)",', istek.text):
@@ -19,7 +19,6 @@ class TurboImgz(ExtractorBase):
                  name = self.name,
                  url = video_match[1],
                  referer = referer or self.main_url,
-                 headers = {},
                  subtitles = []
              )
          else:
@@ -0,0 +1,34 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle
+ import re, json
+
+ class TurkeyPlayer(ExtractorBase):
+     name = "TurkeyPlayer"
+     main_url = "https://watch.turkeyplayer.com/"
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult:
+         if referer:
+             self.httpx.headers.update({"Referer": referer})
+
+         istek = await self.httpx.get(url)
+         page_content = istek.text
+
+         video_json_match = re.search(r'var\s+video\s*=\s*(\{.*?\});', page_content, re.DOTALL)
+         if not video_json_match:
+             raise ValueError("TurkeyPlayer: Video JSON bulunamadı")
+
+         video_data = json.loads(video_json_match.group(1))
+
+         video_id = video_data.get("id")
+         video_md5 = video_data.get("md5")
+
+         master_url = f"https://watch.turkeyplayer.com/m3u8/8/{video_md5}/master.txt?s=1&id={video_id}&cache=1"
+
+         return ExtractResult(
+             name = self.name,
+             url = master_url,
+             referer = referer or url,
+             user_agent = self.httpx.headers.get("User-Agent", ""),
+             subtitles = []
+         )
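
TurkeyPlayer lifts a var video = {...}; object out of the embed page and builds the master playlist URL from its id and md5 fields. A self-contained sketch of that regex step against a fabricated page snippet:

    import json, re

    # Fabricated page snippet; the real embed markup may differ.
    page_content = 'init(); var video = {"id": "4242", "md5": "d41d8cd98f00b204e9800998ecf8427e"}; play();'

    match = re.search(r'var\s+video\s*=\s*(\{.*?\});', page_content, re.DOTALL)
    video = json.loads(match.group(1))

    master_url = (
        "https://watch.turkeyplayer.com/m3u8/8/"
        f"{video['md5']}/master.txt?s=1&id={video['id']}&cache=1"
    )
    print(master_url)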
@@ -0,0 +1,72 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle
+ from Kekik.Sifreleme import Packer
+ from parsel import Selector
+ import re
+
+ class VidHide(ExtractorBase):
+     name = "VidHide"
+     main_url = "https://vidhidepro.com"
+
+     def get_embed_url(self, url: str) -> str:
+         if "/d/" in url:
+             return url.replace("/d/", "/v/")
+         elif "/download/" in url:
+             return url.replace("/download/", "/v/")
+         elif "/file/" in url:
+             return url.replace("/file/", "/v/")
+         else:
+             return url.replace("/f/", "/v/")
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult:
+         if referer:
+             self.httpx.headers.update({"Referer": referer})
+
+         self.httpx.headers.update({
+             "Sec-Fetch-Dest" : "empty",
+             "Sec-Fetch-Mode" : "cors",
+             "Sec-Fetch-Site" : "cross-site",
+             "Origin" : self.main_url,
+         })
+
+         embed_url = self.get_embed_url(url)
+         istek = await self.httpx.get(embed_url)
+         response = istek.text
+
+         script = None
+         if "eval(function" in response:
+             try:
+                 unpacked = Packer.unpack(response)
+                 if "var links" in unpacked:
+                     script = unpacked.split("var links")[1]
+                 else:
+                     script = unpacked
+             except Exception:
+                 pass
+
+         if not script:
+             if matches := re.search(r'sources:\s*(\[.*?\])', response, re.DOTALL):
+                 script = matches.group(1)
+
+         m3u8_url = None
+         if script:
+             # m3u8 urls could be prefixed by 'file:', 'hls2:' or 'hls4:', so we just match ':'
+             if match := re.search(r':\s*"([^"]*?m3u8[^"]*?)"', script):
+                 m3u8_url = match.group(1)
+
+         if not m3u8_url:
+             # Fallback direct search in response if unpacking failed or structure changed
+             if match := re.search(r'file:"(.*?\.m3u8.*?)"', response):
+                 m3u8_url = match.group(1)
+
+         if not m3u8_url:
+             raise ValueError(f"VidHide: Video URL bulunamadı. {url}")
+
+         return ExtractResult(
+             name = self.name,
+             url = self.fix_url(m3u8_url),
+             referer = f"{self.main_url}/",
+             user_agent = self.httpx.headers.get("User-Agent", ""),
+             subtitles = []
+         )
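
The comment above explains why the primary regex anchors on a colon rather than a key name: the m3u8 link may sit under file, hls2 or hls4 in the unpacked player script. A quick sketch of both regex paths on fabricated inputs:

    import re

    # Fabricated unpacked script and raw page; real Packer.unpack() output will differ.
    script   = 'var links={"hls2":"https://cdn.example.invalid/stream/master.m3u8?t=1"}'
    raw_page = 'sources: [{file:"https://cdn.example.invalid/stream/master.m3u8"}]'

    m3u8_url = None
    # Primary path: any quoted value containing m3u8 that follows a colon.
    if match := re.search(r':\s*"([^"]*?m3u8[^"]*?)"', script):
        m3u8_url = match.group(1)

    # Fallback path: plain file:"...m3u8..." in the raw page when unpacking fails.
    if not m3u8_url and (match := re.search(r'file:"(.*?\.m3u8.*?)"', raw_page)):
        m3u8_url = match.group(1)

    print(m3u8_url)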
@@ -11,9 +11,9 @@ class VidMoly(ExtractorBase):

      async def extract(self, url: str, referer: str = None) -> ExtractResult:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

-         self.cffi.headers.update({
+         self.httpx.headers.update({
              "Sec-Fetch-Dest" : "iframe",
          })

@@ -21,10 +21,10 @@ class VidMoly(ExtractorBase):
          self.main_url = self.main_url.replace(".me", ".net")
          url = url.replace(".me", ".net")

-         response = await self.cffi.get(url)
+         response = await self.httpx.get(url)
          if "Select number" in response.text:
              secici = Selector(response.text)
-             response = await self.cffi.post(
+             response = await self.httpx.post(
                  url = url,
                  data = {
                      "op" : secici.css("input[name='op']::attr(value)").get(),
@@ -80,7 +80,6 @@ class VidMoly(ExtractorBase):
              name = self.name,
              url = video_url,
              referer = self.main_url,
-             headers = {},
              subtitles = subtitles
          )

@@ -4,4 +4,4 @@ from KekikStream.Extractors.VidMoly import VidMoly

  class VidMolyMe(VidMoly):
      name = "VidMolyMe"
-     main_url = "https://vidmoly.me"
+     main_url = "https://vidmoly.me"
@@ -10,9 +10,9 @@ class VidMoxy(ExtractorBase):

      async def extract(self, url, referer=None) -> ExtractResult:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

-         istek = await self.cffi.get(url)
+         istek = await self.httpx.get(url)
          istek.raise_for_status()

          subtitles = []
@@ -45,6 +45,5 @@ class VidMoxy(ExtractorBase):
              name = self.name,
              url = m3u_link,
              referer = self.main_url,
-             headers = {},
              subtitles = subtitles
          )
@@ -0,0 +1,89 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle
+ import re
+
+ class VidPapi(ExtractorBase):
+     name = "VidApi"
+     main_url = "https://vidpapi.xyz"
+
+     async def extract(self, url, referer=None) -> ExtractResult:
+         ext_ref = referer or ""
+
+         # URL parsing
+         if "video/" in url:
+             vid_id = url.split("video/")[-1]
+         else:
+             vid_id = url.split("?data=")[-1]
+
+         # 1. Altyazıları çek
+         sub_url = f"{self.main_url}/player/index.php?data={vid_id}"
+         sub_headers = {
+             "Content-Type" : "application/x-www-form-urlencoded; charset=UTF-8",
+             "X-Requested-With" : "XMLHttpRequest",
+             "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0",
+             "Referer" : ext_ref or "https://kultfilmler.pro/"
+         }
+
+         subtitles = []
+         try:
+             sub_istek = await self.httpx.post(
+                 url = sub_url,
+                 headers = sub_headers,
+                 data = {"hash": vid_id, "r": "https://kultfilmler.pro/"}
+             )
+
+             subtitle_match = re.search(r'var playerjsSubtitle = "([^"]*)"', sub_istek.text, re.IGNORECASE)
+             if subtitle_match and subtitle_match.group(1):
+                 raw_subs = subtitle_match.group(1)
+
+                 found_subs = re.findall(r'\[(.*?)\](.*?)(?:,|$)', raw_subs)
+                 for lang, sub_link in found_subs:
+                     lang = lang.strip()
+                     if "Türkçe" in lang:
+                         lang_code = "tr"
+                         lang_name = "Turkish"
+                     elif "İngilizce" in lang:
+                         lang_code = "en"
+                         lang_name = "English"
+                     else:
+                         lang_code = lang[:2].lower()
+                         lang_name = lang
+
+                     subtitles.append(Subtitle(
+                         name = lang_name,
+                         url = sub_link.strip()
+                     ))
+
+         except Exception as e:
+             pass
+
+         # 2. Videoyu çek
+         video_url = f"{self.main_url}/player/index.php?data={vid_id}&do=getVideo"
+         video_headers = sub_headers.copy()
+
+         response = await self.httpx.post(
+             url = video_url,
+             headers = video_headers,
+             data = {"hash": vid_id, "r": "https://kultfilmler.pro/"}
+         )
+         response.raise_for_status()
+
+         try:
+             video_data = response.json()
+         except Exception:
+             return None
+
+         stream_url = video_data.get("securedLink")
+         if not stream_url or not stream_url.strip():
+             stream_url = video_data.get("videoSource")
+
+         if not stream_url:
+             raise ValueError("No video link found in VidPapi response")
+
+         return ExtractResult(
+             name = self.name,
+             url = stream_url,
+             referer = ext_ref or self.main_url,
+             subtitles = subtitles
+         )
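
The subtitle handling above parses a playerjs-style string of the form [Label]url,[Label]url. A standalone sketch of that loop with a fabricated playerjsSubtitle value:

    import re

    # Fabricated value; the real one comes from the player/index.php response.
    raw_subs = "[Türkçe]https://cdn.example.invalid/tr.vtt,[İngilizce]https://cdn.example.invalid/en.vtt"

    for lang, sub_link in re.findall(r'\[(.*?)\](.*?)(?:,|$)', raw_subs):
        lang = lang.strip()
        if "Türkçe" in lang:
            lang_code, lang_name = "tr", "Turkish"
        elif "İngilizce" in lang:
            lang_code, lang_name = "en", "English"
        else:
            lang_code, lang_name = lang[:2].lower(), lang
        print(lang_code, lang_name, sub_link.strip())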
@@ -9,18 +9,18 @@ class VideoSeyred(ExtractorBase):

      async def extract(self, url, referer=None) -> ExtractResult:
          if referer:
-             self.cffi.headers.update({"Referer": referer})
+             self.httpx.headers.update({"Referer": referer})

          video_id = url.split("embed/")[1].split("?")[0]
          if len(video_id) > 10:
-             kontrol = await self.cffi.get(url)
+             kontrol = await self.httpx.get(url)
              kontrol.raise_for_status()

              video_id = re.search(r"playlist\/(.*)\.json", kontrol.text)[1]

          video_url = f"{self.main_url}/playlist/{video_id}.json"

-         response = await self.cffi.get(video_url)
+         response = await self.httpx.get(video_url)
          response.raise_for_status()

          try:
@@ -43,7 +43,6 @@ class VideoSeyred(ExtractorBase):
                  name = self.name,
                  url = self.fix_url(source["file"]),
                  referer = self.main_url,
-                 headers = {},
                  subtitles = subtitles,
              )
              for source in response_data.get("sources", [])
@@ -0,0 +1,177 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, get_ytdlp_extractors
+ from urllib.parse import urlparse
+ import yt_dlp, re, sys, os
+
+ class YTDLP(ExtractorBase):
+     name = "yt-dlp"
+     main_url = "" # Universal - tüm siteleri destekler
+
+     _FAST_DOMAIN_RE = None # compiled mega-regex (host üstünden)
+
+     @classmethod
+     def _init_fast_domain_regex(cls):
+         """
+         Fast domain regex'i initialize et
+         """
+         if cls._FAST_DOMAIN_RE is not None:
+             return
+
+         domains = set()
+
+         # Merkezi cache'den extractorları al
+         extractors = get_ytdlp_extractors()
+
+         # yt-dlp extractor'larının _VALID_URL regex'lerinden domain yakala
+         # Regex metinlerinde domainler genelde "\." şeklinde geçer.
+         domain_pat = re.compile(r"(?:[a-z0-9-]+\\\.)+[a-z]{2,}", re.IGNORECASE)
+
+         for ie in extractors:
+             valid = getattr(ie, "_VALID_URL", None)
+             if not valid or not isinstance(valid, str):
+                 continue
+
+             for m in domain_pat.findall(valid):
+                 d = m.replace(r"\.", ".").lower()
+
+                 # Çok agresif/şüpheli şeyleri elemek istersen burada filtre koyabilirsin
+                 # (genelde gerek kalmıyor)
+                 domains.add(d)
+
+         # Hiç domain çıkmazsa (çok uç durum) fallback: boş regex
+         if not domains:
+             cls._FAST_DOMAIN_RE = re.compile(r"$^") # hiçbir şeye match etmez
+             return
+
+         # Host eşleştirmesi: subdomain destekli (m.youtube.com, player.vimeo.com vs.)
+         # (?:^|.*\.) (domain1|domain2|...) $
+         joined = "|".join(sorted(re.escape(d) for d in domains))
+         pattern = rf"(?:^|.*\.)(?:{joined})$"
+         cls._FAST_DOMAIN_RE = re.compile(pattern, re.IGNORECASE)
+
+     def __init__(self):
+         self.__class__._init_fast_domain_regex()
+
+     def can_handle_url(self, url: str) -> bool:
+         """
+         Fast-path: URL host'unu tek mega-regex ile kontrol et (loop yok)
+         Slow-path: gerekirse mevcut extract_info tabanlı kontrolün
+         """
+         # URL parse + host al
+         try:
+             parsed = urlparse(url)
+             host = (parsed.hostname or "").lower()
+         except Exception:
+             host = ""
+
+         # Şemasız URL desteği: "youtube.com/..." gibi
+         if not host and "://" not in url:
+             try:
+                 parsed = urlparse("https://" + url)
+                 host = (parsed.hostname or "").lower()
+             except Exception:
+                 host = ""
+
+         # Fast-path
+         if host and self.__class__._FAST_DOMAIN_RE.search(host):
+             return True
+
+         # SLOW PATH: Diğer siteler için yt-dlp'nin native kontrolü
+         try:
+             # stderr'ı geçici olarak kapat (hata mesajlarını gizle)
+             old_stderr = sys.stderr
+             sys.stderr = open(os.devnull, "w")
+
+             try:
+                 ydl_opts = {
+                     "simulate" : True, # Download yok, sadece tespit
+                     "quiet" : True, # Log kirliliği yok
+                     "no_warnings" : True, # Uyarı mesajları yok
+                     "extract_flat" : True, # Minimal işlem
+                     "no_check_certificates" : True,
+                     "ignoreerrors" : True, # Hataları yoksay
+                     "socket_timeout" : 3,
+                     "retries" : 1
+                 }
+
+                 with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+                     # URL'yi işleyebiliyor mu kontrol et
+                     info = ydl.extract_info(url, download=False, process=False)
+
+                     # Generic extractor ise atla
+                     if info and info.get("extractor_key") != "Generic":
+                         return True
+
+                 return False
+             finally:
+                 # stderr'ı geri yükle
+                 sys.stderr.close()
+                 sys.stderr = old_stderr
+
+         except Exception:
+             # yt-dlp işleyemezse False döndür
+             return False
+
+     async def extract(self, url: str, referer: str | None = None) -> ExtractResult:
+         ydl_opts = {
+             "quiet" : True,
+             "no_warnings" : True,
+             "extract_flat" : False, # Tam bilgi al
+             "format" : "best", # En iyi kalite
+             "no_check_certificates" : True,
+             "socket_timeout" : 3,
+             "retries" : 1
+         }
+
+         # Referer varsa header olarak ekle
+         if referer:
+             ydl_opts["http_headers"] = {"Referer": referer}
+
+         with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+             info = ydl.extract_info(url, download=False)
+
+         if not info:
+             raise ValueError("yt-dlp video bilgisi döndürmedi")
+
+         # Video URL'sini al
+         video_url = info.get("url")
+         if not video_url:
+             # Bazen formatlar listesinde olabilir
+             formats = info.get("formats", [])
+             if formats:
+                 video_url = formats[-1].get("url") # Son format (genellikle en iyi)
+
+         if not video_url:
+             raise ValueError("Video URL bulunamadı")
+
+         # Altyazıları çıkar
+         subtitles = []
+         if subtitle_data := info.get("subtitles"):
+             for lang, subs in subtitle_data.items():
+                 for sub in subs:
+                     if sub_url := sub.get("url"):
+                         subtitles.append(
+                             Subtitle(
+                                 name=f"{lang} ({sub.get('ext', 'unknown')})",
+                                 url=sub_url
+                             )
+                         )
+
+         # User-Agent al
+         user_agent = None
+         http_headers = info.get("http_headers", {})
+         if http_headers:
+             user_agent = http_headers.get("User-Agent")
+
+         return ExtractResult(
+             name = self.name,
+             url = video_url,
+             referer = referer or info.get("webpage_url"),
+             user_agent = user_agent,
+             subtitles = subtitles
+         )
+
+     async def close(self):
+         """yt-dlp için cleanup gerekmez"""
+         pass
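
The fast path reduces every yt-dlp _VALID_URL pattern to a set of literal domains and checks only the URL's host against one combined regex, so the expensive extract_info probe is skipped for well-known hosts. A reduced sketch of that host check with a hard-coded domain set (the real set is harvested from get_ytdlp_extractors()):

    import re
    from urllib.parse import urlparse

    # Stand-in domain set; YTDLP builds this from yt-dlp's _VALID_URL regexes.
    domains = {"youtube.com", "vimeo.com", "dailymotion.com"}

    joined  = "|".join(sorted(re.escape(d) for d in domains))
    fast_re = re.compile(rf"(?:^|.*\.)(?:{joined})$", re.IGNORECASE)

    def fast_can_handle(url: str) -> bool:
        host = (urlparse(url).hostname or "").lower()
        if not host and "://" not in url:     # schemeless input like "youtube.com/watch?v=x"
            host = (urlparse("https://" + url).hostname or "").lower()
        return bool(host and fast_re.search(host))

    print(fast_can_handle("https://m.youtube.com/watch?v=abc"))   # True: subdomains still match
    print(fast_can_handle("https://example.org/video"))           # False: falls back to extract_info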