KekikStream 2.5.8__py3-none-any.whl → 2.5.9__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (107)
  1. KekikStream/CLI/__pycache__/pypi_kontrol.cpython-314.pyc +0 -0
  2. KekikStream/Core/Extractor/__pycache__/ExtractorBase.cpython-314.pyc +0 -0
  3. KekikStream/Core/Extractor/__pycache__/ExtractorLoader.cpython-314.pyc +0 -0
  4. KekikStream/Core/Extractor/__pycache__/ExtractorManager.cpython-314.pyc +0 -0
  5. KekikStream/Core/Extractor/__pycache__/ExtractorModels.cpython-314.pyc +0 -0
  6. KekikStream/Core/Extractor/__pycache__/YTDLPCache.cpython-314.pyc +0 -0
  7. KekikStream/Core/Media/__pycache__/MediaHandler.cpython-314.pyc +0 -0
  8. KekikStream/Core/Media/__pycache__/MediaManager.cpython-314.pyc +0 -0
  9. KekikStream/Core/Plugin/__pycache__/PluginBase.cpython-314.pyc +0 -0
  10. KekikStream/Core/Plugin/__pycache__/PluginLoader.cpython-314.pyc +0 -0
  11. KekikStream/Core/Plugin/__pycache__/PluginManager.cpython-314.pyc +0 -0
  12. KekikStream/Core/Plugin/__pycache__/PluginModels.cpython-314.pyc +0 -0
  13. KekikStream/Core/UI/__pycache__/UIManager.cpython-314.pyc +0 -0
  14. KekikStream/Core/__pycache__/HTMLHelper.cpython-314.pyc +0 -0
  15. KekikStream/Core/__pycache__/__init__.cpython-314.pyc +0 -0
  16. KekikStream/Extractors/StreamWish.py +80 -0
  17. KekikStream/Extractors/VidHide.py +46 -44
  18. KekikStream/Extractors/VidStack.py +88 -0
  19. KekikStream/Extractors/__pycache__/Abstream.cpython-314.pyc +0 -0
  20. KekikStream/Extractors/__pycache__/CloseLoad.cpython-314.pyc +0 -0
  21. KekikStream/Extractors/__pycache__/ContentX.cpython-314.pyc +0 -0
  22. KekikStream/Extractors/__pycache__/DonilasPlay.cpython-314.pyc +0 -0
  23. KekikStream/Extractors/__pycache__/DzenRu.cpython-314.pyc +0 -0
  24. KekikStream/Extractors/__pycache__/ExPlay.cpython-314.pyc +0 -0
  25. KekikStream/Extractors/__pycache__/Filemoon.cpython-314.pyc +0 -0
  26. KekikStream/Extractors/__pycache__/HDMomPlayer.cpython-314.pyc +0 -0
  27. KekikStream/Extractors/__pycache__/HDPlayerSystem.cpython-314.pyc +0 -0
  28. KekikStream/Extractors/__pycache__/HotStream.cpython-314.pyc +0 -0
  29. KekikStream/Extractors/__pycache__/JFVid.cpython-314.pyc +0 -0
  30. KekikStream/Extractors/__pycache__/JetTv.cpython-314.pyc +0 -0
  31. KekikStream/Extractors/__pycache__/JetV.cpython-314.pyc +0 -0
  32. KekikStream/Extractors/__pycache__/LuciferPlays.cpython-314.pyc +0 -0
  33. KekikStream/Extractors/__pycache__/MailRu.cpython-314.pyc +0 -0
  34. KekikStream/Extractors/__pycache__/MixPlayHD.cpython-314.pyc +0 -0
  35. KekikStream/Extractors/__pycache__/MixTiger.cpython-314.pyc +0 -0
  36. KekikStream/Extractors/__pycache__/MolyStream.cpython-314.pyc +0 -0
  37. KekikStream/Extractors/__pycache__/Odnoklassniki.cpython-314.pyc +0 -0
  38. KekikStream/Extractors/__pycache__/PeaceMakerst.cpython-314.pyc +0 -0
  39. KekikStream/Extractors/__pycache__/PixelDrain.cpython-314.pyc +0 -0
  40. KekikStream/Extractors/__pycache__/PlayerFilmIzle.cpython-314.pyc +0 -0
  41. KekikStream/Extractors/__pycache__/RapidVid.cpython-314.pyc +0 -0
  42. KekikStream/Extractors/__pycache__/SetPlay.cpython-314.pyc +0 -0
  43. KekikStream/Extractors/__pycache__/SetPrime.cpython-314.pyc +0 -0
  44. KekikStream/Extractors/__pycache__/SibNet.cpython-314.pyc +0 -0
  45. KekikStream/Extractors/__pycache__/Sobreatsesuyp.cpython-314.pyc +0 -0
  46. KekikStream/Extractors/__pycache__/StreamWish.cpython-314.pyc +0 -0
  47. KekikStream/Extractors/__pycache__/TRsTX.cpython-314.pyc +0 -0
  48. KekikStream/Extractors/__pycache__/TauVideo.cpython-314.pyc +0 -0
  49. KekikStream/Extractors/__pycache__/TurboImgz.cpython-314.pyc +0 -0
  50. KekikStream/Extractors/__pycache__/TurkeyPlayer.cpython-314.pyc +0 -0
  51. KekikStream/Extractors/__pycache__/VCTPlay.cpython-314.pyc +0 -0
  52. KekikStream/Extractors/__pycache__/Veev.cpython-314.pyc +0 -0
  53. KekikStream/Extractors/__pycache__/VidBiz.cpython-314.pyc +0 -0
  54. KekikStream/Extractors/__pycache__/VidHide.cpython-314.pyc +0 -0
  55. KekikStream/Extractors/__pycache__/VidMoly.cpython-314.pyc +0 -0
  56. KekikStream/Extractors/__pycache__/VidMoxy.cpython-314.pyc +0 -0
  57. KekikStream/Extractors/__pycache__/VidPapi.cpython-314.pyc +0 -0
  58. KekikStream/Extractors/__pycache__/VidStack.cpython-314.pyc +0 -0
  59. KekikStream/Extractors/__pycache__/VideoSeyred.cpython-314.pyc +0 -0
  60. KekikStream/Extractors/__pycache__/Videostr.cpython-314.pyc +0 -0
  61. KekikStream/Extractors/__pycache__/Vidoza.cpython-314.pyc +0 -0
  62. KekikStream/Extractors/__pycache__/Vtbe.cpython-314.pyc +0 -0
  63. KekikStream/Extractors/__pycache__/YTDLP.cpython-314.pyc +0 -0
  64. KekikStream/Extractors/__pycache__/YildizKisaFilm.cpython-314.pyc +0 -0
  65. KekikStream/Extractors/__pycache__/Zeus.cpython-314.pyc +0 -0
  66. KekikStream/Plugins/DDizi.py +176 -0
  67. KekikStream/Plugins/ShowFlix.py +211 -0
  68. KekikStream/Plugins/__pycache__/BelgeselX.cpython-314.pyc +0 -0
  69. KekikStream/Plugins/__pycache__/DDizi.cpython-314.pyc +0 -0
  70. KekikStream/Plugins/__pycache__/DiziBox.cpython-314.pyc +0 -0
  71. KekikStream/Plugins/__pycache__/DiziMom.cpython-314.pyc +0 -0
  72. KekikStream/Plugins/__pycache__/DiziPal.cpython-314.pyc +0 -0
  73. KekikStream/Plugins/__pycache__/DiziYou.cpython-314.pyc +0 -0
  74. KekikStream/Plugins/__pycache__/Dizilla.cpython-314.pyc +0 -0
  75. KekikStream/Plugins/__pycache__/FilmBip.cpython-314.pyc +0 -0
  76. KekikStream/Plugins/__pycache__/FilmEkseni.cpython-314.pyc +0 -0
  77. KekikStream/Plugins/__pycache__/FilmMakinesi.cpython-314.pyc +0 -0
  78. KekikStream/Plugins/__pycache__/FilmModu.cpython-314.pyc +0 -0
  79. KekikStream/Plugins/__pycache__/Filmatek.cpython-314.pyc +0 -0
  80. KekikStream/Plugins/__pycache__/FilmciBaba.cpython-314.pyc +0 -0
  81. KekikStream/Plugins/__pycache__/FullHDFilmizlesene.cpython-314.pyc +0 -0
  82. KekikStream/Plugins/__pycache__/HDFilm.cpython-314.pyc +0 -0
  83. KekikStream/Plugins/__pycache__/HDFilmCehennemi.cpython-314.pyc +0 -0
  84. KekikStream/Plugins/__pycache__/JetFilmizle.cpython-314.pyc +0 -0
  85. KekikStream/Plugins/__pycache__/KultFilmler.cpython-314.pyc +0 -0
  86. KekikStream/Plugins/__pycache__/RealFilmIzle.cpython-314.pyc +0 -0
  87. KekikStream/Plugins/__pycache__/RecTV.cpython-314.pyc +0 -0
  88. KekikStream/Plugins/__pycache__/RoketDizi.cpython-314.pyc +0 -0
  89. KekikStream/Plugins/__pycache__/SelcukFlix.cpython-314.pyc +0 -0
  90. KekikStream/Plugins/__pycache__/SetFilmIzle.cpython-314.pyc +0 -0
  91. KekikStream/Plugins/__pycache__/SezonlukDizi.cpython-314.pyc +0 -0
  92. KekikStream/Plugins/__pycache__/ShowFlix.cpython-314.pyc +0 -0
  93. KekikStream/Plugins/__pycache__/SineWix.cpython-314.pyc +0 -0
  94. KekikStream/Plugins/__pycache__/Sinefy.cpython-314.pyc +0 -0
  95. KekikStream/Plugins/__pycache__/SinemaCX.cpython-314.pyc +0 -0
  96. KekikStream/Plugins/__pycache__/Sinezy.cpython-314.pyc +0 -0
  97. KekikStream/Plugins/__pycache__/SuperFilmIzle.cpython-314.pyc +0 -0
  98. KekikStream/Plugins/__pycache__/UgurFilm.cpython-314.pyc +0 -0
  99. KekikStream/Plugins/__pycache__/Watch32.cpython-314.pyc +0 -0
  100. KekikStream/Plugins/__pycache__/YabanciDizi.cpython-314.pyc +0 -0
  101. {kekikstream-2.5.8.dist-info → kekikstream-2.5.9.dist-info}/METADATA +1 -1
  102. kekikstream-2.5.9.dist-info/RECORD +200 -0
  103. kekikstream-2.5.8.dist-info/RECORD +0 -101
  104. {kekikstream-2.5.8.dist-info → kekikstream-2.5.9.dist-info}/WHEEL +0 -0
  105. {kekikstream-2.5.8.dist-info → kekikstream-2.5.9.dist-info}/entry_points.txt +0 -0
  106. {kekikstream-2.5.8.dist-info → kekikstream-2.5.9.dist-info}/licenses/LICENSE +0 -0
  107. {kekikstream-2.5.8.dist-info → kekikstream-2.5.9.dist-info}/top_level.txt +0 -0
KekikStream/Extractors/StreamWish.py
@@ -0,0 +1,80 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper
+ from Kekik.Sifreleme import Packer
+ from contextlib import suppress
+
+ class StreamWish(ExtractorBase):
+     name = "StreamWish"
+     main_url = "https://streamwish.to"
+
+     supported_domains = [
+         "streamwish.to", "streamwish.site", "streamwish.xyz", "streamwish.com",
+         "embedwish.com", "mwish.pro", "dwish.pro", "wishembed.pro", "wishembed.com",
+         "kswplayer.info", "wishfast.top", "sfastwish.com", "strwish.xyz", "strwish.com",
+         "flaswish.com", "awish.pro", "obeywish.com", "jodwish.com", "swhoi.com",
+         "multimovies.cloud", "uqloads.xyz", "doodporn.xyz", "cdnwish.com", "asnwish.com",
+         "nekowish.my.id", "neko-stream.click", "swdyu.com", "wishonly.site", "playerwish.com",
+         "streamhls.to", "hlswish.com"
+     ]
+
+     def can_handle_url(self, url: str) -> bool:
+         return any(domain in url for domain in self.supported_domains)
+
+     def resolve_embed_url(self, url: str) -> str:
+         # Kotlin: /f/ -> /, /e/ -> /
+         if "/f/" in url:
+             return url.replace("/f/", "/")
+         if "/e/" in url:
+             return url.replace("/e/", "/")
+         return url
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult:
+         base_url = self.get_base_url(url)
+         embed_url = self.resolve_embed_url(url)
+         istek = await self.httpx.get(
+             url = embed_url,
+             headers = {
+                 "Accept" : "*/*",
+                 "Connection" : "keep-alive",
+                 "Referer" : f"{base_url}/",
+                 "Origin" : f"{base_url}/",
+                 "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
+             },
+             follow_redirects=True
+         )
+         text = istek.text
+
+         unpacked = ""
+         # Eval script bul
+         if eval_match := HTMLHelper(text).regex_first(r'(eval\s*\(\s*function[\s\S]+?)<\/script>'):
+             with suppress(Exception):
+                 unpacked = Packer.unpack(eval_match)
+
+         content = unpacked or text
+         sel = HTMLHelper(content)
+
+         # Regex: file:\s*"(.*?m3u8.*?)"
+         m3u8_url = sel.regex_first(r'file:\s*["\']([^"\']+\.m3u8[^"\']*)["\']')
+
+         if not m3u8_url:
+             # Fallback to sources: Kotlin mantığı
+             m3u8_url = sel.regex_first(r'sources:\s*\[\s*{\s*file:\s*["\']([^"\']+)["\']')
+
+         if not m3u8_url:
+             # p,a,c,k,e,d içinde olabilir
+             m3u8_url = sel.regex_first(r'["\'](https?://[^"\']+\.m3u8[^"\']*)["\']')
+
+         if not m3u8_url:
+             # t.r.u.e pattern fallback
+             m3u8_url = sel.regex_first(r'file\s*:\s*["\']([^"\']+)["\']')
+
+         if not m3u8_url:
+             raise ValueError(f"StreamWish: m3u8 bulunamadı. {url}")
+
+         return ExtractResult(
+             name = self.name,
+             url = self.fix_url(m3u8_url),
+             referer = f"{base_url}/",
+             user_agent = self.httpx.headers.get("User-Agent", "")
+         )
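The new StreamWish extractor unpacks a packed eval(function(p,a,c,k,e,d)...) script when one is present, then falls back through progressively looser regexes until a playable URL turns up. Below is a minimal standalone sketch of that cascade, assuming only an already-fetched page body and an optional unpacker callable; the find_m3u8 helper and the unpack parameter are illustrative and not part of the package.

import re
from contextlib import suppress

def find_m3u8(page_html: str, unpack=None) -> str | None:
    # Unpack an eval(function(p,a,c,k,e,d)...) blob first, if an unpacker is supplied.
    content = page_html
    if unpack and (packed := re.search(r"(eval\s*\(\s*function[\s\S]+?)</script>", page_html)):
        with suppress(Exception):
            content = unpack(packed.group(1)) or page_html

    # Same fallback order as the extractor: file: "...m3u8...", sources: [{file: "..."}],
    # any absolute *.m3u8 URL, then any file: "..." value.
    patterns = (
        r'file:\s*["\']([^"\']+\.m3u8[^"\']*)["\']',
        r'sources:\s*\[\s*{\s*file:\s*["\']([^"\']+)["\']',
        r'["\'](https?://[^"\']+\.m3u8[^"\']*)["\']',
        r'file\s*:\s*["\']([^"\']+)["\']',
    )
    for pattern in patterns:
        if hit := re.search(pattern, content):
            return hit.group(1)
    return None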
KekikStream/Extractors/VidHide.py
@@ -2,6 +2,7 @@

  from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, HTMLHelper
  from Kekik.Sifreleme import Packer
+ from contextlib import suppress
  import re

  class VidHide(ExtractorBase):
@@ -17,7 +18,7 @@ class VidHide(ExtractorBase):
          "kinoger.be",
          "smoothpre.com",
          "dhtpre.com",
-         "peytonepre.com"
+         "peytonepre.com",
      ]

      def can_handle_url(self, url: str) -> bool:
@@ -37,69 +38,70 @@ class VidHide(ExtractorBase):

      async def extract(self, url: str, referer: str = None) -> ExtractResult:
          base_url = self.get_base_url(url)
-         self.httpx.headers.update({
-             "Referer" : referer or base_url,
-             "Origin" : base_url,
-         })
-
+         name = "EarnVids" if any(x in base_url for x in ["smoothpre.com", "dhtpre.com", "peytonepre.com"]) else self.name
+
+         # Kotlin Headers
+         headers = {
+             "Sec-Fetch-Dest" : "empty",
+             "Sec-Fetch-Mode" : "cors",
+             "Sec-Fetch-Site" : "cross-site",
+             "Origin" : f"{base_url}/",
+             "Referer" : referer or f"{base_url}/",
+         }
+
          embed_url = self.get_embed_url(url)
-         istek = await self.httpx.get(embed_url, follow_redirects=True)
+         istek = await self.httpx.get(embed_url, headers=headers, follow_redirects=True)
          text = istek.text

          # Silinmiş dosya kontrolü
-         if "File is no longer available" in text or "File Not Found" in text:
-             raise ValueError(f"VidHide: Video silinmiş. {url}")
+         if any(x in text for x in ["File is no longer available", "File Not Found", "Video silinmiş"]):
+             raise ValueError(f"{name}: Video silinmiş. {url}")

          # JS Redirect Kontrolü (OneUpload vb.)
          if js_redirect := HTMLHelper(text).regex_first(r"window\.location\.replace\(['\"]([^'\"]+)['\"]\)") or \
                            HTMLHelper(text).regex_first(r"window\.location\.href\s*=\s*['\"]([^'\"]+)['\"]"):
-             # Redirect url'i al
              target_url = js_redirect
-             # Bazen path relative olabilir ama genelde full url
              if not target_url.startswith("http"):
-                 # urljoin gerekebilir ama şimdilik doğrudan deneyelim veya fix_url
-                 target_url = self.fix_url(target_url) # fix_url base'e göre düzeltebilir mi? ExtractorBase.fix_url genelde şema ekler.
-                 pass
+                 target_url = self.fix_url(target_url)

-             # Yeniden istek at
              istek = await self.httpx.get(target_url, headers={"Referer": embed_url}, follow_redirects=True)
              text = istek.text

          sel = HTMLHelper(text)

          unpacked = ""
-         # Eval script bul (regex ile daha sağlam)
+         # Eval script bul
          if eval_match := sel.regex_first(r'(eval\s*\(\s*function[\s\S]+?)<\/script>'):
-             try:
+             with suppress(Exception):
                  unpacked = Packer.unpack(eval_match)
                  if "var links" in unpacked:
                      unpacked = unpacked.split("var links")[1]
-             except:
-                 pass

          content = unpacked or text
-
-         # Regex: Kotlin mantığı (: "url")
-         # Ayrıca sources: [...] mantığını da ekle
-         m3u8_url = HTMLHelper(content).regex_first(r'sources:\s*\[\s*\{\s*file:\s*"([^"]+)"')
-
-         if not m3u8_url:
-             # Genel arama (hls:, file: vb.)
-             # Kotlin Regex: :\s*"(.*?m3u8.*?)"
-             match = HTMLHelper(content).regex_first(r':\s*["\']([^"\']+\.m3u8[^"\']*)["\']')
-             if match:
-                 m3u8_url = match
-
-         if not m3u8_url:
-             # Son şans: herhangi bir m3u8 linki
-             m3u8_url = HTMLHelper(content).regex_first(r'["\']([^"\']+\.m3u8[^"\']*)["\']')
-
-         if not m3u8_url:
-             raise ValueError(f"VidHide: Video URL bulunamadı. {url}")
-
-         return ExtractResult(
-             name = self.name,
-             url = self.fix_url(m3u8_url),
-             referer = f"{base_url}/",
-             user_agent = self.httpx.headers.get("User-Agent", "")
-         )
+
+         # Kotlin Exact Regex: :\s*"(.*?m3u8.*?)"
+         m3u8_matches = re.findall(r':\s*["\']([^"\']+\.m3u8[^"\']*)["\']', content)
+
+         results = []
+         for m3u8_url in m3u8_matches:
+             results.append(ExtractResult(
+                 name = name,
+                 url = self.fix_url(m3u8_url),
+                 referer = f"{base_url}/",
+                 user_agent = self.httpx.headers.get("User-Agent", "")
+             ))
+
+         if not results:
+             # Fallback for non-m3u8 or different patterns
+             if m3u8_url := sel.regex_first(r'sources:\s*\[\s*\{\s*file:\s*"([^"]+)"'):
+                 results.append(ExtractResult(
+                     name = name,
+                     url = self.fix_url(m3u8_url),
+                     referer = f"{base_url}/",
+                     user_agent = self.httpx.headers.get("User-Agent", "")
+                 ))
+
+         if not results:
+             raise ValueError(f"{name}: Video URL bulunamadı. {url}")
+
+         return results[0] if len(results) == 1 else results
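Note that VidHide.extract can now yield several links: every *.m3u8 match is collected into results, and the method returns a bare ExtractResult only when exactly one was found, a list otherwise (the "EarnVids" label applies to the smoothpre.com, dhtpre.com and peytonepre.com mirrors). Callers therefore have to normalise the return value; a hedged sketch follows, where extractor and url are placeholders rather than names from the package.

async def print_links(extractor, url: str) -> None:
    # extract() may return a single ExtractResult or a list of them
    found = await extractor.extract(url)
    for result in (found if isinstance(found, list) else [found]):
        print(result.name, result.url)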
KekikStream/Extractors/VidStack.py
@@ -0,0 +1,88 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle
+ from Crypto.Cipher import AES
+ from Crypto.Util import Padding
+ import re
+
+ class VidStack(ExtractorBase):
+     name = "VidStack"
+     main_url = "https://vidstack.io"
+     requires_referer = True
+
+     supported_domains = [
+         "vidstack.io", "server1.uns.bio", "upns.one"
+     ]
+
+     def can_handle_url(self, url: str) -> bool:
+         return any(domain in url for domain in self.supported_domains)
+
+     def decrypt_aes(self, input_hex: str, key: str, iv: str) -> str:
+         try:
+             cipher = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))
+             raw_data = bytes.fromhex(input_hex)
+             decrypted = cipher.decrypt(raw_data)
+             unpadded = Padding.unpad(decrypted, AES.block_size)
+             return unpadded.decode('utf-8')
+         except Exception as e:
+             # print(f"DEBUG VidStack: {iv} -> {e}") # Debugging
+             return None
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult | list[ExtractResult]:
+         headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0"}
+
+         # Hash ve Base URL çıkarma
+         hash_val = url.split("#")[-1].split("/")[-1]
+         base_url = self.get_base_url(url)
+
+         # API İsteği
+         api_url = f"{base_url}/api/v1/video?id={hash_val}"
+         istek = await self.httpx.get(api_url, headers=headers)
+
+         # Bazen yanıt tırnak içinde gelebilir, temizleyelim
+         encoded_data = istek.text.strip().strip('"')
+
+         # AES Çözme
+         key = "kiemtienmua911ca"
+         ivs = ["1234567890oiuytr", "0123456789abcdef"]
+
+         decrypted_text = None
+         for iv in ivs:
+             decrypted_text = self.decrypt_aes(encoded_data, key, iv)
+             if decrypted_text and '"source":' in decrypted_text:
+                 break
+
+         if not decrypted_text:
+             # Hata mesajını daha detaylı verelim (debug için tırnaklanmış hali)
+             raise ValueError(f"VidStack: AES çözme başarısız. {url} | Response: {istek.text[:50]}...")
+
+         # m3u8 ve Alt yazı çıkarma
+         # Kotlin'de "source":"(.*?)" regex'i kullanılıyor
+         m3u8_url = re.search(r'["\']source["\']\s*:\s*["\']([^"\']+)["\']', decrypted_text)
+         if m3u8_url:
+             m3u8_url = m3u8_url.group(1).replace("\\/", "/")
+         else:
+             raise ValueError(f"VidStack: m3u8 bulunamadı. {url}")
+
+         subtitles = []
+         # Kotlin: "subtitle":\{(.*?)\}
+         subtitle_section = re.search(r'["\']subtitle["\']\s*:\s*\{(.*?)\}', decrypted_text)
+         if subtitle_section:
+             section = subtitle_section.group(1)
+             # Regex: "([^"]+)":\s*"([^"]+)"
+             matches = re.finditer(r'["\']([^"\']+)["\']\s*:\s*["\']([^"\']+)["\']', section)
+             for match in matches:
+                 lang = match.group(1)
+                 raw_path = match.group(2).split("#")[0]
+                 if raw_path:
+                     path = raw_path.replace("\\/", "/")
+                     sub_url = f"{self.main_url}{path}"
+                     subtitles.append(Subtitle(name=lang, url=self.fix_url(sub_url)))
+
+         return ExtractResult(
+             name = self.name,
+             url = self.fix_url(m3u8_url),
+             referer = url,
+             user_agent = headers["User-Agent"],
+             subtitles = subtitles
+         )
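VidStack fetches /api/v1/video?id=<hash>, whose body is a hex string encrypted with AES-128-CBC under a fixed key; the extractor tries two candidate IVs and keeps the first plaintext that contains "source":. Below is a minimal round-trip sketch of that scheme with PyCryptodome; the sample payload is invented here purely to exercise the candidate-IV loop and is not a real API response.

from Crypto.Cipher import AES
from Crypto.Util import Padding

KEY = b"kiemtienmua911ca"                         # 16-byte key used by the extractor
IVS = (b"1234567890oiuytr", b"0123456789abcdef")  # candidate IVs tried in order

def decrypt_hex(payload_hex: str, iv: bytes) -> str | None:
    # AES-128-CBC with PKCS#7 padding, mirroring VidStack.decrypt_aes
    try:
        plain = AES.new(KEY, AES.MODE_CBC, iv).decrypt(bytes.fromhex(payload_hex))
        return Padding.unpad(plain, AES.block_size).decode("utf-8")
    except Exception:
        return None

# Encrypt a made-up response with the first IV, then recover it the way the extractor does.
sample  = Padding.pad(b'{"source":"https://example.com/hls/master.m3u8","subtitle":{}}', AES.block_size)
payload = AES.new(KEY, AES.MODE_CBC, IVS[0]).encrypt(sample).hex()

for iv in IVS:
    if (text := decrypt_hex(payload, iv)) and '"source":' in text:
        print(text)
        break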
KekikStream/Plugins/DDizi.py
@@ -0,0 +1,176 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
+ from contextlib import suppress
+
+ class DDizi(PluginBase):
+     name = "DDizi"
+     language = "tr"
+     main_url = "https://www.ddizi.im"
+     favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+     description = "Ddizi, dizi izle, dizi seyret, yerli dizi izle, canlı dizi, türk dizi izle, dizi izle full, diziizle, eski diziler"
+
+     main_page = {
+         f"{main_url}/yeni-eklenenler7" : "Son Eklenen Bölümler",
+         f"{main_url}/yabanci-dizi-izle" : "Yabancı Diziler",
+         f"{main_url}/eski.diziler" : "Eski Diziler",
+         f"{main_url}/yerli-diziler" : "Yerli Diziler"
+     }
+
+     async def get_articles(self, secici: HTMLHelper) -> list[dict]:
+         articles = []
+         for veri in secici.select("div.dizi-boxpost-cat, div.dizi-boxpost"):
+             title = secici.select_text("a", veri)
+             href = secici.select_attr("a", "href", veri)
+             img = secici.select_first("img.img-back, img.img-back-cat", veri)
+             poster = img.attrs.get("data-src") or img.attrs.get("src") if img else None
+
+             if title and href:
+                 articles.append({
+                     "title" : self.clean_title(title),
+                     "url" : self.fix_url(href),
+                     "poster": self.fix_url(poster),
+                 })
+
+         return articles
+
+     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         # DDizi'de sayfalama /sayfa-X formatında (0'dan başlıyor)
+         if page > 1:
+             target_url = f"{url}/sayfa-{page-1}"
+         else:
+             target_url = url
+
+         istek = await self.httpx.get(target_url, follow_redirects=True)
+         secici = HTMLHelper(istek.text)
+         veriler = await self.get_articles(secici)
+
+         return [MainPageResult(**veri, category=category) for veri in veriler if veri]
+
+     async def search(self, query: str) -> list[SearchResult]:
+         istek = await self.httpx.post(
+             url = f"{self.main_url}/arama/",
+             headers = {"Referer": f"{self.main_url}/"},
+             data = {"arama": query}
+         )
+         secici = HTMLHelper(istek.text)
+         veriler = await self.get_articles(secici)
+
+         return [SearchResult(**veri) for veri in veriler if veri]
+
+     async def load_item(self, url: str) -> SeriesInfo:
+         istek = await self.httpx.get(url)
+         secici = HTMLHelper(istek.text)
+
+         title = self.clean_title(secici.select_text("h1, h2, div.dizi-boxpost-cat a"))
+         poster = secici.select_poster("div.afis img, img.afis, img.img-back, img.img-back-cat")
+         description = secici.select_text("div.dizi-aciklama, div.aciklama, p")
+         rating = secici.select_text("span.comments-ss")
+
+         # Meta verileri (DDizi'de pek yok ama deniyoruz)
+         # Year için sadece açıklama kısmına bakalım ki URL'deki ID'yi almasın
+         year = HTMLHelper(description).regex_first(r"(\d{4})") if description else None
+         actors = secici.select_texts("div.oyuncular a, ul.bilgi li a")
+
+         episodes = []
+         current_page = 1
+         has_next = True
+
+         while has_next:
+             page_url = f"{url}/sayfa-{current_page}" if current_page > 1 else url
+             if current_page > 1:
+                 istek = await self.httpx.get(page_url)
+                 secici = HTMLHelper(istek.text)
+
+             page_eps = secici.select("div.bolumler a, div.sezonlar a, div.dizi-arsiv a, div.dizi-boxpost-cat a")
+             if not page_eps:
+                 break
+
+             for ep in page_eps:
+                 name = ep.text().strip()
+                 href = ep.attrs.get("href")
+                 if name and href:
+                     # 'Bölüm Final' gibi durumları temizleyelim
+                     clean_name = name.replace("Final", "").strip()
+                     s, e = secici.extract_season_episode(clean_name)
+                     episodes.append(Episode(
+                         season = s or 1,
+                         episode = e or 1,
+                         title = name,
+                         url = self.fix_url(href)
+                     ))
+
+             # Sonraki sayfa kontrolü
+             has_next = any("Sonraki" in a.text() for a in secici.select(".pagination a"))
+             current_page += 1
+             if current_page > 10: break # Emniyet kilidi
+
+         if not episodes:
+             s, e = secici.extract_season_episode(title)
+             episodes.append(Episode(
+                 season = s or 1,
+                 episode = e or 1,
+                 title = title,
+                 url = url
+             ))
+
+         return SeriesInfo(
+             url = url,
+             poster = self.fix_url(poster),
+             title = title,
+             description = description,
+             rating = rating.strip() if rating else None,
+             year = year,
+             actors = actors,
+             episodes = episodes
+         )
+
+     async def load_links(self, url: str) -> list[ExtractResult]:
+         istek = await self.httpx.get(url)
+         secici = HTMLHelper(istek.text)
+
+         results = []
+         # og:video ve JWPlayer kontrolü
+         og_video = secici.select_attr("meta[property='og:video']", "content")
+         if og_video:
+             og_video = self.fix_url(og_video)
+             with suppress(Exception):
+                 player_istek = await self.httpx.get(og_video, headers={"Referer": url})
+                 player_secici = HTMLHelper(player_istek.text)
+
+                 # file: '...' logic
+                 sources = player_secici.regex_all(r'file:\s*["\']([^"\']+)["\']')
+                 for src in sources:
+                     src = self.fix_url(src)
+                     # Direkt link kontrolü - Extractor gerektirmeyenler
+                     is_direct = any(x in src.lower() for x in ["google", "twimg", "mncdn", "akamai", "streambox", ".m3u8", ".mp4", "master.txt"])
+
+                     if is_direct:
+                         results.append(ExtractResult(
+                             url = src,
+                             name = "Video",
+                             user_agent = "googleusercontent",
+                             referer = "https://twitter.com/"
+                         ))
+                     else:
+                         res = await self.extract(src, referer=og_video)
+                         if res:
+                             if isinstance(res, list): results.extend(res)
+                             else: results.append(res)
+
+             # Fallback to direct extraction if nothing found but we have og_video
+             if not results:
+                 if any(x in og_video.lower() for x in ["google", "twimg", "mncdn", "akamai", "streambox", ".m3u8", ".mp4", "master.txt"]):
+                     results.append(ExtractResult(
+                         url = og_video,
+                         name = "Video",
+                         user_agent = "googleusercontent",
+                         referer = "https://twitter.com/"
+                     ))
+                 else:
+                     res = await self.extract(og_video)
+                     if res:
+                         if isinstance(res, list): results.extend(res)
+                         else: results.append(res)
+
+         return results
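DDizi's load_links splits the sources it finds into links that are played directly (known CDN hosts or raw .m3u8/.mp4/master.txt files) and links that are routed through self.extract so a matching extractor can resolve them. A small sketch of that classification, reusing the hint list from the plugin above; the helper name and the sample URLs are illustrative only.

DIRECT_HINTS = ("google", "twimg", "mncdn", "akamai", "streambox", ".m3u8", ".mp4", "master.txt")

def is_direct_source(src: str) -> bool:
    # True for URLs the plugin would play as-is instead of handing to an extractor
    return any(hint in src.lower() for hint in DIRECT_HINTS)

print(is_direct_source("https://video.twimg.com/amplify_video/1/vid/720x1280/clip.mp4"))  # True
print(is_direct_source("https://vidmoly.to/embed-abc123.html"))                           # False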