KekikStream 1.4.4-py3-none-any.whl → 2.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. KekikStream/CLI/pypi_kontrol.py +6 -6
  2. KekikStream/Core/Extractor/ExtractorBase.py +13 -12
  3. KekikStream/Core/Extractor/ExtractorLoader.py +25 -17
  4. KekikStream/Core/Extractor/ExtractorManager.py +53 -9
  5. KekikStream/Core/Extractor/ExtractorModels.py +5 -7
  6. KekikStream/Core/Extractor/YTDLPCache.py +35 -0
  7. KekikStream/Core/Media/MediaHandler.py +52 -31
  8. KekikStream/Core/Media/MediaManager.py +0 -3
  9. KekikStream/Core/Plugin/PluginBase.py +47 -21
  10. KekikStream/Core/Plugin/PluginLoader.py +11 -7
  11. KekikStream/Core/Plugin/PluginModels.py +25 -25
  12. KekikStream/Core/__init__.py +1 -0
  13. KekikStream/Extractors/CloseLoad.py +6 -26
  14. KekikStream/Extractors/ContentX_.py +40 -0
  15. KekikStream/Extractors/DzenRu.py +38 -0
  16. KekikStream/Extractors/ExPlay.py +53 -0
  17. KekikStream/Extractors/FirePlayer.py +60 -0
  18. KekikStream/Extractors/HDPlayerSystem.py +41 -0
  19. KekikStream/Extractors/JetTv.py +45 -0
  20. KekikStream/Extractors/MailRu.py +2 -4
  21. KekikStream/Extractors/MixTiger.py +57 -0
  22. KekikStream/Extractors/MolyStream.py +25 -7
  23. KekikStream/Extractors/Odnoklassniki.py +16 -11
  24. KekikStream/Extractors/{OkRuHTTP.py → Odnoklassniki_.py} +5 -1
  25. KekikStream/Extractors/{HDStreamAble.py → PeaceMakerst_.py} +1 -1
  26. KekikStream/Extractors/PixelDrain.py +0 -1
  27. KekikStream/Extractors/PlayerFilmIzle.py +62 -0
  28. KekikStream/Extractors/RapidVid.py +30 -13
  29. KekikStream/Extractors/RapidVid_.py +7 -0
  30. KekikStream/Extractors/SetPlay.py +57 -0
  31. KekikStream/Extractors/SetPrime.py +45 -0
  32. KekikStream/Extractors/SibNet.py +0 -1
  33. KekikStream/Extractors/TurkeyPlayer.py +34 -0
  34. KekikStream/Extractors/VidHide.py +72 -0
  35. KekikStream/Extractors/VidMoly.py +20 -19
  36. KekikStream/Extractors/{VidMolyMe.py → VidMoly_.py} +1 -1
  37. KekikStream/Extractors/VidMoxy.py +0 -1
  38. KekikStream/Extractors/VidPapi.py +89 -0
  39. KekikStream/Extractors/YTDLP.py +177 -0
  40. KekikStream/Extractors/YildizKisaFilm.py +41 -0
  41. KekikStream/Plugins/DiziBox.py +28 -16
  42. KekikStream/Plugins/DiziPal.py +246 -0
  43. KekikStream/Plugins/DiziYou.py +58 -31
  44. KekikStream/Plugins/Dizilla.py +97 -68
  45. KekikStream/Plugins/FilmBip.py +145 -0
  46. KekikStream/Plugins/FilmMakinesi.py +61 -52
  47. KekikStream/Plugins/FilmModu.py +138 -0
  48. KekikStream/Plugins/FullHDFilm.py +164 -0
  49. KekikStream/Plugins/FullHDFilmizlesene.py +38 -37
  50. KekikStream/Plugins/HDFilmCehennemi.py +44 -54
  51. KekikStream/Plugins/JetFilmizle.py +68 -42
  52. KekikStream/Plugins/KultFilmler.py +219 -0
  53. KekikStream/Plugins/RecTV.py +41 -37
  54. KekikStream/Plugins/RoketDizi.py +232 -0
  55. KekikStream/Plugins/SelcukFlix.py +309 -0
  56. KekikStream/Plugins/SezonlukDizi.py +16 -14
  57. KekikStream/Plugins/SineWix.py +39 -30
  58. KekikStream/Plugins/Sinefy.py +238 -0
  59. KekikStream/Plugins/SinemaCX.py +157 -0
  60. KekikStream/Plugins/Sinezy.py +146 -0
  61. KekikStream/Plugins/SuperFilmGeldi.py +121 -0
  62. KekikStream/Plugins/UgurFilm.py +10 -10
  63. KekikStream/__init__.py +296 -319
  64. KekikStream/requirements.txt +3 -4
  65. kekikstream-2.0.2.dist-info/METADATA +309 -0
  66. kekikstream-2.0.2.dist-info/RECORD +82 -0
  67. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info}/WHEEL +1 -1
  68. KekikStream/Extractors/FourCX.py +0 -7
  69. KekikStream/Extractors/FourPichive.py +0 -7
  70. KekikStream/Extractors/FourPlayRu.py +0 -7
  71. KekikStream/Extractors/Hotlinger.py +0 -7
  72. KekikStream/Extractors/OkRuSSL.py +0 -7
  73. KekikStream/Extractors/Pichive.py +0 -7
  74. KekikStream/Extractors/PlayRu.py +0 -7
  75. KekikStream/Helpers/Unpack.py +0 -75
  76. KekikStream/Plugins/Shorten.py +0 -225
  77. kekikstream-1.4.4.dist-info/METADATA +0 -108
  78. kekikstream-1.4.4.dist-info/RECORD +0 -63
  79. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info}/entry_points.txt +0 -0
  80. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info/licenses}/LICENSE +0 -0
  81. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info}/top_level.txt +0 -0
KekikStream/Extractors/YTDLP.py
@@ -0,0 +1,177 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, get_ytdlp_extractors
+ from urllib.parse import urlparse
+ import yt_dlp, re, sys, os
+
+ class YTDLP(ExtractorBase):
+     name     = "yt-dlp"
+     main_url = ""  # Universal - supports all sites
+
+     _FAST_DOMAIN_RE = None  # compiled mega-regex (matched against the host)
+
+     @classmethod
+     def _init_fast_domain_regex(cls):
+         """
+         Initialize the fast domain regex.
+         """
+         if cls._FAST_DOMAIN_RE is not None:
+             return
+
+         domains = set()
+
+         # Pull the extractors from the central cache
+         extractors = get_ytdlp_extractors()
+
+         # Capture domains from the _VALID_URL regexes of the yt-dlp extractors.
+         # In the regex sources, domains usually appear escaped as "\.".
+         domain_pat = re.compile(r"(?:[a-z0-9-]+\\\.)+[a-z]{2,}", re.IGNORECASE)
+
+         for ie in extractors:
+             valid = getattr(ie, "_VALID_URL", None)
+             if not valid or not isinstance(valid, str):
+                 continue
+
+             for m in domain_pat.findall(valid):
+                 d = m.replace(r"\.", ".").lower()
+
+                 # Add a filter here if you want to weed out overly aggressive/suspicious entries
+                 # (usually not needed)
+                 domains.add(d)
+
+         # If no domains were found (a very unlikely edge case), fall back to an empty regex
+         if not domains:
+             cls._FAST_DOMAIN_RE = re.compile(r"$^")  # matches nothing
+             return
+
+         # Host matching with subdomain support (m.youtube.com, player.vimeo.com, etc.)
+         # (?:^|.*\.) (domain1|domain2|...) $
+         joined  = "|".join(sorted(re.escape(d) for d in domains))
+         pattern = rf"(?:^|.*\.)(?:{joined})$"
+         cls._FAST_DOMAIN_RE = re.compile(pattern, re.IGNORECASE)
+
+     def __init__(self):
+         self.__class__._init_fast_domain_regex()
+
+     def can_handle_url(self, url: str) -> bool:
+         """
+         Fast path : check the URL host with a single mega-regex (no loop)
+         Slow path : fall back to the existing extract_info based check when needed
+         """
+         # Parse the URL and grab the host
+         try:
+             parsed = urlparse(url)
+             host   = (parsed.hostname or "").lower()
+         except Exception:
+             host = ""
+
+         # Support for scheme-less URLs such as "youtube.com/..."
+         if not host and "://" not in url:
+             try:
+                 parsed = urlparse("https://" + url)
+                 host   = (parsed.hostname or "").lower()
+             except Exception:
+                 host = ""
+
+         # Fast path
+         if host and self.__class__._FAST_DOMAIN_RE.search(host):
+             return True
+
+         # SLOW PATH: yt-dlp's native check for the remaining sites
+         try:
+             # Temporarily silence stderr (hide error messages)
+             old_stderr = sys.stderr
+             sys.stderr = open(os.devnull, "w")
+
+             try:
+                 ydl_opts = {
+                     "simulate"              : True,  # No download, detection only
+                     "quiet"                 : True,  # No log noise
+                     "no_warnings"           : True,  # No warning messages
+                     "extract_flat"          : True,  # Minimal processing
+                     "no_check_certificates" : True,
+                     "ignoreerrors"          : True,  # Ignore errors
+                     "socket_timeout"        : 3,
+                     "retries"               : 1
+                 }
+
+                 with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+                     # Check whether yt-dlp can process the URL
+                     info = ydl.extract_info(url, download=False, process=False)
+
+                     # Skip if only the generic extractor picked it up
+                     if info and info.get("extractor_key") != "Generic":
+                         return True
+
+                     return False
+             finally:
+                 # Restore stderr
+                 sys.stderr.close()
+                 sys.stderr = old_stderr
+
+         except Exception:
+             # Return False when yt-dlp cannot handle the URL
+             return False
+
+     async def extract(self, url: str, referer: str | None = None) -> ExtractResult:
+         ydl_opts = {
+             "quiet"                 : True,
+             "no_warnings"           : True,
+             "extract_flat"          : False,   # Fetch full info
+             "format"                : "best",  # Best quality
+             "no_check_certificates" : True,
+             "socket_timeout"        : 3,
+             "retries"               : 1
+         }
+
+         # Pass the referer as a header when provided
+         if referer:
+             ydl_opts["http_headers"] = {"Referer": referer}
+
+         with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+             info = ydl.extract_info(url, download=False)
+
+             if not info:
+                 raise ValueError("yt-dlp video bilgisi döndürmedi")
+
+             # Grab the video URL
+             video_url = info.get("url")
+             if not video_url:
+                 # It can sometimes live in the formats list
+                 formats = info.get("formats", [])
+                 if formats:
+                     video_url = formats[-1].get("url")  # Last format (usually the best)
+
+             if not video_url:
+                 raise ValueError("Video URL bulunamadı")
+
+             # Extract the subtitles
+             subtitles = []
+             if subtitle_data := info.get("subtitles"):
+                 for lang, subs in subtitle_data.items():
+                     for sub in subs:
+                         if sub_url := sub.get("url"):
+                             subtitles.append(
+                                 Subtitle(
+                                     name = f"{lang} ({sub.get('ext', 'unknown')})",
+                                     url  = sub_url
+                                 )
+                             )
+
+             # Grab the User-Agent
+             user_agent   = None
+             http_headers = info.get("http_headers", {})
+             if http_headers:
+                 user_agent = http_headers.get("User-Agent")
+
+             return ExtractResult(
+                 name       = self.name,
+                 url        = video_url,
+                 referer    = referer or info.get("webpage_url"),
+                 user_agent = user_agent,
+                 subtitles  = subtitles
+             )
+
+     async def close(self):
+         """No cleanup is needed for yt-dlp."""
+         pass
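The fast path above folds every known yt-dlp domain into one alternation and matches the URL host against it once, instead of probing each URL with extract_info. A minimal, self-contained sketch of that idea follows; the domain set here is illustrative, not taken from yt-dlp's extractor list:

import re
from urllib.parse import urlparse

# Illustrative domain set; the real code derives it from yt-dlp's _VALID_URL patterns.
domains = {"youtube.com", "vimeo.com", "dailymotion.com"}

# Subdomain-aware host matcher: (?:^|.*\.)(domain1|domain2|...)$
joined  = "|".join(sorted(re.escape(d) for d in domains))
host_re = re.compile(rf"(?:^|.*\.)(?:{joined})$", re.IGNORECASE)

def fast_can_handle(url: str) -> bool:
    host = (urlparse(url).hostname or "").lower()
    return bool(host and host_re.search(host))

print(fast_can_handle("https://m.youtube.com/watch?v=abc"))  # True
print(fast_can_handle("https://example.org/video"))           # False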
KekikStream/Extractors/YildizKisaFilm.py
@@ -0,0 +1,41 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult
+
+ class YildizKisaFilm(ExtractorBase):
+     name     = "YildizKisaFilm"
+     main_url = "https://yildizkisafilm.org"
+
+     async def extract(self, url, referer=None) -> ExtractResult:
+         ext_ref = referer or ""
+
+         if "video/" in url:
+             vid_id = url.split("video/")[-1]
+         else:
+             vid_id = url.split("?data=")[-1]
+
+         post_url = f"{self.main_url}/player/index.php?data={vid_id}&do=getVideo"
+
+         response = await self.httpx.post(
+             url     = post_url,
+             data    = {"hash": vid_id, "r": ext_ref},
+             headers = {
+                 "Referer"          : ext_ref,
+                 "Content-Type"     : "application/x-www-form-urlencoded; charset=UTF-8",
+                 "X-Requested-With" : "XMLHttpRequest"
+             }
+         )
+         response.raise_for_status()
+
+         video_data = response.json()
+         m3u_link   = video_data.get("securedLink")
+
+         if not m3u_link:
+             raise ValueError("securedLink not found in response")
+
+         return ExtractResult(
+             name      = self.name,
+             url       = m3u_link,
+             referer   = ext_ref,
+             subtitles = []
+         )
KekikStream/Plugins/DiziBox.py
@@ -1,9 +1,9 @@
  # This tool was written by @keyiflerolsun | for @KekikAkademi.

- from KekikStream.Core import kekik_cache, PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
  from Kekik.Sifreleme import CryptoJS
  from parsel import Selector
- import re, urllib.parse, base64, contextlib, asyncio
+ import re, urllib.parse, base64, contextlib, asyncio, time

  class DiziBox(PluginBase):
      name = "DiziBox"
@@ -40,8 +40,11 @@ class DiziBox(PluginBase):
          f"{main_url}/dizi-arsivi/page/SAYFA/?tur[0]=yarisma&yil&imdb" : "Yarışma"
      }

-     @kekik_cache(ttl=60*60)
      async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         self.httpx.cookies.update({
+             "isTrustedUser" : "true",
+             "dbxu"          : str(time.time() * 1000).split(".")[0]
+         })
          istek = await self.httpx.get(
              url              = f"{url.replace('SAYFA', str(page))}",
              follow_redirects = True
@@ -58,12 +61,10 @@ class DiziBox(PluginBase):
              for veri in secici.css("article.detailed-article")
          ]

-     @kekik_cache(ttl=60*60)
      async def search(self, query: str) -> list[SearchResult]:
          self.httpx.cookies.update({
-             "LockUser"      : "true",
              "isTrustedUser" : "true",
-             "dbxu"          : "1722403730363"
+             "dbxu"          : str(time.time() * 1000).split(".")[0]
          })
          istek  = await self.httpx.get(f"{self.main_url}/?s={query}")
          secici = Selector(istek.text)
@@ -77,7 +78,6 @@ class DiziBox(PluginBase):
              for item in secici.css("article.detailed-article")
          ]

-     @kekik_cache(ttl=60*60)
      async def load_item(self, url: str) -> SeriesInfo:
          istek  = await self.httpx.get(url)
          secici = Selector(istek.text)
@@ -124,13 +124,17 @@ class DiziBox(PluginBase):
              actors = actors,
          )

-     @kekik_cache(ttl=60*60)
      async def _iframe_decode(self, name:str, iframe_link:str, referer:str) -> list[str]:
          results = []

+         self.httpx.headers.update({"Referer": referer})
+         self.httpx.cookies.update({
+             "isTrustedUser" : "true",
+             "dbxu"          : str(time.time() * 1000).split(".")[0]
+         })
+
          if "/player/king/king.php" in iframe_link:
              iframe_link = iframe_link.replace("king.php?v=", "king.php?wmode=opaque&v=")
-             self.httpx.headers.update({"Referer": referer})

              istek  = await self.httpx.get(iframe_link)
              secici = Selector(istek.text)
@@ -150,7 +154,6 @@ class DiziBox(PluginBase):

          elif "/player/moly/moly.php" in iframe_link:
              iframe_link = iframe_link.replace("moly.php?h=", "moly.php?wmode=opaque&h=")
-             self.httpx.headers.update({"Referer": referer})
              while True:
                  await asyncio.sleep(.3)
                  with contextlib.suppress(Exception):
@@ -171,15 +174,19 @@ class DiziBox(PluginBase):

          return results

-     @kekik_cache(ttl=15*60)
-     async def load_links(self, url: str) -> list[str]:
+     async def load_links(self, url: str) -> list[dict]:
          istek  = await self.httpx.get(url)
          secici = Selector(istek.text)

-         iframes = []
+         results = []
          if main_iframe := secici.css("div#video-area iframe::attr(src)").get():
              if decoded := await self._iframe_decode(self.name, main_iframe, url):
-                 iframes.extend(decoded)
+                 for iframe in decoded:
+                     extractor = self.ex_manager.find_extractor(iframe)
+                     results.append({
+                         "url"  : iframe,
+                         "name" : f"{extractor.name if extractor else 'Main Player'}"
+                     })

          for alternatif in secici.css("div.video-toolbar option[value]"):
              alt_name = alternatif.css("::text").get()
@@ -195,6 +202,11 @@ class DiziBox(PluginBase):
              alt_secici = Selector(alt_istek.text)
              if alt_iframe := alt_secici.css("div#video-area iframe::attr(src)").get():
                  if decoded := await self._iframe_decode(alt_name, alt_iframe, url):
-                     iframes.extend(decoded)
+                     for iframe in decoded:
+                         extractor = self.ex_manager.find_extractor(iframe)
+                         results.append({
+                             "url"  : iframe,
+                             "name" : f"{extractor.name if extractor else alt_name}"
+                         })

-         return iframes
+         return results
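The DiziBox change above swaps the hard-coded dbxu cookie ("1722403730363") for a fresh value computed on every call. A quick standalone illustration of what that expression produces:

import time

# str(time.time() * 1000).split(".")[0] -> current Unix time in milliseconds, as a string
dbxu = str(time.time() * 1000).split(".")[0]
print(dbxu)  # e.g. "1722403730363"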
KekikStream/Plugins/DiziPal.py
@@ -0,0 +1,246 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, Subtitle
+ from parsel import Selector
+ import re
+
+ class DiziPal(PluginBase):
+     name        = "DiziPal"
+     language    = "tr"
+     main_url    = "https://dizipal1223.com"
+     favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+     description = "dizipal güncel, dizipal yeni ve gerçek adresi. dizipal en yeni dizi ve filmleri güvenli ve hızlı şekilde sunar."
+
+     main_page = {
+         f"{main_url}/diziler/son-bolumler"    : "Son Bölümler",
+         f"{main_url}/diziler"                 : "Yeni Diziler",
+         f"{main_url}/filmler"                 : "Yeni Filmler",
+         f"{main_url}/koleksiyon/netflix"      : "Netflix",
+         f"{main_url}/koleksiyon/exxen"        : "Exxen",
+         f"{main_url}/koleksiyon/blutv"        : "BluTV",
+         f"{main_url}/koleksiyon/disney"       : "Disney+",
+         f"{main_url}/koleksiyon/amazon-prime" : "Amazon Prime",
+         f"{main_url}/koleksiyon/tod-bein"     : "TOD (beIN)",
+         f"{main_url}/koleksiyon/gain"         : "Gain",
+         f"{main_url}/tur/mubi"                : "Mubi",
+     }
+
+     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         istek  = await self.httpx.get(url)
+         secici = Selector(istek.text)
+
+         results = []
+
+         if "/son-bolumler" in url:
+             for veri in secici.css("div.episode-item"):
+                 name    = veri.css("div.name::text").get()
+                 episode = veri.css("div.episode::text").get()
+                 href    = veri.css("a::attr(href)").get()
+                 poster  = veri.css("img::attr(src)").get()
+
+                 if name and href:
+                     ep_text = episode.strip().replace(". Sezon ", "x").replace(". Bölüm", "") if episode else ""
+                     title   = f"{name} {ep_text}"
+                     # Convert the latest-episode link to the series page
+                     dizi_url = href.split("/sezon")[0] if "/sezon" in href else href
+
+                     results.append(MainPageResult(
+                         category = category,
+                         title    = title,
+                         url      = self.fix_url(dizi_url),
+                         poster   = self.fix_url(poster) if poster else None,
+                     ))
+         else:
+             for veri in secici.css("article.type2 ul li"):
+                 title  = veri.css("span.title::text").get()
+                 href   = veri.css("a::attr(href)").get()
+                 poster = veri.css("img::attr(src)").get()
+
+                 if title and href:
+                     results.append(MainPageResult(
+                         category = category,
+                         title    = title,
+                         url      = self.fix_url(href),
+                         poster   = self.fix_url(poster) if poster else None,
+                     ))
+
+         return results
+
+     async def search(self, query: str) -> list[SearchResult]:
+         self.httpx.headers.update({
+             "Accept"           : "application/json, text/javascript, */*; q=0.01",
+             "X-Requested-With" : "XMLHttpRequest"
+         })
+
+         istek = await self.httpx.post(
+             url  = f"{self.main_url}/api/search-autocomplete",
+             data = {"query": query}
+         )
+
+         try:
+             data = istek.json()
+         except Exception:
+             return []
+
+         results = []
+
+         # The API sometimes returns a dict, sometimes a list
+         items = data.values() if isinstance(data, dict) else data
+
+         for item in items:
+             if not isinstance(item, dict):
+                 continue
+
+             title  = item.get("title")
+             url    = item.get("url")
+             poster = item.get("poster")
+
+             if title and url:
+                 results.append(SearchResult(
+                     title  = title,
+                     url    = f"{self.main_url}{url}",
+                     poster = self.fix_url(poster) if poster else None,
+                 ))
+
+         return results
+
+     async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
+         # Reset headers to get HTML response
+         self.httpx.headers.update({
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
+         })
+         self.httpx.headers.pop("X-Requested-With", None)
+
+         istek  = await self.httpx.get(url)
+         secici = Selector(text=istek.text, type="html")
+
+         poster      = self.fix_url(secici.css("meta[property='og:image']::attr(content)").get())
+         year        = secici.xpath("//div[text()='Yapım Yılı']//following-sibling::div/text()").get()
+         description = secici.css("div.summary p::text").get()
+         rating      = secici.xpath("//div[text()='IMDB Puanı']//following-sibling::div/text()").get()
+         tags_raw    = secici.xpath("//div[text()='Türler']//following-sibling::div/text()").get()
+         tags        = [t.strip() for t in tags_raw.split() if t.strip()] if tags_raw else None
+
+         dur_text  = secici.xpath("//div[text()='Ortalama Süre']//following-sibling::div/text()").get()
+         dur_match = re.search(r"(\d+)", dur_text or "")
+         duration  = int(dur_match[1]) if dur_match else None
+
+         if "/dizi/" in url:
+             title = secici.css("div.cover h5::text").get()
+
+             episodes = []
+             for ep in secici.css("div.episode-item"):
+                 ep_name  = ep.css("div.name::text").get()
+                 ep_href  = ep.css("a::attr(href)").get()
+                 ep_text  = ep.css("div.episode::text").get() or ""
+                 ep_parts = ep_text.strip().split(" ")
+
+                 ep_season  = None
+                 ep_episode = None
+                 if len(ep_parts) >= 4:
+                     try:
+                         ep_season  = int(ep_parts[0].replace(".", ""))
+                         ep_episode = int(ep_parts[2].replace(".", ""))
+                     except ValueError:
+                         pass
+
+                 if ep_name and ep_href:
+                     episodes.append(Episode(
+                         season  = ep_season,
+                         episode = ep_episode,
+                         title   = ep_name.strip(),
+                         url     = self.fix_url(ep_href),
+                     ))
+
+             return SeriesInfo(
+                 url         = url,
+                 poster      = poster,
+                 title       = title,
+                 description = description.strip() if description else None,
+                 tags        = tags,
+                 rating      = rating.strip() if rating else None,
+                 year        = year.strip() if year else None,
+                 duration    = duration,
+                 episodes    = episodes if episodes else None,
+             )
+         else:
+             title = secici.xpath("//div[@class='g-title'][2]/div/text()").get()
+
+             return MovieInfo(
+                 url         = url,
+                 poster      = poster,
+                 title       = title.strip() if title else None,
+                 description = description.strip() if description else None,
+                 tags        = tags,
+                 rating      = rating.strip() if rating else None,
+                 year        = year.strip() if year else None,
+                 duration    = duration,
+             )
+
+     async def load_links(self, url: str) -> list[dict]:
+         # Reset headers to get HTML response
+         self.httpx.headers.update({
+             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
+         })
+         self.httpx.headers.pop("X-Requested-With", None)
+
+         istek  = await self.httpx.get(url)
+         secici = Selector(istek.text)
+
+         iframe = secici.css(".series-player-container iframe::attr(src)").get()
+         if not iframe:
+             iframe = secici.css("div#vast_new iframe::attr(src)").get()
+
+         if not iframe:
+             return []
+
+         results = []
+
+         self.httpx.headers.update({"Referer": f"{self.main_url}/"})
+         i_istek = await self.httpx.get(iframe)
+         i_text  = i_istek.text
+
+         # Extract the m3u link
+         m3u_match = re.search(r'file:"([^"]+)"', i_text)
+         if m3u_match:
+             m3u_link = m3u_match[1]
+
+             # Extract the subtitles
+             subtitles = []
+             sub_match = re.search(r'"subtitle":"([^"]+)"', i_text)
+             if sub_match:
+                 sub_text = sub_match[1]
+                 if "," in sub_text:
+                     for sub in sub_text.split(","):
+                         lang    = sub.split("[")[1].split("]")[0] if "[" in sub else "Türkçe"
+                         sub_url = sub.replace(f"[{lang}]", "")
+                         subtitles.append(Subtitle(name=lang, url=self.fix_url(sub_url)))
+                 else:
+                     lang    = sub_text.split("[")[1].split("]")[0] if "[" in sub_text else "Türkçe"
+                     sub_url = sub_text.replace(f"[{lang}]", "")
+                     subtitles.append(Subtitle(name=lang, url=self.fix_url(sub_url)))
+
+             results.append({
+                 "name"      : self.name,
+                 "url"       : m3u_link,
+                 "referer"   : f"{self.main_url}/",
+                 "subtitles" : subtitles
+             })
+         else:
+             # Hand off to an extractor
+             extractor = self.ex_manager.find_extractor(iframe)
+             results.append({
+                 "name"    : f"{extractor.name if extractor else self.name}",
+                 "url"     : iframe,
+                 "referer" : f"{self.main_url}/",
+             })
+
+         return results
+
+     async def play(self, **kwargs):
+         extract_result = ExtractResult(**kwargs)
+         self.media_handler.title = kwargs.get("name")
+         if self.name not in self.media_handler.title:
+             self.media_handler.title = f"{self.name} | {self.media_handler.title}"
+
+         self.media_handler.play_media(extract_result)
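DiziPal's subtitle handling above assumes the player payload packs languages as "[Lang]url" pairs joined by commas. A self-contained sketch of that parsing with made-up data (cdn.example is illustrative, not a real endpoint):

# Illustrative embed payload; the real value comes from the "subtitle" field of the player response.
sub_text = "[Türkçe]https://cdn.example/tr.vtt,[English]https://cdn.example/en.vtt"

subtitles = []
for sub in sub_text.split(","):
    lang    = sub.split("[")[1].split("]")[0] if "[" in sub else "Türkçe"
    sub_url = sub.replace(f"[{lang}]", "")
    subtitles.append({"name": lang, "url": sub_url})

print(subtitles)
# [{'name': 'Türkçe', 'url': 'https://cdn.example/tr.vtt'},
#  {'name': 'English', 'url': 'https://cdn.example/en.vtt'}]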