KekikStream 1.8.1-py3-none-any.whl → 1.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,6 @@
 # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
 from abc import ABC, abstractmethod
-from curl_cffi import AsyncSession
 from cloudscraper import CloudScraper
 from httpx import AsyncClient
 from typing import Optional
@@ -12,7 +11,6 @@ class ExtractorBase(ABC):
     # Basic properties of the extractor
     name     = "Extractor"
     main_url = ""
-    requires_cffi = False
 
     def __init__(self):
         # cloudscraper - for bypassing Cloudflare
@@ -21,19 +19,11 @@ class ExtractorBase(ABC):
         # httpx - lightweight and safe for most HTTP requests
         self.httpx = AsyncClient(
             timeout = 3,
-            follow_redirects = True,
+            follow_redirects = True
         )
         self.httpx.headers.update(self.cloudscraper.headers)
         self.httpx.cookies.update(self.cloudscraper.cookies)
 
-        # curl_cffi - only initialize if needed for anti-bot bypass
-        self.cffi = None
-
-        if self.requires_cffi:
-            self.cffi = AsyncSession(impersonate="firefox135")
-            self.cffi.cookies.update(self.cloudscraper.cookies)
-            self.cffi.headers.update({"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 15.7; rv:135.0) Gecko/20100101 Firefox/135.0"})
-
     def can_handle_url(self, url: str) -> bool:
         # Check whether this extractor can handle the given URL
         return self.main_url in url
@@ -44,10 +34,8 @@ class ExtractorBase(ABC):
         pass
 
     async def close(self):
-        """Close both HTTP clients if they exist."""
+        """Close HTTP client."""
         await self.httpx.aclose()
-        if self.cffi:
-            await self.cffi.close()
 
     def fix_url(self, url: str) -> str:
         # Fix incomplete URLs and convert them to the full URL format
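With curl_cffi dropped, an extractor now gets exactly two clients from the base class: the CloudScraper session and an httpx.AsyncClient seeded with its headers and cookies. A minimal sketch of a subclass against the 1.9.2 base class follows; the class name and URL are hypothetical, and a real extractor would parse the actual stream URL out of the response:

    # A minimal sketch against the 1.9.2 ExtractorBase; names and URL are hypothetical.
    from KekikStream.Core import ExtractorBase, ExtractResult

    class ExampleExtractor(ExtractorBase):
        name     = "Example"
        main_url = "https://player.example.com"

        async def extract(self, url: str, referer: str | None = None) -> ExtractResult:
            if referer:
                self.httpx.headers.update({"Referer": referer})

            istek = await self.httpx.get(url)  # the one shared async client
            istek.raise_for_status()

            return ExtractResult(
                name       = self.name,
                url        = url,              # placeholder: parse the real stream URL here
                referer    = referer,
                user_agent = None,
                subtitles  = []
            )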
@@ -9,23 +9,44 @@ class ExtractorManager:
         self.extractor_loader = ExtractorLoader(extractor_dir)
         self.extractors       = self.extractor_loader.load_all()
 
-    def find_extractor(self, link) -> ExtractorBase:
-        # Find the extractor that can handle the given link
+        # Cache the extractor instances
+        self._extractor_instances = []
+        self._ytdlp_extractor     = None
+
         for extractor_cls in self.extractors:
-            extractor:ExtractorBase = extractor_cls()
+            instance = extractor_cls()
+
+            # Keep YTDLP separate
+            if instance.name == "yt-dlp":
+                self._ytdlp_extractor = instance
+            else:
+                self._extractor_instances.append(instance)
+
+        # Insert YTDLP at the VERY FRONT
+        if self._ytdlp_extractor:
+            self._extractor_instances.insert(0, self._ytdlp_extractor)
+
+    def find_extractor(self, link) -> ExtractorBase:
+        """
+        Find the extractor that can handle the given link
+        """
+        # Use the cached instances
+        for extractor in self._extractor_instances:
             if extractor.can_handle_url(link):
                 return extractor
 
         return None
 
     def map_links_to_extractors(self, links) -> dict:
-        # Map links to their matching extractors
+        """
+        Map links to their matching extractors
+        """
         mapping = {}
         for link in links:
-            for extractor_cls in self.extractors:
-                extractor:ExtractorBase = extractor_cls()
+            # Use the cached instances
+            for extractor in self._extractor_instances:
                 if extractor.can_handle_url(link):
                     mapping[link] = f"{extractor.name:<30} » {link.replace(extractor.main_url, '')}"
-                    break
+                    break  # Stop at the first match
 
-        return mapping
+        return mapping
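The manager now instantiates every extractor once in `__init__` and reuses those objects, with the yt-dlp catch-all probed first on each lookup. A hedged usage sketch; the directory argument and URL are illustrative:

    # Sketch: extractor instances are built once in __init__ and reused on every lookup.
    manager = ExtractorManager("Extractors")  # extractor_dir, as handed to ExtractorLoader

    link      = "https://example.com/embed/abc"  # illustrative URL
    extractor = manager.find_extractor(link)     # yt-dlp is consulted before the site extractors
    if extractor:
        print(f"{extractor.name:<30} » {link}")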
@@ -18,19 +18,34 @@ class MediaHandler:
         if extract_data.referer:
             self.headers["referer"] = extract_data.referer
 
-        # Use yt-dlp for special cases such as Google Drive
+        # Special cases (RecTV, Googleusercontent, etc.)
         if user_agent in ["googleusercontent", "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0"]:
             return self.play_with_ytdlp(extract_data)
 
-        # Pick the player based on the operating system
+        # Pick the player based on the operating system (Android case)
         if subprocess.check_output(['uname', '-o']).strip() == b'Android':
             return self.play_with_android_mxplayer(extract_data)
 
-        # Use mpv if there are subtitles
-        if extract_data.subtitles:
-            return self.play_with_mpv(extract_data)
+        # Player priority order (fallback chaining)
+        players = [
+            ("MPV",    self.play_with_mpv),
+            ("VLC",    self.play_with_vlc),
+            ("yt-dlp", self.play_with_ytdlp)
+        ]
+
+        # Fallback chaining
+        for player_name, player_func in players:
+            try:
+                result = player_func(extract_data)
+                if result or result is None:  # None = returned without an exception
+                    konsol.log(f"[green][✓] {player_name} ile başarılı[/green]")
+                    return True
+            except Exception as e:
+                konsol.log(f"[yellow][⚠] {player_name} hatası: {e}[/yellow]")
+                continue
 
-        return self.play_with_vlc(extract_data) or self.play_with_mpv(extract_data)
+        konsol.print("[red][✗] Hiçbir oynatıcı çalışmadı![/red]")
+        return False
 
     def play_with_vlc(self, extract_data: ExtractResult):
         konsol.log(f"[yellow][»] VLC ile Oynatılıyor : {extract_data.url}")
@@ -88,12 +103,15 @@ class MediaHandler:
             with open(os.devnull, "w") as devnull:
                 subprocess.run(mpv_command, stdout=devnull, stderr=devnull, check=True)
 
+            return True
         except subprocess.CalledProcessError as hata:
             konsol.print(f"[red]mpv oynatma hatası: {hata}[/red]")
             konsol.print({"title": self.title, "url": extract_data.url, "headers": self.headers})
+            return False
         except FileNotFoundError:
             konsol.print("[red]mpv bulunamadı! mpv kurulu olduğundan emin olun.[/red]")
             konsol.print({"title": self.title, "url": extract_data.url, "headers": self.headers})
+            return False
 
     def play_with_ytdlp(self, extract_data: ExtractResult):
         konsol.log(f"[yellow][»] yt-dlp ile Oynatılıyor : {extract_data.url}")
@@ -121,12 +139,15 @@ class MediaHandler:
             with subprocess.Popen(ytdlp_command, stdout=subprocess.PIPE) as ytdlp_proc:
                 subprocess.run(mpv_command, stdin=ytdlp_proc.stdout, check=True)
 
+            return True
         except subprocess.CalledProcessError as hata:
             konsol.print(f"[red]Oynatma hatası: {hata}[/red]")
             konsol.print({"title": self.title, "url": extract_data.url, "headers": self.headers})
+            return False
         except FileNotFoundError:
             konsol.print("[red]yt-dlp veya mpv bulunamadı! Kurulumlarından emin olun.[/red]")
             konsol.print({"title": self.title, "url": extract_data.url, "headers": self.headers})
+            return False
 
     def play_with_android_mxplayer(self, extract_data: ExtractResult):
         konsol.log(f"[yellow][»] MxPlayer ile Oynatılıyor : {extract_data.url}")
@@ -151,11 +172,12 @@ class MediaHandler:
                 with open(os.devnull, "w") as devnull:
                     subprocess.run(android_command, stdout=devnull, stderr=devnull, check=True)
 
-                return
-
+                return True
             except subprocess.CalledProcessError as hata:
                 konsol.print(f"[red]{paket} oynatma hatası: {hata}[/red]")
                 konsol.print({"title": self.title, "url": extract_data.url, "headers": self.headers})
+                return False
             except FileNotFoundError:
                 konsol.print(f"Paket: {paket}, Hata: MX Player kurulu değil")
                 konsol.print({"title": self.title, "url": extract_data.url, "headers": self.headers})
+                return False
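The three hunks above converge on one contract: every `play_with_*` helper now returns `True` on success and `False` on a handled failure, which is what lets the fallback chain in `play` walk MPV → VLC → yt-dlp. A sketch of a conforming helper; the player binary is hypothetical:

    # Sketch of the contract the fallback chain expects (player binary is hypothetical).
    import subprocess

    def play_with_example(extract_data) -> bool:
        try:
            subprocess.run(["example-player", extract_data.url], check=True)
            return True   # played: the chain stops here
        except subprocess.CalledProcessError:
            return False  # handled failure: the chain tries the next player
        except FileNotFoundError:
            return False  # player not installed: same treatment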
@@ -1,7 +1,6 @@
 # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
 from abc import ABC, abstractmethod
-from curl_cffi import AsyncSession
 from cloudscraper import CloudScraper
 from httpx import AsyncClient
 from .PluginModels import MainPageResult, SearchResult, MovieInfo
@@ -17,8 +16,6 @@ class PluginBase(ABC):
     favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
     description = "No description provided."
 
-    requires_cffi = False
-
     main_page = {}
 
     async def url_update(self, new_url: str):
@@ -33,19 +30,11 @@ class PluginBase(ABC):
         # httpx - lightweight and safe for most HTTP requests
         self.httpx = AsyncClient(
             timeout = 3,
-            follow_redirects = True,
+            follow_redirects = True
         )
         self.httpx.headers.update(self.cloudscraper.headers)
         self.httpx.cookies.update(self.cloudscraper.cookies)
 
-        # curl_cffi - only initialize if needed for anti-bot bypass
-        self.cffi = None
-
-        if self.requires_cffi:
-            self.cffi = AsyncSession(impersonate="firefox135")
-            self.cffi.cookies.update(self.cloudscraper.cookies)
-            self.cffi.headers.update({"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 15.7; rv:135.0) Gecko/20100101 Firefox/135.0"})
-
         self.media_handler = MediaHandler()
         self.ex_manager    = ExtractorManager()
 
@@ -90,10 +79,8 @@ class PluginBase(ABC):
         pass
 
     async def close(self):
-        """Close both HTTP clients if they exist."""
+        """Close HTTP client."""
         await self.httpx.aclose()
-        if self.cffi:
-            await self.cffi.close()
 
     def fix_url(self, url: str) -> str:
         if not url:
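PluginBase mirrors the extractor change: one AsyncClient seeded from cloudscraper, and `close()` shrinks to a single `aclose()`. A lifecycle sketch, assuming a hypothetical `SomePlugin` subclass and an illustrative query:

    # Sketch: the plugin lifecycle after the curl_cffi removal (SomePlugin is hypothetical).
    import asyncio

    async def main():
        plugin = SomePlugin()
        try:
            results = await plugin.search("keyif")  # query text is illustrative
        finally:
            await plugin.close()                    # closes the single httpx AsyncClient

    asyncio.run(main())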
@@ -0,0 +1,172 @@
+# This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle
+from urllib.parse import urlparse
+from yt_dlp.extractor import gen_extractors
+import yt_dlp, re, sys, os
+
+class YTDLP(ExtractorBase):
+    name     = "yt-dlp"
+    main_url = ""  # Universal - supports every site
+
+    _FAST_DOMAIN_RE = None  # compiled mega-regex (matched against the host)
+
+    @classmethod
+    def _init_fast_domain_regex(cls):
+        if cls._FAST_DOMAIN_RE is not None:
+            return
+
+        domains = set()
+
+        # Harvest domains from the _VALID_URL regexes of yt-dlp's extractors
+        # In those regex sources, domains usually appear escaped as "\.".
+        domain_pat = re.compile(r"(?:[a-z0-9-]+\\\.)+[a-z]{2,}", re.IGNORECASE)
+
+        for ie in gen_extractors():
+            # Do not include Generic in the fast path
+            if getattr(ie, "IE_NAME", "").lower() == "generic":
+                continue
+
+            valid = getattr(ie, "_VALID_URL", None)
+            if not valid or not isinstance(valid, str):
+                continue
+
+            for m in domain_pat.findall(valid):
+                d = m.replace(r"\.", ".").lower()
+
+                # Add a filter here if you want to weed out overly aggressive/suspicious entries
+                # (usually not necessary)
+                domains.add(d)
+
+        # If no domains were found (a very unlikely edge case), fall back to an empty regex
+        if not domains:
+            cls._FAST_DOMAIN_RE = re.compile(r"$^")  # matches nothing
+            return
+
+        # Host matching with subdomain support (m.youtube.com, player.vimeo.com, etc.)
+        # (?:^|.*\.) (domain1|domain2|...) $
+        joined  = "|".join(sorted(re.escape(d) for d in domains))
+        pattern = rf"(?:^|.*\.)(?:{joined})$"
+        cls._FAST_DOMAIN_RE = re.compile(pattern, re.IGNORECASE)
+
+    def __init__(self):
+        self.__class__._init_fast_domain_regex()
+
+    def can_handle_url(self, url: str) -> bool:
+        """
+        Fast path: check the URL host against a single mega-regex (no loop)
+        Slow path: fall back to the existing extract_info-based check when needed
+        """
+        # Parse the URL and take the host
+        try:
+            parsed = urlparse(url)
+            host   = (parsed.hostname or "").lower()
+        except Exception:
+            host = ""
+
+        # Support scheme-less URLs such as "youtube.com/..."
+        if not host and "://" not in url:
+            try:
+                parsed = urlparse("https://" + url)
+                host   = (parsed.hostname or "").lower()
+            except Exception:
+                host = ""
+
+        # Fast path
+        if host and self.__class__._FAST_DOMAIN_RE.search(host):
+            return True
+
+        # SLOW PATH: yt-dlp's native check for the remaining sites
+        try:
+            # Temporarily silence stderr (hide error messages)
+            old_stderr = sys.stderr
+            sys.stderr = open(os.devnull, "w")
+
+            try:
+                ydl_opts = {
+                    "simulate"              : True,  # No download, detection only
+                    "quiet"                 : True,  # No log noise
+                    "no_warnings"           : True,  # No warning messages
+                    "extract_flat"          : True,  # Minimal processing
+                    "no_check_certificates" : True,
+                    "ignoreerrors"          : True   # Ignore errors
+                }
+
+                with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+                    # Check whether it can process the URL
+                    info = ydl.extract_info(url, download=False, process=False)
+
+                    # Skip it if only the Generic extractor claims it
+                    if info and info.get("extractor_key") != "Generic":
+                        return True
+
+                    return False
+            finally:
+                # Restore stderr
+                sys.stderr.close()
+                sys.stderr = old_stderr
+
+        except Exception:
+            # If yt-dlp cannot handle it, return False
+            return False
+
+    async def extract(self, url: str, referer: str | None = None) -> ExtractResult:
+        ydl_opts = {
+            "quiet"                 : True,
+            "no_warnings"           : True,
+            "extract_flat"          : False,   # Get the full info
+            "format"                : "best",  # Best quality
+            "no_check_certificates" : True
+        }
+
+        # If there is a referer, send it as a header
+        if referer:
+            ydl_opts["http_headers"] = {"Referer": referer}
+
+        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+            info = ydl.extract_info(url, download=False)
+
+            if not info:
+                raise ValueError("yt-dlp video bilgisi döndürmedi")
+
+            # Take the video URL
+            video_url = info.get("url")
+            if not video_url:
+                # Sometimes it only appears in the formats list
+                formats = info.get("formats", [])
+                if formats:
+                    video_url = formats[-1].get("url")  # Last format (usually the best)
+
+            if not video_url:
+                raise ValueError("Video URL bulunamadı")
+
+            # Collect the subtitles
+            subtitles = []
+            if subtitle_data := info.get("subtitles"):
+                for lang, subs in subtitle_data.items():
+                    for sub in subs:
+                        if sub_url := sub.get("url"):
+                            subtitles.append(
+                                Subtitle(
+                                    name = f"{lang} ({sub.get('ext', 'unknown')})",
+                                    url  = sub_url
+                                )
+                            )
+
+            # Take the User-Agent
+            user_agent   = None
+            http_headers = info.get("http_headers", {})
+            if http_headers:
+                user_agent = http_headers.get("User-Agent")
+
+            return ExtractResult(
+                name       = self.name,
+                url        = video_url,
+                referer    = referer or info.get("webpage_url"),
+                user_agent = user_agent,
+                subtitles  = subtitles
+            )
+
+    async def close(self):
+        """No cleanup is needed for yt-dlp"""
+        pass
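The new extractor front-loads its cost: the first instantiation walks every yt-dlp info extractor once, harvests literal domains out of their `_VALID_URL` patterns, and compiles them into a single anchored alternation, so later `can_handle_url` calls on known hosts are one regex probe. A hedged usage sketch; the URLs are illustrative, and the slow path may hit the network:

    # Sketch: the first construction builds the domain regex once per process.
    ytdlp = YTDLP()

    # Known host -> answered by the compiled host regex, no yt-dlp probe.
    print(ytdlp.can_handle_url("https://m.youtube.com/watch?v=dQw4w9WgXcQ"))

    # Unknown host -> falls through to the extract_info probe (network, slower).
    print(ytdlp.can_handle_url("https://stream.example.invalid/video/1"))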
@@ -1,6 +1,6 @@
 # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
-from KekikStream.Core import kekik_cache, PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
+from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
 from Kekik.Sifreleme import CryptoJS
 from parsel import Selector
 import re, urllib.parse, base64, contextlib, asyncio, time
@@ -40,7 +40,6 @@ class DiziBox(PluginBase):
         f"{main_url}/dizi-arsivi/page/SAYFA/?tur[0]=yarisma&yil&imdb" : "Yarışma"
     }
 
-    #@kekik_cache(ttl=60*60)
     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
         self.httpx.cookies.update({
             "isTrustedUser" : "true",
@@ -62,7 +61,6 @@ class DiziBox(PluginBase):
             for veri in secici.css("article.detailed-article")
         ]
 
-    #@kekik_cache(ttl=60*60)
     async def search(self, query: str) -> list[SearchResult]:
         self.httpx.cookies.update({
             "isTrustedUser" : "true",
@@ -80,7 +78,6 @@ class DiziBox(PluginBase):
             for item in secici.css("article.detailed-article")
         ]
 
-    #@kekik_cache(ttl=60*60)
     async def load_item(self, url: str) -> SeriesInfo:
         istek  = await self.httpx.get(url)
         secici = Selector(istek.text)
@@ -127,7 +124,6 @@ class DiziBox(PluginBase):
             actors = actors,
         )
 
-    #@kekik_cache(ttl=60*60)
     async def _iframe_decode(self, name:str, iframe_link:str, referer:str) -> list[str]:
         results = []
 
@@ -178,7 +174,6 @@ class DiziBox(PluginBase):
 
         return results
 
-    #@kekik_cache(ttl=15*60)
     async def load_links(self, url: str) -> list[dict]:
         istek  = await self.httpx.get(url)
         secici = Selector(istek.text)
@@ -214,4 +209,4 @@ class DiziBox(PluginBase):
                 "name" : f"{extractor.name if extractor else alt_name}"
             })
 
-        return results
+        return results
@@ -9,7 +9,7 @@ class DiziPal(PluginBase):
     language = "tr"
     main_url = "https://dizipal1223.com"
     favicon  = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
-    description = "Yabancı Dizi ve Film izle."
+    description = "dizipal güncel, dizipal yeni ve gerçek adresi. dizipal en yeni dizi ve filmleri güvenli ve hızlı şekilde sunar."
 
     main_page = {
         f"{main_url}/diziler/son-bolumler" : "Son Bölümler",
@@ -1,6 +1,6 @@
 # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
-from KekikStream.Core import kekik_cache, PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, Subtitle, ExtractResult
+from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, Subtitle, ExtractResult
 from parsel import Selector
 import re
 
@@ -29,7 +29,6 @@ class DiziYou(PluginBase):
         f"{main_url}/dizi-arsivi/page/SAYFA/?tur=Vah%C5%9Fi+Bat%C4%B1" : "Vahşi Batı"
     }
 
-    #@kekik_cache(ttl=60*60)
     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
         istek  = await self.httpx.get(f"{url.replace('SAYFA', str(page))}")
         secici = Selector(istek.text)
@@ -44,7 +43,6 @@ class DiziYou(PluginBase):
             for veri in secici.css("div.single-item")
         ]
 
-    #@kekik_cache(ttl=60*60)
     async def search(self, query: str) -> list[SearchResult]:
         istek  = await self.httpx.get(f"{self.main_url}/?s={query}")
         secici = Selector(istek.text)
@@ -53,18 +51,28 @@ class DiziYou(PluginBase):
             SearchResult(
                 title  = afis.css("div#categorytitle a::text").get().strip(),
                 url    = self.fix_url(afis.css("div#categorytitle a::attr(href)").get()),
-                poster = self.fix_url(afis.css("img::attr(src)").get()),
+                poster = self.fix_url(afis.css("img::attr(src)").get() or afis.css("img::attr(data-src)").get())
             )
             for afis in secici.css("div.incontent div#list-series")
         ]
 
-    #@kekik_cache(ttl=60*60)
     async def load_item(self, url: str) -> SeriesInfo:
         istek  = await self.httpx.get(url)
         secici = Selector(istek.text)
 
-        title  = secici.css("h1::text").get().strip()
-        poster = self.fix_url(secici.css("div.category_image img::attr(src)").get().strip())
+        # Title lives in div.title h1
+        title_raw = secici.css("div.title h1::text").get()
+        title     = title_raw.strip() if title_raw else ""
+
+        # Fallback: if the title is empty, derive it from the URL (for pages under copyright restrictions)
+        if not title:
+            # Take the slug from the URL: https://www.diziyou.one/jasmine/ -> jasmine -> Jasmine
+            slug  = url.rstrip('/').split('/')[-1]
+            title = slug.replace('-', ' ').title()
+
+        # Poster
+        poster_raw = secici.css("div.category_image img::attr(src)").get()
+        poster     = self.fix_url(poster_raw) if poster_raw else ""
         year        = secici.xpath("//span[contains(., 'Yapım Yılı')]/following-sibling::text()[1]").get()
         description = secici.css("div.diziyou_desc::text").get()
         if description:
@@ -75,13 +83,21 @@ class DiziYou(PluginBase):
         actors = [actor.strip() for actor in _actors.split(",")] if _actors else []
 
         episodes = []
-        for it in secici.css("div.bolumust"):
-            ep_name = it.css("div.baslik::text").get().strip()
-            ep_href = it.xpath("ancestor::a/@href").get()
-            if not ep_name or not ep_href:
+        # Episodes - each "bolumust" block sits inside its own <a>
+        # :has() is not supported by parsel, so we use XPath instead
+        for link in secici.xpath('//a[div[@class="bolumust"]]'):
+            ep_name_raw = link.css("div.baslik::text").get()
+            if not ep_name_raw:
+                continue
+            ep_name = ep_name_raw.strip()
+
+            ep_href = self.fix_url(link.css("::attr(href)").get())
+            if not ep_href:
                 continue
 
-            ep_name_clean = it.css("div.bolumismi::text").get().strip().replace("(", "").replace(")", "").strip() if it.css("div.bolumismi::text").get() else ep_name
+            # Take the pretty episode name if there is one
+            ep_name_raw_clean = link.css("div.bolumismi::text").get()
+            ep_name_clean     = ep_name_raw_clean.strip().replace("(", "").replace(")", "").strip() if ep_name_raw_clean else ep_name
 
             ep_episode = re.search(r"(\d+)\. Bölüm", ep_name)[1]
             ep_season  = re.search(r"(\d+)\. Sezon", ep_name)[1]
@@ -107,14 +123,23 @@ class DiziYou(PluginBase):
             actors = actors
         )
 
-    #@kekik_cache(ttl=15*60)
     async def load_links(self, url: str) -> list[dict]:
         istek  = await self.httpx.get(url)
         secici = Selector(istek.text)
 
-        item_title = secici.css("div.title h1::text").get()
-        ep_name    = secici.css("div#bolum-ismi::text").get().strip()
-        item_id    = secici.css("iframe#diziyouPlayer::attr(src)").get().split("/")[-1].replace(".html", "")
+        # Title and episode name - with None checks added
+        item_title_raw = secici.css("div.title h1::text").get()
+        item_title     = item_title_raw.strip() if item_title_raw else ""
+
+        ep_name_raw = secici.css("div#bolum-ismi::text").get()
+        ep_name     = ep_name_raw.strip() if ep_name_raw else ""
+
+        # Pull the item_id out of the player src
+        player_src = secici.css("iframe#diziyouPlayer::attr(src)").get()
+        if not player_src:
+            return []  # No player found, return an empty list
+
+        item_id = player_src.split("/")[-1].replace(".html", "")
 
         subtitles   = []
         stream_urls = []
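The DiziYou hunks repeat one defensive idiom: parsel's `.get()` returns `None` for a missing node, so every `.strip()` or URL fix now sits behind a check. The idiom in isolation, with illustrative HTML:

    # Sketch of the None-guard idiom the DiziYou changes apply throughout.
    from parsel import Selector

    secici = Selector(text="<div class='title'><h1> Jasmine </h1></div>")

    title_raw = secici.css("div.title h1::text").get()  # None when the node is absent
    title     = title_raw.strip() if title_raw else ""

    poster_raw = secici.css("div.category_image img::attr(src)").get()
    poster     = poster_raw if poster_raw else ""       # never call .strip() on None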