KekikStream 2.3.7__py3-none-any.whl → 2.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of KekikStream might be problematic.

@@ -9,6 +9,7 @@ class MolyStream(ExtractorBase):
 
     # Birden fazla domain destekle
     supported_domains = [
+        "dbx.molystream.org",
         "ydx.molystream.org",
         "yd.sheila.stream",
        "ydf.popcornvakti.net",
@@ -18,13 +19,54 @@ class MolyStream(ExtractorBase):
         return any(domain in url for domain in self.supported_domains)
 
     async def extract(self, url, referer=None) -> ExtractResult:
-        if "doctype html" in url:
-            secici = HTMLHelper(url)
-            video = secici.select_attr("video#sheplayer source", "src")
+        if "doctype html" in url.lower():
+            text = url
         else:
-            video = url
+            # Sheila-style referer fix
+            if "/embed/sheila/" in url:
+                referer = url.replace("/embed/sheila/", "/embed/")
 
-        resp_sec = HTMLHelper(url)
+            self.httpx.headers.update({
+                "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+                "Referer" : referer or self.main_url
+            })
+            istek = await self.httpx.get(url, follow_redirects=True)
+            text = istek.text
+
+        # 1. Sheila-style links often have the m3u8 directly as the first http line or in script
+        m3u8 = None
+        if "#EXTM3U" in text:
+            for line in text.splitlines():
+                line = line.strip().replace('"', '').replace("'", "")
+                if line.startswith("http"):
+                    m3u8 = line
+                    break
+
+        if not m3u8:
+            for line in text.splitlines():
+                line = line.strip().replace('"', '').replace("'", "")
+                if line.startswith("http") and ".m3u8" in line:
+                    m3u8 = line
+                    break
+
+        if not m3u8:
+            secici = HTMLHelper(text)
+            # 2. Try video tag
+            m3u8 = secici.select_attr("video#sheplayer source", "src") or secici.select_attr("video source", "src")
+
+        if not m3u8:
+            # 3. Try regex
+            m3u8 = HTMLHelper(text).regex_first(r'["\'](https?://[^"\']+\.m3u8[^"\']*)["\']')
+
+        if not m3u8:
+            # Fallback to any http link in a script if it looks like a video link
+            m3u8 = HTMLHelper(text).regex_first(r'["\'](https?://[^"\']+/q/\d+)["\']')
+
+        if not m3u8:
+            m3u8 = url  # Final fallback
+
+        # Subtitles (Sheila style addSrtFile)
+        resp_sec = HTMLHelper(text)
         matches = resp_sec.regex_all(r"addSrtFile\(['\"]([^'\"]+\.srt)['\"]\s*,\s*['\"][a-z]{2}['\"]\s*,\s*['\"]([^'\"]+)['\"]")
 
         subtitles = [
@@ -34,8 +76,8 @@ class MolyStream(ExtractorBase):
 
         return ExtractResult(
             name = self.name,
-            url = video,
-            referer = video.replace("/sheila", "") if video else None,
-            user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0",
+            url = m3u8,
+            referer = url,
+            user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
             subtitles = subtitles
         )
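For context, the rewritten MolyStream.extract() resolves the stream through a chain of fallbacks: a raw #EXTM3U playlist body, then the player's <video> tag (via the package's HTMLHelper, omitted below), then a quoted .m3u8 URL, then a /q/<quality> link, and finally the input URL itself. A minimal standalone sketch of that ordering using only the standard library (find_stream_url and the sample playlist are illustrative, not part of the package):

    import re

    def find_stream_url(text: str, fallback: str) -> str:
        # 1) Body is already an HLS playlist: take the first absolute URL line
        if "#EXTM3U" in text:
            for line in text.splitlines():
                line = line.strip().strip("\"'")
                if line.startswith("http"):
                    return line
        # 2) Any quoted .m3u8 URL embedded in HTML/JS
        if m := re.search(r'["\'](https?://[^"\']+\.m3u8[^"\']*)["\']', text):
            return m.group(1)
        # 3) Quoted /q/<quality> style links used by some players
        if m := re.search(r'["\'](https?://[^"\']+/q/\d+)["\']', text):
            return m.group(1)
        # 4) Nothing matched: fall back to the original URL
        return fallback

    print(find_stream_url("#EXTM3U\nhttps://example.com/stream.m3u8", "https://example.com/embed/x"))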
@@ -24,6 +24,8 @@ class VidMoly(ExtractorBase):
 
         if ".me" in url:
             url = url.replace(".me", ".net")
+        if ".to" in url:
+            url = url.replace(".to", ".net")
 
         # VidMoly bazen redirect ediyor, takip et
         response = await self.httpx.get(url, follow_redirects=True)
@@ -70,6 +72,17 @@ class VidMoly(ExtractorBase):
             if sub.get("kind") == "captions"
         ]
 
+        if "#EXTM3U" in response.text:
+            for line in response.text.splitlines():
+                line = line.strip().replace('"', '').replace("'", "")
+                if line.startswith("http"):
+                    return ExtractResult(
+                        name = self.name,
+                        url = line,
+                        referer = self.main_url,
+                        subtitles = subtitles
+                    )
+
         if script_str := resp_sec.regex_first(r"sources:\s*\[(.*?)\],", flags= re.DOTALL):
             script_content = script_str
             # Video kaynaklarını ayrıştır
@@ -167,34 +167,44 @@ class DiziBox(PluginBase):
 
             crypt_data = iframe_secici.regex_first(r"CryptoJS\.AES\.decrypt\(\"(.*)\",\"", iframe_istek.text)
             crypt_pass = iframe_secici.regex_first(r"\",\"(.*)\"\);", iframe_istek.text)
-            decode = CryptoJS.decrypt(crypt_pass, crypt_data)
-
-            if video_match := iframe_secici.regex_first(r"file: '(.*)',", decode):
-                results.append(video_match)
-            else:
-                results.append(decode)
+            if crypt_data and crypt_pass:
+                decode = CryptoJS.decrypt(crypt_pass, crypt_data)
+                if video_match := iframe_secici.regex_first(r"file: '(.*)',", decode):
+                    results.append(video_match)
+                else:
+                    results.append(decode)
 
         elif "/player/moly/moly.php" in iframe_link:
             iframe_link = iframe_link.replace("moly.php?h=", "moly.php?wmode=opaque&h=")
-            while True:
+            for _ in range(3): # Max 3 attempts
                 await asyncio.sleep(.3)
                 with contextlib.suppress(Exception):
                     moly_istek = await self.httpx.get(iframe_link)
                     moly_secici = HTMLHelper(moly_istek.text)
 
-                    if atob_data := moly_secici.regex_first(r"unescape\(\"(.*)\"\)", moly_istek.text):
+                    if atob_data := moly_secici.regex_first(r"unescape\(\"(.*)\"\)"):
                         decoded_atob = urllib.parse.unquote(atob_data)
                         str_atob = base64.b64decode(decoded_atob).decode("utf-8")
 
-                        iframe_src = HTMLHelper(str_atob).select_attr("div#Player iframe", "src")
-                        if iframe_src:
-                            results.append(iframe_src)
-
-                        break
+                        iframe_src = HTMLHelper(str_atob).select_attr("div#Player iframe", "src")
+                        if iframe_src:
+                            # ! Sheila replacement (Kotlin referansı)
+                            if "/embed/" in iframe_src:
+                                iframe_src = iframe_src.replace("/embed/", "/embed/sheila/").replace("vidmoly.me", "vidmoly.net")
+
+                            results.append(iframe_src)
+                            break
+                    elif embed_matches := moly_secici.regex_all(r'iframe.*?src="(.*?)"'):
+                        for src in embed_matches:
+                            if "/embed/" in src:
+                                src = src.replace("/embed/", "/embed/sheila/").replace("vidmoly.me", "vidmoly.net")
+                            results.append(src)
+                        break
 
         elif "/player/haydi.php" in iframe_link:
-            okru_url = base64.b64decode(iframe_link.split("?v=")[-1]).decode("utf-8")
-            results.append(okru_url)
+            with contextlib.suppress(Exception):
+                okru_url = base64.b64decode(iframe_link.split("?v=")[-1]).decode("utf-8")
+                results.append(okru_url)
 
         return results
 
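The DiziBox change above replaces an unbounded while True poll with a bounded retry wrapped in contextlib.suppress. The same pattern in isolation (poll_iframe and fetch are illustrative names; the 0.3 s delay mirrors the diff):

    import asyncio, contextlib

    async def poll_iframe(fetch, attempts: int = 3, delay: float = 0.3):
        # Try a few times instead of looping forever; swallow per-attempt errors
        for _ in range(attempts):
            await asyncio.sleep(delay)
            with contextlib.suppress(Exception):
                if result := await fetch():
                    return result
        return None  # give up after `attempts` failed tries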
@@ -127,18 +127,27 @@ class DiziPal(PluginBase):
 
         poster = self.fix_url(secici.select_attr("meta[property='og:image']", "content")) if secici.select_attr("meta[property='og:image']", "content") else None
 
-        # XPath yerine regex ile HTML'den çıkarma
-        year = secici.regex_first(r'(?is)Yapım Yılı.*?<div[^>]*>(\d{4})</div>', secici.html)
-
-        description = secici.select_text("div.summary p")
+        # Sidebar bilgilerini topla
+        info = {}
+        for li in secici.select("li"):
+            key = secici.select_text("div.key", li)
+            val = secici.select_text("div.value", li)
+            if key and val:
+                info[key.strip(":")] = val.strip()
+
+        year = info.get("Yapım Yılı")
+        rating = info.get("IMDB Puanı")
+
+        tags_raw = info.get("Türler", "")
+        tags = [t.strip() for t in tags_raw.split() if t.strip()] if tags_raw else None
 
-        rating = secici.regex_first(r'(?is)IMDB Puanı.*?<div[^>]*>([0-9.]+)</div>', secici.html)
+        actors_raw = info.get("Oyuncular")
+        actors = [a.strip() for a in actors_raw.split(",") if a.strip()] if actors_raw else None
 
-        tags_raw = secici.regex_first(r'(?is)Türler.*?<div[^>]*>([^<]+)</div>', secici.html)
-        tags = [t.strip() for t in tags_raw.split() if t.strip()] if tags_raw else None
+        description = secici.select_text("div.summary p")
 
-        duration_raw = secici.regex_first(r'(?is)Ortalama Süre.*?<div[^>]*>(\d+)', secici.html)
-        duration = int(duration_raw) if duration_raw else None
+        duration_raw = info.get("Ortalama Süre")
+        duration = int(secici.regex_first(r"(\d+)", duration_raw)) if duration_raw else None
 
         if "/dizi/" in url:
             title = secici.select_text("div.cover h5")
@@ -177,6 +186,7 @@ class DiziPal(PluginBase):
                 year = year,
                 duration = duration,
                 episodes = episodes if episodes else None,
+                actors = actors,
             )
         else:
             # Film için title - g-title div'lerinin 2. olanı
@@ -192,6 +202,7 @@ class DiziPal(PluginBase):
                 rating = rating,
                 year = year,
                 duration = duration,
+                actors = actors,
             )
 
    async def load_links(self, url: str) -> list[ExtractResult]:
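DiziPal's detail fields now come from a single pass over the sidebar's key/value rows instead of separate regexes against the raw HTML. A rough equivalent with BeautifulSoup (the package uses its own HTMLHelper; the div.key / div.value selectors and the Turkish labels are taken from the diff):

    from bs4 import BeautifulSoup

    def collect_info(html: str) -> dict:
        secici = BeautifulSoup(html, "html.parser")
        info = {}
        for li in secici.select("li"):
            key = li.select_one("div.key")
            val = li.select_one("div.value")
            if key and val:
                # "Yapım Yılı:" -> key "Yapım Yılı", value kept as plain text
                info[key.get_text(strip=True).strip(":")] = val.get_text(strip=True)
        return info

    html = '<li><div class="key">Yapım Yılı:</div><div class="value">2021</div></li>'
    print(collect_info(html).get("Yapım Yılı"))  # -> 2021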
@@ -27,10 +27,10 @@ class DiziWatch(PluginBase):
         "1" : "Suç",
     }
 
+    c_key = ""
+    c_value = ""
+
     async def _init_session(self):
-        if getattr(self, "c_key", None) and getattr(self, "c_value", None):
-            return
-
         # Fetch anime-arsivi to get CSRF tokens
         resp = await self.httpx.get(f"{self.main_url}/anime-arsivi")
         sel = HTMLHelper(resp.text)
@@ -137,9 +137,9 @@ class DiziWatch(PluginBase):
         resp = await self.httpx.get(url)
         sel = HTMLHelper(resp.text)
 
-        title = sel.select_text("h2")
-        poster = sel.select_attr("img.rounded-md", "src")
-        description = sel.select_text("div.text-sm")
+        title = sel.select_text("h2") or sel.select_text("h1")
+        poster = sel.select_attr("img.rounded-md", "src") or sel.select_attr("meta[property='og:image']", "content")
+        description = sel.select_text("div.text-sm") or sel.select_text("div.summary")
 
         year = sel.regex_first(r"Yap\u0131m Y\u0131l\u0131\s*:\s*(\d+)", resp.text)
 
@@ -72,7 +72,7 @@ class DiziYou(PluginBase):
         html_text = istek.text
 
         # Title - div.title h1 içinde
-        title = secici.select_text("div.title h1")
+        title = (secici.select_text("div.title h1") or "").strip()
 
         # Fallback: Eğer title boşsa URL'den çıkar (telif kısıtlaması olan sayfalar için)
         if not title:
@@ -81,23 +81,19 @@ class DiziYou(PluginBase):
             title = slug.replace('-', ' ').title()
 
         # Poster
-        poster_src = secici.select_attr("div.category_image img", "src")
+        poster_src = secici.select_attr("div.category_image img", "src") or secici.select_attr("meta[property='og:image']", "content")
         poster = self.fix_url(poster_src) if poster_src else ""
 
         # Year - regex ile çıkarma (xpath yerine)
         year = secici.regex_first(r"(?is)Yapım Yılı.*?(\d{4})", secici.html)
 
-        description = None
-        # Extract inner HTML via regex and clean
-        desc_html = secici.regex_first(r'(?s)<div class="diziyou_desc">(.*?)</div>', secici.html)
-        if desc_html:
-            # Script taglarını kaldır
-            desc_html = HTMLHelper(desc_html).regex_replace(r"(?s)<script.*?</script>", "")
-            # div#icerikcat2 ve sonrasını kaldır (meta bilgileri içeriyor)
-            desc_html = HTMLHelper(desc_html).regex_replace(r"(?s)<div id=\"icerikcat2\".*", "")
-            # Kalan HTML'den text çıkar
-            clean_sel = HTMLHelper(desc_html)
-            description = clean_sel.select_text()
+        description_el = secici.select("div.diziyou_desc") or secici.select("div#icerikcat")
+        description = ""
+        if description_el:
+            # Scriptleri temizle
+            for script in secici.select("script", description_el[0]):
+                script.decompose()
+            description = secici.select_text(None, description_el[0])
 
         tags = [secici.select_text(None, a) for a in secici.select("div.genres a") if secici.select_text(None, a)]
 
@@ -109,9 +105,9 @@ class DiziYou(PluginBase):
         actors = [actor.strip() for actor in actors_raw.split(",") if actor.strip()] if actors_raw else []
 
         episodes = []
-        # Episodes - daha fazla DOM/URL kalıbını destekle
-        for link in secici.select("a"):
-            ep_href = secici.select_attr("a", "href", link)
+        # Episodes - div#scrollbar-container a (kısıtlı alan)
+        for link in secici.select("div#scrollbar-container a"):
+            ep_href = secici.select_attr(None, "href", link)
             if not ep_href:
                 continue
 
@@ -179,9 +175,9 @@ class DiziYou(PluginBase):
         # Player src'den item_id çıkar - önce özel player seçicisini dene
         player_src = None
         # Yaygın locatorlar
-        for sel in ["iframe#diziyouPlayer", "div.player iframe", "iframe[src*='/episodes/']", "iframe"]:
+        for sel in ["iframe#diziyouPlayer", "div.player iframe", "iframe[src*='/player/']", "iframe[src*='/episodes/']", "iframe"]:
             p = secici.select_attr(sel, "src")
-            if p and "youtube.com" not in p.lower():
+            if p and any(x in p.lower() for x in ["/player/", "/episodes/", "diziyou"]):
                 player_src = p
                 break
 
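The DiziYou description handling above switches from regex slicing of the page source to selecting the container element and dropping its <script> children before reading the text. A BeautifulSoup sketch of the same idea (the diziyou_desc / icerikcat selectors come from the diff; clean_description is an illustrative name):

    from bs4 import BeautifulSoup

    def clean_description(html: str) -> str:
        soup = BeautifulSoup(html, "html.parser")
        container = soup.select_one("div.diziyou_desc") or soup.select_one("div#icerikcat")
        if not container:
            return ""
        for script in container.select("script"):
            script.decompose()  # remove inline scripts so they don't leak into the text
        return container.get_text(" ", strip=True)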
@@ -137,70 +137,60 @@ class Dizilla(PluginBase):
         istek = await self.httpx.get(url)
         secici = HTMLHelper(istek.text)
 
-        title = secici.select_text("div.poster.poster h2")
-        if not title:
+        next_data_text = secici.select_text("script#__NEXT_DATA__")
+        if not next_data_text:
             return None
 
-        poster = secici.select_attr("div.w-full.page-top.relative img", "src")
-        poster = self.fix_url(poster) if poster else None
-
-        # Year extraction (Kotlin: [1] index for w-fit min-w-fit)
-        info_boxes = secici.select("div.w-fit.min-w-fit")
-        year = None
-        if len(info_boxes) > 1:
-            year_text = secici.select_text("span.text-sm.opacity-60", info_boxes[1])
-            if year_text:
-                year = year_text.split(" ")[-1] if " " in year_text else year_text
-
-        description = secici.select_text("div.mt-2.text-sm")
-
-        tags_text = secici.select_text("div.poster.poster h3")
-        tags = [t.strip() for t in tags_text.split(",")] if tags_text else []
-
-        actors = secici.select_all_text("div.global-box h5")
-
-        episodeses = []
-        # Seasons links iteration
-        season_links = secici.select("div.flex.items-center.flex-wrap.gap-2.mb-4 a")
-        for sezon in season_links:
-            sezon_href = secici.select_attr("a", "href", sezon)
-            sezon_href = self.fix_url(sezon_href)
-            sezon_req = await self.httpx.get(sezon_href)
-
-            season_num = None
-            try:
-                # URL'den sezon numarasını çek: ...-N-sezon formatı
-                season_match = secici.regex_first(r"-(\d+)-sezon", sezon_href)
-                if season_match:
-                    season_num = int(season_match)
-            except:
-                pass
-
-            sezon_secici = HTMLHelper(sezon_req.text)
-            for bolum in sezon_secici.select("div.episodes div.cursor-pointer"):
-                # Kotlin: bolum.select("a").last()
-                links = sezon_secici.select("a", bolum)
-                if not links:
-                    continue
-
-                ep_link = links[-1]
-                ep_name = sezon_secici.select_text("a", ep_link)
-                ep_href = sezon_secici.select_attr("a", "href", ep_link)
-                ep_href = self.fix_url(ep_href)
+        next_data = loads(next_data_text)
+        secure_data = next_data.get("props", {}).get("pageProps", {}).get("secureData")
+        if not secure_data:
+            return None
+
+        decrypted = await self.decrypt_response(secure_data)
+        content = decrypted.get("contentItem", {})
+        if not content:
+            return None
+
+        title = content.get("original_title") or content.get("used_title")
+        description = content.get("description") or content.get("used_description")
+        rating = content.get("imdb_point") or content.get("local_vote_avg")
+        year = content.get("release_year")
+
+        # Poster and Backdrop - prefer backdrop if available for SeriesInfo
+        poster = self.fix_poster_url(self.fix_url(content.get("back_url") or content.get("poster_url")))
+
+        # Tags
+        tags = []
+        categories = decrypted.get("RelatedResults", {}).get("getSerieCategoriesById", {}).get("result", [])
+        for cat in categories:
+            tags.append(cat.get("name"))
+
+        # Actors
+        actors = []
+        casts = decrypted.get("RelatedResults", {}).get("getSerieCastsById", {}).get("result", [])
+        for cast in casts:
+            actors.append(cast.get("name"))
+
+        # Episodes
+        episodes = []
+        seasons_data = decrypted.get("RelatedResults", {}).get("getSerieSeasonAndEpisodes", {}).get("result", [])
+        for season_item in seasons_data:
+            season_num = season_item.get("season_no")
+            for ep_item in season_item.get("episodes", []):
+                ep_num = ep_item.get("episode_no")
+                ep_slug = ep_item.get("used_slug")
+                ep_name = ep_item.get("episode_text") or ""
 
-                # Episode number (first link's text usually)
-                ep_num = None
-                try:
-                    ep_num_text = sezon_secici.select_text("a", links[0])
-                    ep_num = int(ep_num_text) if ep_num_text else None
-                except:
-                    pass
-
-                episodeses.append(Episode(
+                # Filter out duplicate language entries if any (we just need one link per episode)
+                # Usually they share the same slug for the episode page
+                if any(e.season == season_num and e.episode == ep_num for e in episodes):
+                    continue
+
+                episodes.append(Episode(
                     season = season_num,
                     episode = ep_num,
                     title = ep_name,
-                    url = ep_href
+                    url = self.fix_url(f"{self.main_url}/{ep_slug}")
                 ))
 
         return SeriesInfo(
@@ -209,8 +199,9 @@ class Dizilla(PluginBase):
             title = title,
             description = description,
             tags = tags,
+            rating = str(rating) if rating else None,
             year = str(year) if year else None,
-            episodes = episodeses,
+            episodes = episodes,
             actors = actors
         )
 
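Dizilla (and RoketDizi and SelcukFlix further down) now reads the page's Next.js __NEXT_DATA__ payload and unwraps its secureData field instead of scraping the rendered DOM. In RoketDizi and SelcukFlix that field is plain base64-encoded JSON; Dizilla routes it through its own decrypt_response() helper. A minimal sketch of the base64 variant (read_secure_data is an illustrative name; the props.pageProps.secureData path and the contentItem key follow the diff):

    import base64, json
    from bs4 import BeautifulSoup

    def read_secure_data(html: str) -> dict:
        soup = BeautifulSoup(html, "html.parser")
        script = soup.select_one("script#__NEXT_DATA__")
        if not script:
            return {}
        next_data = json.loads(script.get_text())
        secure_raw = next_data.get("props", {}).get("pageProps", {}).get("secureData")
        if not secure_raw:
            return {}
        # secureData is base64-encoded JSON on these sites
        return json.loads(base64.b64decode(secure_raw.strip('"')).decode("utf-8"))

    # read_secure_data(page_html).get("contentItem", {}).get("original_title")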
@@ -108,18 +108,22 @@ class FilmBip(PluginBase):
         tags = secici.select_all_text("div.series-profile-type.tv-show-profile-type a")
 
         # XPath yerine regex kullanarak yıl, süre vs. çıkarma
-        year = secici.regex_first(r'(?i)Yapım yılı.*?<p[^>]*>(\d{4})</p>', secici.html)
+        year = secici.regex_first(r"(?is)Yap\u0131m y\u0131l\u0131.*?<p[^>]*>(.*?)<\/p>")
+        if not year:
+            # Fallback: Başlığın sonundaki parantezli yılı yakala
+            year = secici.regex_first(r"\((\d{4})\)", title)
 
-        duration = secici.regex_first(r'(?i)Süre.*?<p[^>]*>(\d+)', secici.html)
+        duration_raw = secici.regex_first(r"(?is)S\u00fcre.*?<p[^>]*>(.*?)<\/p>")
+        duration = secici.regex_first(r"(\d+)", duration_raw) if duration_raw else None
 
-        rating = secici.regex_first(r'(?i)IMDB Puanı.*?<span[^>]*>([0-9.]+)</span>', secici.html)
+        rating = secici.regex_first(r"(?is)IMDB Puan\u0131.*?<span[^>]*>(.*?)<\/span>")
 
         actors = [img.attrs.get("alt") for img in secici.select("div.series-profile-cast ul li a img") if img.attrs.get("alt")]
 
         return MovieInfo(
             url = url,
             poster = self.fix_url(poster) if poster else None,
-            title = self.clean_title(title) if title else "",
+            title = HTMLHelper(title).regex_replace(r"\(\d{4}\)", "").strip() if title else "",
             description = description,
             tags = tags,
             year = year,
@@ -91,7 +91,7 @@ class FilmMakinesi(PluginBase):
         year = secici.select_text("span.date a") or ""
 
         actors = secici.select_all_text("div.cast-name")
-        tags = secici.select_all_text("div.genre a")
+        tags = [a.text(strip=True) for a in secici.select("div.type a") if "/tur/" in (a.attrs.get("href") or "")]
 
         duration = None
         duration_text = secici.select_text("div.time") or None
@@ -93,19 +93,13 @@ class FullHDFilmizlesene(PluginBase):
 
         tags = secici.select_all_text("a[rel='category tag']")
 
-        # Rating: normalize-space yerine doğrudan class ile ve son kelimeyi al
-        rating_text = secici.select_text("div.puanx-puan") or None
-        rating = None
-        if rating_text:
-            parts = rating_text.split()
-            rating = parts[-1] if parts else None
+        # Rating: regex ile sayısal değeri yakala
+        rating_text = secici.select_text("div.puanx-puan") or ""
+        rating = secici.regex_first(r"(\d+\.\d+|\d+)", rating_text)
 
         # Year: ilk yıl formatında değer
-        year_text = secici.select_text("div.dd a.category") or None
-        year = None
-        if year_text:
-            parts = year_text.split()
-            year = parts[0] if parts else None
+        year_text = secici.select_text("div.dd a.category") or ""
+        year = secici.regex_first(r"(\d{4})", year_text)
 
         # Actors: nth-child yerine tüm li'leri alıp 2. index
         lis = secici.select("div.film-info ul li")
@@ -113,11 +107,8 @@ class FullHDFilmizlesene(PluginBase):
         if len(lis) >= 2:
             actors = secici.select_all_text("a > span", lis[1])
 
-        duration = "0"
-        duration_text = secici.select_text("span.sure") or None
-        if duration_text:
-            duration_parts = duration_text.split()
-            duration = duration_parts[0] if duration_parts else "0"
+        # Duration: regex ile yakala (örn: 201 dk)
+        duration = secici.regex_first(r"(\d+)\s*(?:dk|dakika)", html_text)
 
         return MovieInfo(
             url = url,
@@ -113,6 +113,10 @@ class JetFilmizle(PluginBase):
         year = secici.extract_year("div.yap")
 
         actors = secici.select_all_text("div[itemprop='actor'] a span")
+        if not actors: # Fallback to img alt
+            actors = [img.attrs.get("alt") for img in secici.select("div.oyuncular div.oyuncu img") if img.attrs.get("alt")]
+
+        duration = secici.regex_first(r"(\d+)\s*dk", istek.text)
 
         return MovieInfo(
             url = url,
@@ -122,7 +126,8 @@ class JetFilmizle(PluginBase):
             tags = tags,
             rating = rating,
             year = year,
-            actors = actors
+            actors = actors,
+            duration = int(duration) if duration else None
         )
 
     async def load_links(self, url: str) -> list[ExtractResult]:
@@ -91,7 +91,8 @@ class KultFilmler(PluginBase):
         time_text = secici.select_text("li.time span")
         duration = secici.regex_first(r"(\d+)", time_text) if time_text else None
 
-        rating = secici.select_text("div.imdb-count")
+        rating_text = secici.select_text("div.imdb-count")
+        rating = secici.regex_first(r"(\d+\.\d+|\d+)", rating_text) if rating_text else None
 
         actors = [a.text(strip=True) for a in secici.select("div.actors a") if a.text(strip=True)]
 
@@ -96,27 +96,49 @@ class RecTV(PluginBase):
 
                     episodes.append(ep_model)
 
+                # Süreyi dakikaya çevir (Örn: "1h 59min")
+                duration_raw = veri.get("duration")
+                duration = None
+                if duration_raw:
+                    try:
+                        h = int(HTMLHelper(duration_raw).regex_first(r"(\d+)h") or 0)
+                        m = int(HTMLHelper(duration_raw).regex_first(r"(\d+)min") or 0)
+                        duration = h * 60 + m
+                    except: pass
+
                 return SeriesInfo(
                     url = url,
                     poster = self.fix_url(veri.get("image")),
                     title = veri.get("title"),
                     description = veri.get("description"),
                     tags = [genre.get("title") for genre in veri.get("genres")] if veri.get("genres") else [],
-                    rating = veri.get("imdb") or veri.get("rating"),
-                    year = veri.get("year"),
+                    rating = str(veri.get("imdb") or veri.get("rating") or ""),
+                    year = str(veri.get("year") or ""),
                     actors = [],
+                    duration = duration,
                     episodes = episodes
                 )
             case _:
+                # Süreyi dakikaya çevir
+                duration_raw = veri.get("duration")
+                duration = None
+                if duration_raw:
+                    try:
+                        h = int(HTMLHelper(duration_raw).regex_first(r"(\d+)h") or 0)
+                        m = int(HTMLHelper(duration_raw).regex_first(r"(\d+)min") or 0)
+                        duration = h * 60 + m
+                    except: pass
+
                 return MovieInfo(
                     url = url,
                     poster = self.fix_url(veri.get("image")),
                     title = veri.get("title"),
                     description = veri.get("description"),
                     tags = [genre.get("title") for genre in veri.get("genres")] if veri.get("genres") else [],
-                    rating = veri.get("imdb") or veri.get("rating"),
-                    year = veri.get("year"),
-                    actors = []
+                    rating = str(veri.get("imdb") or veri.get("rating") or ""),
+                    year = str(veri.get("year") or ""),
+                    actors = [],
+                    duration = duration
                 )
 
     async def load_links(self, url: str) -> list[ExtractResult]:
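The RecTV API reports duration as a display string such as "1h 59min"; the blocks above convert it to whole minutes. The same conversion with plain re (to_minutes is an illustrative name):

    import re

    def to_minutes(raw: str | None) -> int | None:
        if not raw:
            return None
        hours = int(m.group(1)) if (m := re.search(r"(\d+)h", raw)) else 0
        mins = int(m.group(1)) if (m := re.search(r"(\d+)min", raw)) else 0
        total = hours * 60 + mins
        return total or None

    print(to_minutes("1h 59min"))  # -> 119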
@@ -84,89 +84,87 @@ class RoketDizi(PluginBase):
         except Exception:
             return []
 
-    async def load_item(self, url: str) -> SeriesInfo:
-        # Note: Handling both Movie and Series logic in one, returning SeriesInfo generally or MovieInfo
+    async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
         resp = await self.httpx.get(url)
         sel = HTMLHelper(resp.text)
-        html_text = resp.text
-
-        title = sel.select_text("h1.text-white")
-
-        poster = sel.select_attr("div.w-full.page-top img", "src")
-
-        description = sel.select_text("div.mt-2.text-sm")
-
-        # Tags - genre bilgileri (Detaylar bölümünde)
-        tags = []
-        genre_text = sel.select_text("h3.text-white.opacity-90")
-        if genre_text:
-            tags = [t.strip() for t in genre_text.split(",")]
-
-        # Rating
-        rating = sel.select_text("span.text-white.text-sm.font-bold")
-
-        # Year ve Actors - Detaylar (Details) bölümünden
-        year = None
-        actors = []
+
+        next_data_text = sel.select_text("script#__NEXT_DATA__")
+        if not next_data_text:
+            return SeriesInfo(url=url, title=sel.select_text("h1") or "Bilinmeyen")
 
-        # Detaylar bölümündeki tüm flex-col div'leri al
-        detail_items = sel.select("div.flex.flex-col")
-        for item in detail_items:
-            label = sel.select_text("span.text-base", item)
-            value = sel.select_text("span.text-sm.opacity-90", item)
+        try:
+            next_data = json.loads(next_data_text)
+            secure_data_raw = next_data["props"]["pageProps"]["secureData"]
+            secure_data = json.loads(base64.b64decode(secure_data_raw).decode('utf-8'))
 
-            label = label if label else None
-            value = value if value else None
+            content_item = secure_data.get("contentItem", {})
+            content = secure_data.get("content", {}).get("result", {})
 
-            if label and value:
-                # Yayın tarihi (yıl)
-                if label == "Yayın tarihi":
-                    # "16 Ekim 2018" formatından yılı çıkar
-                    year = HTMLHelper(value).regex_first(r'\d{4}')
-                # Yaratıcılar veya Oyuncular
-                elif label in ["Yaratıcılar", "Oyuncular"]:
-                    if value:
-                        actors.append(value)
-
-        # Check urls for episodes
-        all_urls = HTMLHelper(html_text).regex_all(r'"url":"([^"]*)"')
-        is_series = any("bolum-" in u for u in all_urls)
-
-        episodes = []
-        if is_series:
-            # Dict kullanarak duplicate'leri önle ama sıralı tut
-            episodes_dict = {}
-            for u in all_urls:
-                if "bolum" in u and u not in episodes_dict:
-                    season = HTMLHelper(u).regex_first(r'/sezon-(\d+)')
-                    ep_num = HTMLHelper(u).regex_first(r'/bolum-(\d+)')
-
-                    season = int(season) if season else 1
-                    episode_num = int(ep_num) if ep_num else 1
-
-                    # Key olarak (season, episode) tuple kullan
-                    key = (season, episode_num)
-                    episodes_dict[key] = Episode(
-                        season = season,
-                        episode = episode_num,
-                        title = f"{season}. Sezon {episode_num}. Bölüm",
-                        url = self.fix_url(u)
-                    )
-
-            # Sıralı liste oluştur
-            episodes = [episodes_dict[key] for key in sorted(episodes_dict.keys())]
-
-        return SeriesInfo(
-            title = title,
-            url = url,
-            poster = self.fix_url(poster) if poster else None,
-            description = description,
-            tags = tags,
-            rating = rating,
-            actors = actors,
-            episodes = episodes,
-            year = year
-        )
+            title = content_item.get("original_title") or content_item.get("culture_title")
+            poster = content_item.get("poster_url") or content_item.get("face_url")
+            description = content_item.get("description")
+            rating = str(content_item.get("imdb_point") or "")
+            year = str(content_item.get("release_year") or "")
+            tags = content_item.get("categories", "").split(",")
+
+            # Actors extraction from getSerieCastsById or getMovieCastsById
+            actors = []
+            casts_data = content.get("getSerieCastsById") or content.get("getMovieCastsById")
+            if casts_data and casts_data.get("result"):
+                actors = [cast.get("name") for cast in casts_data["result"] if cast.get("name")]
+
+            # Episodes extraction
+            episodes = []
+            if "Series" in str(content.get("FindedType")):
+                # Check for episodes in SecureData -> RelatedResults -> getEpisodeSources (this might be for the current episode)
+                # Usually full episode list isn't in secureData, but we can get it from HTML or another API
+                # However, many times Next.js pages have them in props
+                # Let's fallback to the previous regex method for episodes if not in JSON
+                all_urls = HTMLHelper(resp.text).regex_all(r'"url":"([^"]*)"')
+                episodes_dict = {}
+                for u in all_urls:
+                    if "bolum" in u and u not in episodes_dict:
+                        s_match = HTMLHelper(u).regex_first(r'/sezon-(\d+)')
+                        e_match = HTMLHelper(u).regex_first(r'/bolum-(\d+)')
+                        s_val = int(s_match) if s_match else 1
+                        e_val = int(e_match) if e_match else 1
+                        episodes_dict[(s_val, e_val)] = Episode(
+                            season = s_val,
+                            episode = e_val,
+                            title = f"{s_val}. Sezon {e_val}. Bölüm",
+                            url = self.fix_url(u)
+                        )
+                episodes = [episodes_dict[key] for key in sorted(episodes_dict.keys())]
+
+                return SeriesInfo(
+                    url = url,
+                    poster = self.fix_url(poster) if poster else None,
+                    title = self.clean_title(title),
+                    description = description,
+                    tags = tags,
+                    rating = rating,
+                    year = year,
+                    actors = actors,
+                    episodes = episodes
+                )
+            else:
+                return MovieInfo(
+                    url = url,
+                    poster = self.fix_url(poster) if poster else None,
+                    title = self.clean_title(title),
+                    description = description,
+                    tags = tags,
+                    rating = rating,
+                    year = year,
+                    actors = actors
+                )
+
+        except Exception:
+            # Fallback to simple extraction if JSON parsing fails
+            return SeriesInfo(
+                url = url,
+                title = self.clean_title(sel.select_text("h1")) or "Bilinmeyen"
+            )
 
     async def load_links(self, url: str) -> list[ExtractResult]:
         resp = await self.httpx.get(url)
@@ -184,76 +184,99 @@ class SelcukFlix(PluginBase):
     async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
         resp = await self.httpx.get(url)
         sel = HTMLHelper(resp.text)
+
+        next_data_text = sel.select_text("script#__NEXT_DATA__")
+        if not next_data_text:
+            return SeriesInfo(url=url, title=sel.select_text("h1") or "Bilinmeyen")
 
-        next_data = sel.select_text("script#__NEXT_DATA__")
-        if not next_data:
-            return None
-
-        data = json.loads(next_data)
-        secure_data = data["props"]["pageProps"]["secureData"]
-        raw_data = base64.b64decode(secure_data.replace('"', ''))
         try:
-            decoded_str = raw_data.decode('utf-8')
-        except UnicodeDecodeError:
-            decoded_str = raw_data.decode('iso-8859-1')
-
-        content_details = json.loads(decoded_str)
-        item = content_details.get("contentItem", {})
-
-        title = item.get("original_title") or item.get("originalTitle") or ""
-        poster = self.clean_image_url(item.get("poster_url") or item.get("posterUrl"))
-        description = item.get("description") or item.get("used_description")
-        rating = str(item.get("imdb_point") or item.get("imdbPoint", ""))
-        year = item.get("release_year") or item.get("releaseYear")
-        duration = item.get("total_minutes") or item.get("totalMinutes")
-
-        series_data = content_details.get("relatedData", {}).get("seriesData")
-        if not series_data and "RelatedResults" in content_details:
-            series_data = content_details["RelatedResults"].get("getSerieSeasonAndEpisodes", {}).get("result")
-            if series_data and isinstance(series_data, list):
-                pass
-
-        # Dizi mi film mi kontrol et (Kotlin referansı)
-        if series_data:
-            episodes = []
-            seasons_list = []
-            if isinstance(series_data, dict):
-                seasons_list = series_data.get("seasons", [])
-            elif isinstance(series_data, list):
-                seasons_list = series_data
-
-            for season in seasons_list:
-                if not isinstance(season, dict): continue
-                s_no = season.get("season_no") or season.get("seasonNo")
-                ep_list = season.get("episodes", [])
-                for ep in ep_list:
-                    episodes.append(Episode(
-                        season = s_no,
-                        episode = ep.get("episode_no") or ep.get("episodeNo"),
-                        title = ep.get("ep_text") or ep.get("epText"),
-                        url = self.fix_url(ep.get("used_slug") or ep.get("usedSlug"))
-                    ))
+            next_data = json.loads(next_data_text)
+            secure_data_raw = next_data["props"]["pageProps"].get("secureData")
+            if not secure_data_raw:
+                return SeriesInfo(url=url, title=sel.select_text("h1") or "Bilinmeyen")
 
-            return SeriesInfo(
-                title = title,
-                url = url,
-                poster = poster,
-                description = description,
-                rating = rating,
-                year = year,
-                episodes = episodes
-            )
-        else:
-            # Film ise MovieInfo döndür
-            return MovieInfo(
-                title = title,
-                url = url,
-                poster = poster,
-                description = description,
-                rating = rating,
-                year = year,
-                duration = duration
-            )
+            # Clean possible quotes from string before decoding
+            if isinstance(secure_data_raw, str):
+                secure_data_raw = secure_data_raw.strip('"')
+
+            decoded_str = base64.b64decode(secure_data_raw).decode('utf-8')
+            content_details = json.loads(decoded_str)
+
+            # Sometimes content_details might be a string (double encoded)
+            if isinstance(content_details, str):
+                content_details = json.loads(content_details)
+
+            print(f"DEBUG: type(content_details)={type(content_details)}")
+            item = content_details.get("contentItem", {})
+            print(f"DEBUG: type(item)={type(item)}")
+            related_results = content_details.get("RelatedResults", {})
+
+            title = item.get("original_title") or item.get("culture_title") or item.get("originalTitle") or ""
+            poster = self.clean_image_url(item.get("poster_url") or item.get("posterUrl") or item.get("face_url"))
+            description = item.get("description") or item.get("used_description")
+            rating = str(item.get("imdb_point") or item.get("imdbPoint") or "")
+            year = str(item.get("release_year") or item.get("releaseYear") or "")
+            duration = item.get("total_minutes") or item.get("totalMinutes")
+
+            tags = []
+            tags_raw = item.get("category_names") or item.get("categoryNames") or item.get("categories")
+            if isinstance(tags_raw, str):
+                tags = [t.strip() for t in tags_raw.split(",")]
+            elif isinstance(tags_raw, list):
+                tags = [c.get("title") if isinstance(c, dict) else str(c) for c in tags_raw]
+
+            actors = []
+            actors_raw = item.get("actor_names") or item.get("actorNames")
+            if isinstance(actors_raw, str):
+                actors = [a.strip() for a in actors_raw.split(",")]
+
+            # Casts from RelatedResults
+            casts_data = related_results.get("getSerieCastsById") or related_results.get("getMovieCastsById")
+            if casts_data and isinstance(casts_data, dict) and casts_data.get("result"):
+                actors = [cast.get("name") for cast in casts_data["result"] if cast.get("name")]
+
+            series_data = related_results.get("getSerieSeasonAndEpisodes")
+            if series_data and isinstance(series_data, dict) and series_data.get("result"):
+                episodes = []
+                for season in series_data["result"]:
+                    s_no = season.get("season_no") or season.get("seasonNo") or 1
+                    for ep in season.get("episodes", []):
+                        ep_slug = ep.get("used_slug") or ep.get("usedSlug")
+                        if ep_slug:
+                            episodes.append(Episode(
+                                season = s_no,
+                                episode = ep.get("episode_no") or ep.get("episodeNo") or 1,
+                                title = ep.get("ep_text") or ep.get("epText") or "",
+                                url = self.fix_url(ep_slug)
+                            ))
+
+                return SeriesInfo(
+                    url = url,
+                    poster = poster,
+                    title = self.clean_title(title),
+                    description = description,
+                    tags = tags,
+                    rating = rating,
+                    year = year,
+                    actors = actors,
+                    duration = duration,
+                    episodes = episodes
+                )
+            else:
+                return MovieInfo(
+                    url = url,
+                    poster = poster,
+                    title = self.clean_title(title),
+                    description = description,
+                    tags = tags,
+                    rating = rating,
+                    year = year,
+                    actors = actors,
+                    duration = duration
+                )
+
+        except Exception:
+            return SeriesInfo(url=url, title=self.clean_title(sel.select_text("h1")) or "Bilinmeyen")
 
     async def load_links(self, url: str) -> list[ExtractResult]:
         resp = await self.httpx.get(url)
@@ -33,8 +33,8 @@ class SetFilmIzle(PluginBase):
         f"{main_url}/tur/western/" : "Western"
     }
 
-    def _get_nonce(self, nonce_type: str = "video_nonce", referer: str = None) -> str:
-        """Site cache'lenmiş nonce'ları expire olabiliyor, fresh nonce al"""
+    def _get_nonce(self, nonce_type: str = "video", referer: str = None) -> str:
+        """Site cache'lenmiş nonce'ları expire olabiliyor, fresh nonce al veya sayfadan çek"""
         try:
             resp = self.cloudscraper.post(
                 f"{self.main_url}/wp-admin/admin-ajax.php",
@@ -45,8 +45,19 @@ class SetFilmIzle(PluginBase):
                 },
                 data = "action=st_cache_refresh_nonces"
             )
-            nonces = resp.json().get("data", {}).get("nonces", {})
-            return nonces.get(nonce_type, "")
+            data = resp.json()
+            if data and data.get("success"):
+                nonces = data.get("data", {}).get("nonces", {})
+                return nonces.get(nonce_type if nonce_type != "search" else "dt_ajax_search", "")
+        except:
+            pass
+
+        # AJAX başarısızsa sayfadan çekmeyi dene
+        try:
+            main_resp = self.cloudscraper.get(referer or self.main_url)
+            # STMOVIE_AJAX = { ... nonces: { search: "...", ... } }
+            nonce = HTMLHelper(main_resp.text).regex_first(rf'"{nonce_type}":\s*"([^"]+)"')
+            return nonce or ""
         except:
             return ""
 
@@ -112,7 +123,7 @@ class SetFilmIzle(PluginBase):
         return results
 
     async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
-        istek = await self.httpx.get(url)
+        istek = self.cloudscraper.get(url)
         secici = HTMLHelper(istek.text)
         html_text = istek.text
 
@@ -123,15 +134,21 @@ class SetFilmIzle(PluginBase):
 
         description = secici.select_text("div.wp-content p")
 
-        year_text = secici.select_text("div.extra span.C a")
-        year = secici.regex_first(r"\d{4}", year_text) if year_text else None
+        rating = secici.select_text("b#repimdb strong") or secici.regex_first(r'repimdb"><strong>\s*([^<]+)</strong>', html_text)
+
+        # Yıl için info bölümünden veya regex ile yakala
+        year = secici.regex_first(r'(\d{4})', secici.select_text("div.extra span.valor") or secici.select_text("span.valor") or "")
+        if not year:
+            year = secici.regex_first(r'<span>(\d{4})</span>', html_text) or secici.regex_first(r'(\d{4})', html_text)
 
         tags = [a.text(strip=True) for a in secici.select("div.sgeneros a") if a.text(strip=True)]
 
         duration_text = secici.select_text("span.runtime")
         duration = int(secici.regex_first(r"\d+", duration_text)) if duration_text and secici.regex_first(r"\d+", duration_text) else None
 
-        actors = [span.text(strip=True) for span in secici.select("span.valor a > span") if span.text(strip=True)]
+        actors = [a.text(strip=True) for a in secici.select("span.valor a") if "/oyuncu/" in (a.attrs.get("href") or "")]
+        if not actors:
+            actors = secici.regex_all(r'href="[^"]*/oyuncu/[^"]*">([^<]+)</a>')
 
         trailer = None
         if trailer_id := secici.regex_first(r'embed/([^?]*)\?rel', html_text):
@@ -179,6 +196,7 @@ class SetFilmIzle(PluginBase):
             title = title,
             description = description,
             tags = tags,
+            rating = rating,
             year = year,
             duration = duration,
             actors = actors,
@@ -191,6 +209,7 @@ class SetFilmIzle(PluginBase):
             title = title,
             description = description,
             tags = tags,
+            rating = rating,
             year = year,
             duration = duration,
             actors = actors
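SetFilmIzle's nonce lookup above first asks the st_cache_refresh_nonces AJAX action for fresh nonces and, if that fails, scrapes the value out of the page's inline JavaScript config. A simplified sketch of that two-step lookup with a requests-style session (the endpoint, action string, and JSON key pattern come from the diff; get_nonce and the session argument are otherwise illustrative):

    import re
    import requests

    def get_nonce(session: requests.Session, main_url: str, nonce_type: str = "video") -> str:
        # Step 1: ask the AJAX endpoint for a fresh nonce map
        try:
            resp = session.post(
                f"{main_url}/wp-admin/admin-ajax.php",
                data="action=st_cache_refresh_nonces",
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )
            payload = resp.json()
            if payload.get("success"):
                return payload.get("data", {}).get("nonces", {}).get(nonce_type, "")
        except Exception:
            pass
        # Step 2: fall back to the nonce embedded in the page's inline JS config
        try:
            page = session.get(main_url)
            match = re.search(rf'"{nonce_type}":\s*"([^"]+)"', page.text)
            return match.group(1) if match else ""
        except Exception:
            return ""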
@@ -90,14 +90,17 @@ class YabanciDizi(PluginBase):
         poster = sel.select_attr("meta[property='og:image']", "content")
         description = sel.select_text("p#tv-series-desc")
 
-        year = sel.select_text("td div.truncate")
-        if year:
-            year = year.strip()
+        # Try to extract year from table first
+        year_cell = sel.select_text("td div.truncate")
+        year = None
+        if year_cell:
+            year_match = sel.regex_first(r"(\d{4})", year_cell)
+            if year_match:
+                year = year_match
 
         tags = []
         rating = None
         duration = None
-        year = None
         actors = []
         for item in sel.select("div.item"):
             text = item.text(strip=True)
@@ -174,13 +177,14 @@ class YabanciDizi(PluginBase):
         )
 
     async def load_links(self, url: str) -> list[ExtractResult]:
-        resp = await self.httpx.get(url, headers={"Referer": f"{self.main_url}/"})
+        # Use cloudscraper to bypass Cloudflare
+        resp = self.cloudscraper.get(url, headers={"Referer": f"{self.main_url}/"})
         sel = HTMLHelper(resp.text)
 
         results = []
 
-        # Method 1: alternatives-for-this
-        for alt in sel.select("div.alternatives-for-this div.item:not(.active)"):
+        # Method 1: alternatives-for-this (include active too)
+        for alt in sel.select("div.alternatives-for-this div.item"):
             data_hash = alt.attrs.get("data-hash")
             data_link = alt.attrs.get("data-link")
             q_type = alt.attrs.get("data-querytype")
@@ -188,7 +192,7 @@ class YabanciDizi(PluginBase):
             if not data_hash or not data_link: continue
 
             try:
-                post_resp = await self.httpx.post(
+                post_resp = self.cloudscraper.post(
                     f"{self.main_url}/ajax/service",
                     data = {
                         "link" : data_link,
@@ -219,7 +223,7 @@ class YabanciDizi(PluginBase):
             data_eid = id_el.attrs.get("data-eid")
 
             try:
-                post_resp = await self.httpx.post(
+                post_resp = self.cloudscraper.post(
                     f"{self.main_url}/ajax/service",
                     data = {
                         "e_id" : data_eid,
@@ -244,9 +248,10 @@ class YabanciDizi(PluginBase):
 
         return results
 
-    async def _fetch_and_extract(self, iframe_url, prefix=""):
+    def _fetch_and_extract_sync(self, iframe_url, prefix=""):
+        """Synchronous helper for _fetch_and_extract using cloudscraper."""
         # Initial fetch
-        resp = await self.httpx.get(
+        resp = self.cloudscraper.get(
             iframe_url,
             headers = {"Referer": f"{self.main_url}/"},
             cookies = {"udys": "1760709729873", "level": "1"}
@@ -254,11 +259,12 @@ class YabanciDizi(PluginBase):
 
         # Handle "Lütfen bekleyiniz" check from Kotlin
         if "Lütfen bekleyiniz" in resp.text:
-            await asyncio.sleep(1)
-            timestamp = int(time.time())
+            import time as time_module
+            time_module.sleep(1)
+            timestamp = int(time_module.time())
             # Retry with t=timestamp as in Kotlin
             sep = "&" if "?" in iframe_url else "?"
-            resp = await self.httpx.get(
+            resp = self.cloudscraper.get(
                 f"{iframe_url}{sep}t={timestamp}",
                 headers = {"Referer": f"{self.main_url}/"},
                 cookies = resp.cookies # Use cookies from first response
@@ -267,6 +273,11 @@ class YabanciDizi(PluginBase):
 
         sel = HTMLHelper(resp.text)
         final_iframe = sel.select_attr("iframe", "src")
+        return final_iframe
+
+    async def _fetch_and_extract(self, iframe_url, prefix=""):
+        final_iframe = self._fetch_and_extract_sync(iframe_url, prefix)
+
         if final_iframe:
             final_url = self.fix_url(final_iframe)
             return await self.extract(final_url, referer=f"{self.main_url}/", prefix=prefix)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: KekikStream
-Version: 2.3.7
+Version: 2.3.8
 Summary: terminal üzerinden medya içeriği aramanızı ve VLC/MPV gibi popüler medya oynatıcılar aracılığıyla doğrudan izlemenizi sağlayan modüler ve genişletilebilir bir bıdı bıdı
 Home-page: https://github.com/keyiflerolsun/KekikStream
 Author: keyiflerolsun
@@ -29,7 +29,7 @@ KekikStream/Extractors/JetTv.py,sha256=2X1vYDQ0hxBTcpnE_XTcbw9tMS1aXFURcobnPdN8Z
 KekikStream/Extractors/MailRu.py,sha256=xQVCWwYqNoG5T43VAW1_m0v4e80FbO-1pNPKkwhTccU,1218
 KekikStream/Extractors/MixPlayHD.py,sha256=u5fUePHfjOI3n7KlNsWhXIv7HA_NMj5bPw1ug-eiXLU,1557
 KekikStream/Extractors/MixTiger.py,sha256=4VbOYgE4s5H-BGVvJI0AI57M-WBWqnek_LGfCFHAucw,2116
-KekikStream/Extractors/MolyStream.py,sha256=SGKr4HdfxDmRk6nPgQUjSbdqFCKWzl7xWxRJtjjFMng,1420
+KekikStream/Extractors/MolyStream.py,sha256=sEyJE8L_49GOVOhkA_FvZjwXhzLhyue0gw3gQT-dW8A,3125
 KekikStream/Extractors/Odnoklassniki.py,sha256=hajKPhWKiIuu_i441TXrWVORpLo2CdTcoJiyU3WQAuI,4038
 KekikStream/Extractors/PeaceMakerst.py,sha256=BJ5Cv5X2GEaMTwn_XFpAVVmts1h5xGno3l5rL7Ugob4,2335
 KekikStream/Extractors/PixelDrain.py,sha256=xPud8W_hqLUXJSU5O-MiCOblcmzrlDJpnEtuxr4ZdI4,1011
@@ -45,30 +45,30 @@ KekikStream/Extractors/TurboImgz.py,sha256=zLGMUwoThZ_LoW0bThyV8TSCcIYHsUme1RTqq
 KekikStream/Extractors/TurkeyPlayer.py,sha256=zdX0IOO3M-kgAYWex2WwJJu9aGf8WhOY-ZIrRmZRiC0,1246
 KekikStream/Extractors/VCTPlay.py,sha256=1BCl2_vVIrwvG56LCzl2KE5g2CUaMAhzImOZMdZpZCQ,1377
 KekikStream/Extractors/VidHide.py,sha256=38ly2Gi44rxIyjeM-6H_HjmwXbKEpSqK4ba16ToQUYs,2707
-KekikStream/Extractors/VidMoly.py,sha256=XYgqlyCUdDNN7Naxa_rjX0WoB5zt17Dda_TdNEDpKOE,4974
+KekikStream/Extractors/VidMoly.py,sha256=4k5z68MrUASUuDMEWZ_Ynvp1Z7njjRcXPBZAnpbtGB4,5500
 KekikStream/Extractors/VidMoxy.py,sha256=dM7yBfrXSESvYyqc2uP_gLSgV61gpIAY940NAQ58Mts,1843
 KekikStream/Extractors/VidPapi.py,sha256=9y8TN-o4C3JvRyr2V8Ox908tFE1I2BItQLHZlqs8AuI,3175
 KekikStream/Extractors/VideoSeyred.py,sha256=KJxbJkuupmn4wWBj_ejnoDvmjUXwEXkzStYha3EsSpA,1995
 KekikStream/Extractors/YTDLP.py,sha256=Hy8loCSFSquu2zaL3INord-Jm6T8CM6K2-VcDA2K79g,7390
 KekikStream/Extractors/YildizKisaFilm.py,sha256=R_JlrOVeMiDlXYcuTdItnKvidyx8_u3B14fSrxew2aE,1316
 KekikStream/Plugins/BelgeselX.py,sha256=smoLjEJTdptjb7h4m6LhG7ZUmJQtIhYyi0CUFBsk970,8696
-KekikStream/Plugins/DiziBox.py,sha256=KZGWhs6p2-hUTsd-fjz2fsmGEkanL4At2PI8qHAoDm4,10541
-KekikStream/Plugins/DiziPal.py,sha256=CTCGlknBUQIzubhvjexQoqiT3sHni34lpxiTLTemCGo,10299
-KekikStream/Plugins/DiziWatch.py,sha256=Y5-tBK316WdJhc-OZsYiwjSdfjcdz-A78o_bDP4qu08,8847
-KekikStream/Plugins/DiziYou.py,sha256=ZV80_XHv1nN0wRGgJEtnoJcgFX7S_iVSKFGiFlAqcGQ,11277
-KekikStream/Plugins/Dizilla.py,sha256=apDLGe3Fd-13nNyhcV_TFQxqX4bOZZZxEEGLonKQzS4,13803
-KekikStream/Plugins/FilmBip.py,sha256=pzvleSRZCDHh2tx8Q0JwTFiH9TexNCRnFpr3MCiMb0E,6087
-KekikStream/Plugins/FilmMakinesi.py,sha256=WaCQD7tsZdPbeU35SEnBVRZt2SzUiAQOBRBZR6drvQ4,7797
+KekikStream/Plugins/DiziBox.py,sha256=CKfbMAtyE6J7QgD19iryzhD5VJlc0Tz2s4nSYW7XSrg,11354
+KekikStream/Plugins/DiziPal.py,sha256=tHUqAN8UvvzBAkJaGS4hFvdLo-eRO4EdQ_C9HYkj_0U,10576
+KekikStream/Plugins/DiziWatch.py,sha256=NK9xccX4-HwWq67FFVfqDDzKwCDc_HvDJDW2QIvqjig,8900
+KekikStream/Plugins/DiziYou.py,sha256=4KOvxHg-84mUHuHWsXoYlIG2SX4DCV2dm6GblHQ5wGo,11162
+KekikStream/Plugins/Dizilla.py,sha256=PLN0pOkWB4IaGC7Toe-8f5rksmaNm_WfdSFMTAtt--0,13624
+KekikStream/Plugins/FilmBip.py,sha256=40eSECwMHSKTWoUmF90UXxTJkbx6f71J_98Ht4Hnoj8,6352
+KekikStream/Plugins/FilmMakinesi.py,sha256=0bVN28aCEfrxrvXrGyL6XtgipzUKUD9vN2QkHie2gY0,7859
 KekikStream/Plugins/FilmModu.py,sha256=ou1BrFNR4RQaJdxVqPB5FI8vnQ0UmD-siVdwLnpp7x0,7147
 KekikStream/Plugins/FullHDFilm.py,sha256=08NF5qEydmxT0rGYDWpTOSIYSad8Uv1H1V8yCKG_568,10525
-KekikStream/Plugins/FullHDFilmizlesene.py,sha256=QD1mQwwPFkNoWgBdLaSPFq22S1N1RSKspyyTqz-1Wjk,7145
+KekikStream/Plugins/FullHDFilmizlesene.py,sha256=OpdndVQ7LjZ-sJdILGEqhYX-0D18yRqTS7Kpu-HrXmY,6870
 KekikStream/Plugins/HDFilmCehennemi.py,sha256=jntMKgE81k_jl3pFzJI3akqvi3g8U961dVx7bj5Pf2w,13140
-KekikStream/Plugins/JetFilmizle.py,sha256=I76CCdH8AJAfjPr5jejtndidcGTBMrVfTFNjnZwp9yo,7801
-KekikStream/Plugins/KultFilmler.py,sha256=e5kXjGJYiI15PQhSADIGznqX7MlA0lK5HYnfFwx_cpM,9383
-KekikStream/Plugins/RecTV.py,sha256=E5ZyWU_lqibwcRm9amb_fqdXpc8qdMkekbHVxY3UmuU,7268
-KekikStream/Plugins/RoketDizi.py,sha256=92c3_UFIhM1SkB0Ybnp53A06VtGw2GmXtr-xmiKeJi0,8444
-KekikStream/Plugins/SelcukFlix.py,sha256=iHLO52_726gzmKAsqgW5ki2_V16fdGAZVjvaqqnQozY,13601
-KekikStream/Plugins/SetFilmIzle.py,sha256=pM4DgR2La9jUacQPRr4dilbfnljcC9l_o1OUipJh3Eg,10418
+KekikStream/Plugins/JetFilmizle.py,sha256=9sH9Z3y4SP8vta9v-gJOQOxFWAQnbZomFea1_G5EbmM,8100
+KekikStream/Plugins/KultFilmler.py,sha256=rvIkd2OXRxuAXHMjiHCRmKrS5m09gy2JoMBgJh7ZIOk,9478
+KekikStream/Plugins/RecTV.py,sha256=p-RELvvD1-gyUgitwosYryH9gRWQTjmn1O9tw-0YInk,8295
+KekikStream/Plugins/RoketDizi.py,sha256=KiX7Xf5IyPPJ-CVcJLM9qc0M6Fi2dhg7zU3EgWkICXA,9318
+KekikStream/Plugins/SelcukFlix.py,sha256=nJ7I5e5vBkn9AbLC_2bSu9bSSgMQeDhCQZBZovK00bc,15299
+KekikStream/Plugins/SetFilmIzle.py,sha256=Z8A_Ivbe65i13RocGZXwmpwrVxNOwdj7Gh3CS2-Fslg,11437
 KekikStream/Plugins/SezonlukDizi.py,sha256=h8mIglL2ORUklnAvEwH_5z6tT3WYxiNnbkeIvxtGUTE,9751
 KekikStream/Plugins/SineWix.py,sha256=z0r90lggAugEWE1g9vg8gZsInBObUZPnVFQwq7GYmJs,7052
 KekikStream/Plugins/Sinefy.py,sha256=ShX13Q-_5KFBobxZufI5V_4zwWvEWfNYuP-g5CkBuww,11099
@@ -76,10 +76,10 @@ KekikStream/Plugins/SinemaCX.py,sha256=11kzAZWgjkitIonDHHiFHMgnViBj-GjyvTXg7k28M
 KekikStream/Plugins/Sinezy.py,sha256=fUj-3WaJMEsKZRnDpHFPxl5Eq2RPLroY80DcftLqvjM,5743
 KekikStream/Plugins/SuperFilmGeldi.py,sha256=StW0ue4qDj8p7CiWy19Lfr2aWtfYvslPExZJuR-3xiY,6348
 KekikStream/Plugins/UgurFilm.py,sha256=H6AA2iTaM0fn6uN8_Dfvr-OqUtM9gDdkg0BKIcZEj7U,4930
-KekikStream/Plugins/YabanciDizi.py,sha256=r3jusGf1Ufr0O1O04qQLxcxk3raCI3EFs4Z2Jwva2-s,11444
-kekikstream-2.3.7.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-kekikstream-2.3.7.dist-info/METADATA,sha256=725PgUwp-rl5ONFCRfex-qsmSzYu85DOFTqCvvzkz5U,10761
-kekikstream-2.3.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-kekikstream-2.3.7.dist-info/entry_points.txt,sha256=dFwdiTx8djyehI0Gsz-rZwjAfZzUzoBSrmzRu9ubjJc,50
-kekikstream-2.3.7.dist-info/top_level.txt,sha256=DNmGJDXl27Drdfobrak8KYLmocW_uznVYFJOzcjUgmY,12
-kekikstream-2.3.7.dist-info/RECORD,,
+KekikStream/Plugins/YabanciDizi.py,sha256=QXzifSl2JMcVOwkwn2vafYIw1jqB5vBTrf-usvsyMBc,11947
+kekikstream-2.3.8.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+kekikstream-2.3.8.dist-info/METADATA,sha256=9xG4vCFR9UYkg6ZuhfdW9WEIMlLDVVYZoRp5mJqdqSs,10761
+kekikstream-2.3.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+kekikstream-2.3.8.dist-info/entry_points.txt,sha256=dFwdiTx8djyehI0Gsz-rZwjAfZzUzoBSrmzRu9ubjJc,50
+kekikstream-2.3.8.dist-info/top_level.txt,sha256=DNmGJDXl27Drdfobrak8KYLmocW_uznVYFJOzcjUgmY,12
+kekikstream-2.3.8.dist-info/RECORD,,