KekikStream 2.4.8__py3-none-any.whl → 2.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of KekikStream might be problematic.

@@ -1,6 +1,7 @@
  # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult, HTMLHelper
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult, HTMLHelper
+ import asyncio
 
  class JetFilmizle(PluginBase):
      name = "JetFilmizle"
@@ -42,17 +43,15 @@ class JetFilmizle(PluginBase):
 
          results = []
          for veri in secici.select("article.movie"):
-             # the <a> link inside h2-h6
              title_text = None
              for h_tag in ["h2", "h3", "h4", "h5", "h6"]:
                  title_text = secici.select_text(f"{h_tag} a", veri)
                  if title_text:
                      break
 
-             href = secici.select_attr("a", "href", veri)
-             poster = secici.select_poster("img", veri)
-
              title = self.clean_title(title_text) if title_text else None
+             href = secici.select_attr("a", "href", veri)
+             poster = secici.select_poster("img", veri)
 
              if title and href:
                  results.append(MainPageResult(
@@ -74,17 +73,15 @@ class JetFilmizle(PluginBase):
 
          results = []
          for article in secici.select("article.movie"):
-             # the <a> link inside h2-h6
              title_text = None
              for h_tag in ["h2", "h3", "h4", "h5", "h6"]:
                  title_text = secici.select_text(f"{h_tag} a", article)
                  if title_text:
                      break
 
-             href = secici.select_attr("a", "href", article)
-             poster = secici.select_poster("img", article)
-
              title = self.clean_title(title_text) if title_text else None
+             href = secici.select_attr("a", "href", article)
+             poster = secici.select_poster("img", article)
 
              if title and href:
                  results.append(SearchResult(
@@ -126,53 +123,89 @@ class JetFilmizle(PluginBase):
              rating = rating,
              year = year,
              actors = actors,
-             duration = int(total_minutes) if duration else None
+             duration = total_minutes if total_minutes else None
          )
 
-     async def load_links(self, url: str) -> list[ExtractResult]:
-         istek = await self.httpx.get(url)
-         secici = HTMLHelper(istek.text)
-
+     async def _process_source(self, url: str, name: str, html: str | None) -> list[ExtractResult]:
          results = []
-
-         # 1) Check the main iframes
-         for iframe in secici.select("iframe"):
-             src = (iframe.attrs.get("src") or
-                    iframe.attrs.get("data-src") or
-                    iframe.attrs.get("data-lazy-src"))
-
-             if src and src != "about:blank":
-                 iframe_url = self.fix_url(src)
-                 data = await self.extract(iframe_url)
-                 if data:
-                     results.append(data)
-
-         # 2) Collect the links from the page numbers (Fragman excluded)
-         page_links = []
-         for link in secici.select("a.post-page-numbers"):
-             isim = secici.select_text("span", link) or ""
-             if isim != "Fragman":
-                 href = link.attrs.get("href")
-                 if href:
-                     page_links.append((self.fix_url(href), isim))
-
-         # 3) Find the iframes on each page link
-         for page_url, isim in page_links:
-             try:
-                 page_resp = await self.httpx.get(page_url)
-                 page_sel = HTMLHelper(page_resp.text)
-
-                 for iframe in page_sel.select("div#movie iframe"):
+         try:
+             if html:
+                 secici = HTMLHelper(html)
+             else:
+                 resp = await self.httpx.get(url)
+                 secici = HTMLHelper(resp.text)
+
+             # Find the iframes
+             container = secici.select_first("div#movie") or secici.select_first("div.film-content")
+
+             if container:
+                 for iframe in secici.select("iframe", container):
                      src = (iframe.attrs.get("src") or
                             iframe.attrs.get("data-src") or
                             iframe.attrs.get("data-lazy-src"))
-
+
                      if src and src != "about:blank":
                          iframe_url = self.fix_url(src)
-                         data = await self.extract(iframe_url, prefix=isim)
+                         # Do NOT use name_override, let the extractor report its own name;
+                         # we adjust it afterwards
+                         data = await self.extract(iframe_url)
+
                          if data:
-                             results.append(data)
-             except Exception:
-                 continue
+                             items = data if isinstance(data, list) else [data]
+
+                             for item in items:
+                                 # Only prepend the button name if the result carries quality info, otherwise use just the button name
+                                 # Quality matters especially for Zeus (1080p, 720p)
+                                 # For the others the plugin name (Apollo, JetPlay etc.) is irrelevant
+
+                                 # Quality check (simple)
+                                 quality_indicators = ["1080p", "720p", "480p", "360p", "240p", "144p", "4k", "2k"]
+                                 has_quality = any(q in item.name.lower() for q in quality_indicators)
+
+                                 if has_quality:
+                                     # Button Name | Extractor Name (because it carries quality)
+                                     # e.g. Zeus | 1080p
+                                     # If the extractor name already contains the button name (Zeus | 1080p -> Zeus), don't add it again
+                                     if name.lower() not in item.name.lower():
+                                         item.name = f"{name} | {item.name}"
+                                 else:
+                                     # No quality info, use only the button name
+                                     # e.g. Apollo | JetTv -> JetTv
+                                     item.name = name
+
+                                 results.append(item)
+             return results
+         except Exception:
+             return []
 
-         return results
+     async def load_links(self, url: str) -> list[ExtractResult]:
+         istek = await self.httpx.get(url)
+         secici = HTMLHelper(istek.text)
+
+         sources = []
+         if film_part := secici.select_first("div.film_part"):
+             # Walk over all the spans
+             for span in secici.select("span", film_part):
+                 # If this span is not inside an <a> tag, it is the active source
+                 if span.parent.tag != "a":
+                     name = span.text(strip=True)
+                     if name:
+                         sources.append((url, name, istek.text))  # html content already available
+                     break
+
+             # Other source links
+             for link in secici.select("a.post-page-numbers", film_part):
+                 name = secici.select_text("span", link) or link.text(strip=True)
+                 href = link.attrs.get("href")
+                 if name != "Fragman" and href:
+                     sources.append((self.fix_url(href), name, None))  # no html yet, will be fetched
+
+         # If there is no film_part, just scan the current page (it may be a single part)
+         if not sources:
+             sources.append((url, "JetFilmizle", istek.text))
+
+         tasks = []
+         for page_url, source_name, html_content in sources:
+             tasks.append(self._process_source(page_url, source_name, html_content))
+
+         return [item for sublist in await asyncio.gather(*tasks) for item in sublist]
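
Note on the JetFilmizle rewrite above: load_links now collects (url, name, html) source tuples and hands each one to _process_source, running them concurrently and flattening the per-source result lists. A minimal sketch of that fan-out/flatten pattern, with made-up names and a dummy fetch instead of the plugin's real helpers:

    import asyncio

    async def process_source(name: str) -> list[str]:
        # Stand-in for fetching one source page and extracting its links.
        await asyncio.sleep(0)
        return [f"{name}-link-1", f"{name}-link-2"]

    async def load_links(sources: list[str]) -> list[str]:
        # One coroutine per source, executed concurrently.
        tasks = [process_source(name) for name in sources]
        groups = await asyncio.gather(*tasks)
        # Flatten list[list[str]] into a single list, as the plugin's final line does.
        return [item for group in groups for item in group]

    print(asyncio.run(load_links(["Zeus", "Apollo"])))
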
@@ -1,7 +1,7 @@
  # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, Subtitle, HTMLHelper
- import base64
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, Subtitle, HTMLHelper
+ import base64, asyncio, contextlib
 
  class KultFilmler(PluginBase):
      name = "KultFilmler"
@@ -99,91 +99,159 @@ class KultFilmler(PluginBase):
                  episodes.append(Episode(season=s or 1, episode=e or 1, title=name, url=self.fix_url(href)))
 
              return SeriesInfo(
-                 url=url, poster=poster, title=title, description=description,
-                 tags=tags, year=year, actors=actors, rating=rating, episodes=episodes
+                 url = url,
+                 poster = poster,
+                 title = title,
+                 description = description,
+                 tags = tags,
+                 year = year,
+                 actors = actors,
+                 rating = rating,
+                 episodes = episodes
              )
 
          return MovieInfo(
-             url=url, poster=poster, title=title, description=description,
-             tags=tags, year=year, rating=rating, actors=actors, duration=duration
+             url = url,
+             poster = poster,
+             title = title,
+             description = description,
+             tags = tags,
+             year = year,
+             rating = rating,
+             actors = actors,
+             duration = duration
          )
 
-     def _get_iframe(self, source_code: str) -> str:
-         """Decode the base64-encoded iframe"""
-         atob = HTMLHelper(source_code).regex_first(r"PHA\+[0-9a-zA-Z+/=]*")
-         if not atob:
-             return ""
+     def _decode_iframe(self, content: str) -> str | None:
+         """Decodes the base64-encoded iframe data"""
+         match = HTMLHelper(content).regex_first(r"PHA\+[0-9a-zA-Z+/=]*")
+         if not match:
+             return None
 
-         # Fix the padding
-         padding = 4 - len(atob) % 4
-         if padding < 4:
-             atob = atob + "=" * padding
+         # Base64 Padding Fix
+         pad = len(match) % 4
+         if pad:
+             match += "=" * (4 - pad)
 
          try:
-             decoded = base64.b64decode(atob).decode("utf-8")
-             secici = HTMLHelper(decoded)
-             iframe_src = secici.select_attr("iframe", "src")
-             return self.fix_url(iframe_src) if iframe_src else ""
+             decoded = base64.b64decode(match).decode("utf-8")
+             src = HTMLHelper(decoded).select_attr("iframe", "src")
+             return self.fix_url(src) if src else None
          except Exception:
-             return ""
+             return None
 
-     def _extract_subtitle_url(self, source_code: str) -> str | None:
-         """Extract the subtitle URL"""
-         return HTMLHelper(source_code).regex_first(r"(https?://[^\s\"]+\.srt)")
+     async def _resolve_alt_page(self, url: str, title: str) -> tuple[str | None, str]:
+         """Downloads the alternative page source and finds its iframe"""
+         try:
+             res = await self.httpx.get(url)
+             return self._decode_iframe(res.text), title
+         except Exception:
+             return None, title
+
+     async def _extract_stream(self, iframe_url: str, title: str, subtitles: list[Subtitle]) -> list[ExtractResult]:
+         """Extracts the stream links from the iframe"""
+         results = []
+
+         # 1. VidMoly special handling (M3U)
+         if "vidmoly" in iframe_url:
+             with contextlib.suppress(Exception):
+                 res = await self.httpx.get(
+                     url = iframe_url,
+                     headers = {
+                         "User-Agent" : "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36",
+                         "Sec-Fetch-Dest" : "iframe"
+                     }
+                 )
+                 m3u = HTMLHelper(res.text).regex_first(r'file:"([^"]+)"')
+
+                 if m3u:
+                     results.append(ExtractResult(
+                         name = title or "VidMoly",
+                         url = m3u,
+                         referer = self.main_url,
+                         subtitles = subtitles
+                     ))
+
+             return results
+
+         # 2. Generic extractor usage
+         with contextlib.suppress(Exception):
+             extracted = await self.extract(iframe_url)
+             if not extracted:
+                 return []
+
+             items = extracted if isinstance(extracted, list) else [extracted]
+             for item in items:
+                 # Update the name and subtitle info
+                 # Use title to override the original extractor name
+                 if title:
+                     item.name = title
+
+                 # Attach the subtitles if there are any
+                 if subtitles:
+                     # A copy update is safer (Pydantic model)
+                     if hasattr(item, "model_copy"):
+                         item = item.model_copy(update={"subtitles": subtitles})
+                     else:
+                         item.subtitles = subtitles
+
+                 results.append(item)
+
+         return results
 
      async def load_links(self, url: str) -> list[ExtractResult]:
-         istek = await self.httpx.get(url)
-         secici = HTMLHelper(istek.text)
+         response = await self.httpx.get(url)
+         source = response.text
+         helper = HTMLHelper(source)
 
-         iframes = set()
+         # Find the subtitle
+         sub_url = helper.regex_first(r"(https?://[^\s\"]+\.srt)")
+         subtitles = [Subtitle(name="Türkçe", url=sub_url)] if sub_url else []
 
-         # Main iframe
-         main_frame = self._get_iframe(istek.text)
-         if main_frame:
-             iframes.add(main_frame)
+         # Collect the sources to process: (Iframe_URL, Title)
+         sources = []
 
-         # Alternative players
-         for player in secici.select("div.container#player"):
-             iframe_src = secici.select_attr("iframe", "src", player)
-             alt_iframe = self.fix_url(iframe_src) if iframe_src else None
-             if alt_iframe:
-                 alt_istek = await self.httpx.get(alt_iframe)
-                 alt_frame = self._get_iframe(alt_istek.text)
-                 if alt_frame:
-                     iframes.add(alt_frame)
+         # A) Main player
+         main_iframe = self._decode_iframe(source)
+         if main_iframe:
+             p_name = helper.select_text("div.parts-middle div.part.active div.part-name") or None
+             p_lang = helper.select_attr("div.parts-middle div.part.active div.part-lang span", "title")
+             full_title = f"{p_name} | {p_lang}" if p_lang else p_name
+             sources.append((main_iframe, full_title))
 
-         results = []
+         # B) Alternative players (these require link resolution)
+         alt_tasks = []
+         for link in helper.select("div.parts-middle a.post-page-numbers"):
+             href = link.attrs.get("href")
+             if not href:
+                 continue
 
-         for iframe in iframes:
-             subtitles = []
+             a_name = helper.select_text("div.part-name", link) or "Alternatif"
+             a_lang = helper.select_attr("div.part-lang span", "title", link)
+             full_title = f"{a_name} | {a_lang}" if a_lang else a_name
 
-             # VidMoly special handling
-             if "vidmoly" in iframe:
-                 headers = {
-                     "User-Agent" : "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36",
-                     "Sec-Fetch-Dest" : "iframe"
-                 }
-                 iframe_istek = await self.httpx.get(iframe, headers=headers)
-                 m3u_match = HTMLHelper(iframe_istek.text).regex_first(r'file:"([^"]+)"')
+             alt_tasks.append(self._resolve_alt_page(self.fix_url(href), full_title))
 
-                 if m3u_match:
-                     results.append(ExtractResult(
-                         name = "VidMoly",
-                         url = m3u_match,
-                         referer = self.main_url,
-                         subtitles = []
-                     ))
-                 continue
+         if alt_tasks:
+             resolved_alts = await asyncio.gather(*alt_tasks)
+             for iframe, title in resolved_alts:
+                 if iframe:
+                     sources.append((iframe, title))
 
-             # Extract the subtitle
-             subtitle_url = self._extract_subtitle_url(url)
-             if subtitle_url:
-                 subtitles.append(Subtitle(name="Türkçe", url=subtitle_url))
+         # 3. Process (extract) all sources in parallel
+         if not sources:
+             return []
 
-             data = await self.extract(iframe)
-             if data:
-                 # The ExtractResult object is immutable, create a new copy
-                 updated_data = data.model_copy(update={"subtitles": subtitles}) if subtitles else data
-                 results.append(updated_data)
+         extract_tasks = [
+             self._extract_stream(iframe, title, subtitles)
+             for iframe, title in sources
+         ]
 
-         return results
+         results_groups = await asyncio.gather(*extract_tasks)
+
+         # Flatten the results
+         final_results = []
+         for group in results_groups:
+             final_results.extend(group)
+
+         return final_results
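
Note on the KultFilmler rewrite above: _decode_iframe pads the regex-captured base64 run before decoding, because text scraped out of page source frequently loses its trailing '=' characters and base64.b64decode rejects input whose length is not a multiple of 4. A standalone illustration of the same padding rule (the sample string is made up):

    import base64

    def pad_b64(data: str) -> str:
        # base64 text must be a multiple of 4 characters long; top it up with '='.
        if rem := len(data) % 4:
            data += "=" * (4 - rem)
        return data

    # "PHA+aGVsbG8" is 11 characters; one '=' restores a valid length of 12.
    print(base64.b64decode(pad_b64("PHA+aGVsbG8")).decode("utf-8"))  # -> <p>hello
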
@@ -2,7 +2,7 @@
 
  from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, Episode, SeriesInfo, ExtractResult, HTMLHelper
  from json import dumps, loads
- import re
+ import re, contextlib
 
  class RecTV(PluginBase):
      name = "RecTV"
@@ -73,11 +73,10 @@ class RecTV(PluginBase):
          duration_raw = veri.get("duration")
          duration = None
          if duration_raw:
-             try:
+             with contextlib.suppress(Exception):
                  h = int(HTMLHelper(duration_raw).regex_first(r"(\d+)h") or 0)
                  m = int(HTMLHelper(duration_raw).regex_first(r"(\d+)min") or 0)
                  duration = h * 60 + m
-             except: pass
 
          common_info = {
              "url" : url,
@@ -110,10 +109,15 @@ class RecTV(PluginBase):
                      tag = " (Altyazı)"; clean_s = re.sub(r"\s*altyaz[ıi]\s*", "", s_title, flags=re.I).strip()
 
                  ep_data = {"url": self.fix_url(source.get("url")), "title": f"{veri.get('title')} | {s_title} {e_title} - {source.get('title')}", "is_episode": True}
-                 episodes.append(Episode(season=s or 1, episode=e or 1, title=f"{clean_s} {e_title}{tag} - {source.get('title')}", url=dumps(ep_data)))
+                 episodes.append(Episode(
+                     season = s or 1,
+                     episode = e or 1,
+                     title = f"{clean_s} {e_title}{tag} - {source.get('title')}",
+                     url = dumps(ep_data)
+                 ))
 
              return SeriesInfo(**common_info, episodes=episodes, actors=[])
-
+
          return MovieInfo(**common_info, actors=[])
 
      async def load_links(self, url: str) -> list[ExtractResult]:
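
Note on the RecTV change above: the bare try/except: pass around the duration parsing is replaced with contextlib.suppress(Exception), which does the same thing with less ceremony. A rough equivalent using plain re in place of HTMLHelper (the parse_duration wrapper is illustrative, not the plugin's API):

    import contextlib
    import re

    def parse_duration(raw: str | None) -> int | None:
        duration = None
        with contextlib.suppress(Exception):
            # Mirrors the plugin's parsing: "2h 15min" -> 135 minutes.
            h_match = re.search(r"(\d+)h", raw)
            m_match = re.search(r"(\d+)min", raw)
            h = int(h_match.group(1)) if h_match else 0
            m = int(m_match.group(1)) if m_match else 0
            duration = h * 60 + m
        return duration

    print(parse_duration("2h 15min"))  # 135
    print(parse_duration(None))        # None: the TypeError from re.search is suppressed
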
@@ -1,6 +1,6 @@
  # This tool was written by @keyiflerolsun | for @KekikAkademi.
 
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, MovieInfo, HTMLHelper
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, MovieInfo, HTMLHelper
  import base64, json
 
  class RoketDizi(PluginBase):
@@ -26,8 +26,6 @@ class RoketDizi(PluginBase):
          secici = HTMLHelper(istek.text)
 
          results = []
-
-         # Use div.new-added-list to find the container, then get items
          for item in secici.select("div.new-added-list > span"):
              title = secici.select_text("span.line-clamp-1", item)
              href = secici.select_attr("a", "href", item)
@@ -52,7 +50,7 @@ class RoketDizi(PluginBase):
                  "Referer" : f"{self.main_url}/",
              }
          )
-
+
          try:
              veri = istek.json()
              encoded = veri.get("response", "")
@@ -87,39 +85,33 @@ class RoketDizi(PluginBase):
      async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
          resp = await self.httpx.get(url)
          sel = HTMLHelper(resp.text)
-
+
          next_data_text = sel.select_text("script#__NEXT_DATA__")
          if not next_data_text:
              return SeriesInfo(url=url, title=sel.select_text("h1") or "Bilinmeyen")
 
          try:
-             next_data = json.loads(next_data_text)
+             next_data = json.loads(next_data_text)
              secure_data_raw = next_data["props"]["pageProps"]["secureData"]
-             secure_data = json.loads(base64.b64decode(secure_data_raw).decode('utf-8'))
-
+             secure_data = json.loads(base64.b64decode(secure_data_raw).decode('utf-8'))
+
              content_item = secure_data.get("contentItem", {})
              content = secure_data.get("content", {}).get("result", {})
-
+
              title = content_item.get("original_title") or content_item.get("culture_title")
              poster = content_item.get("poster_url") or content_item.get("face_url")
              description = content_item.get("description")
             rating = str(content_item.get("imdb_point") or "")
             year = str(content_item.get("release_year") or "")
             tags = content_item.get("categories", "").split(",")
-
-             # Actors extraction from getSerieCastsById or getMovieCastsById
+
              actors = []
              casts_data = content.get("getSerieCastsById") or content.get("getMovieCastsById")
              if casts_data and casts_data.get("result"):
                  actors = [cast.get("name") for cast in casts_data["result"] if cast.get("name")]
 
-             # Episodes extraction
              episodes = []
              if "Series" in str(content.get("FindedType")):
-                 # Check for episodes in SecureData -> RelatedResults -> getEpisodeSources (this might be for the current episode)
-                 # Usually the full episode list isn't in secureData, but we can get it from the HTML or another API
-                 # However, many times Next.js pages have them in props
-                 # Fall back to the previous regex method for episodes if not in the JSON
                  all_urls = HTMLHelper(resp.text).regex_all(r'"url":"([^"]*)"')
                  episodes_dict = {}
                  for u in all_urls:
@@ -169,17 +161,16 @@ class RoketDizi(PluginBase):
      async def load_links(self, url: str) -> list[ExtractResult]:
          resp = await self.httpx.get(url)
          sel = HTMLHelper(resp.text)
-
+
          next_data = sel.select_text("script#__NEXT_DATA__")
          if not next_data:
              return []
 
          try:
-             data = json.loads(next_data)
-             secure_data = data["props"]["pageProps"]["secureData"]
+             data = json.loads(next_data)
+             secure_data = data["props"]["pageProps"]["secureData"]
              decoded_json = json.loads(base64.b64decode(secure_data).decode('utf-8'))
 
-             # Take the RelatedResults -> getEpisodeSources -> result array from secureData
              sources = decoded_json.get("RelatedResults", {}).get("getEpisodeSources", {}).get("result", [])
 
              seen_urls = set()
@@ -200,8 +191,8 @@
                      iframe_url = "https://" + iframe_url
 
                  iframe_url = self.fix_url(iframe_url)
-
-                 # Deduplicate
+
+                 # Deduplicate
                  if iframe_url in seen_urls:
                      continue
                  seen_urls.add(iframe_url)
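
Note on the RoketDizi changes above: both load_item and load_links read the Next.js __NEXT_DATA__ script, base64-decode props.pageProps.secureData, and then walk RelatedResults -> getEpisodeSources -> result. A minimal sketch of that decode path with a fabricated payload (real pages carry far more data):

    import base64
    import json

    # Fabricated stand-in for the JSON inside <script id="__NEXT_DATA__">.
    payload = {"RelatedResults": {"getEpisodeSources": {"result": [{"url": "player.example/embed/1"}]}}}
    secure = base64.b64encode(json.dumps(payload).encode("utf-8")).decode("ascii")
    next_data = {"props": {"pageProps": {"secureData": secure}}}

    # The decode path the plugin uses: JSON -> base64 -> JSON.
    decoded = json.loads(base64.b64decode(next_data["props"]["pageProps"]["secureData"]).decode("utf-8"))
    for source in decoded.get("RelatedResults", {}).get("getEpisodeSources", {}).get("result", []):
        print(source.get("url"))  # -> player.example/embed/1
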