KekikStream 2.4.9__py3-none-any.whl → 2.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -144,14 +144,20 @@ class PluginBase(ABC):
                     if name_override:
                         item.name = name_override
                     elif prefix and item.name:
-                        item.name = f"{prefix} | {item.name}"
+                        if item.name.lower() in prefix.lower():
+                            item.name = prefix
+                        else:
+                            item.name = f"{prefix} | {item.name}"
                 return data

             # Tekil öğe ise
             if name_override:
                 data.name = name_override
             elif prefix and data.name:
-                data.name = f"{prefix} | {data.name}"
+                if data.name.lower() in prefix.lower():
+                    data.name = prefix
+                else:
+                    data.name = f"{prefix} | {data.name}"

             return data
         except Exception as hata:
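
The hunk above changes how PluginBase prefixes result names: when the item's own name already appears inside the prefix, the prefix is used as-is instead of being joined with " | ", which avoids duplicated labels. A minimal standalone sketch of the rule (apply_prefix is a hypothetical helper, not part of the package):

    def apply_prefix(name: str, prefix: str) -> str:
        # New behaviour: skip the join when the name is already part of the prefix.
        if name.lower() in prefix.lower():
            return prefix
        return f"{prefix} | {name}"

    print(apply_prefix("VidMoly", "VidMoly | Türkçe Dublaj"))  # VidMoly | Türkçe Dublaj (no duplication)
    print(apply_prefix("Filemoon", "Moly | Dublaj"))           # Moly | Dublaj | Filemoon
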
@@ -0,0 +1,27 @@
+# Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper
+
+class Abstream(ExtractorBase):
+    name = "Abstream"
+    main_url = "https://abstream.to"
+
+    async def extract(self, url: str, referer: str = None) -> ExtractResult:
+        istek = await self.httpx.get(
+            url = url,
+            headers = {
+                "Accept-Language" : "en-US,en;q=0.5",
+                "Referer" : referer or self.main_url,
+            }
+        )
+        secici = HTMLHelper(istek.text)
+        video_url = secici.regex_first(r'file:"([^"]*)"')
+
+        if not video_url:
+            raise ValueError(f"Abstream: Video URL bulunamadı. {url}")
+
+        return ExtractResult(
+            name = self.name,
+            url = video_url,
+            referer = referer or self.main_url
+        )
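
The new Abstream extractor follows the usual ExtractorBase pattern: fetch the embed page with a Referer header, then pull the stream address out of a file:"..." assignment in the page's JavaScript. A standalone sketch of that regex step, with made-up sample HTML:

    import re

    sample_html = 'player.setup({sources: [{file:"https://example.com/stream.m3u8"}]});'
    match = re.search(r'file:"([^"]*)"', sample_html)
    video_url = match.group(1) if match else None
    print(video_url)  # https://example.com/stream.m3u8
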
@@ -13,7 +13,8 @@ class Filemoon(ExtractorBase):
         "filemoon.in",
         "filemoon.sx",
         "filemoon.nl",
-        "filemoon.com"
+        "filemoon.com",
+        "bysejikuar.com"
     ]

     def can_handle_url(self, url: str) -> bool:
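
This hunk only registers bysejikuar.com as an additional Filemoon mirror domain. A hedged sketch of how a domain list like this is commonly matched in a can_handle_url check (an assumption for illustration, not the package's actual implementation):

    from urllib.parse import urlparse

    DOMAINS = ["filemoon.in", "filemoon.sx", "filemoon.nl", "filemoon.com", "bysejikuar.com"]

    def can_handle_url(url: str) -> bool:
        host = urlparse(url).netloc.lower()
        return any(host == d or host.endswith("." + d) for d in DOMAINS)

    print(can_handle_url("https://bysejikuar.com/e/abc123"))  # True
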
@@ -137,23 +137,60 @@ class FilmEkseni(PluginBase):
         istek = await self.httpx.get(url)
         secici = HTMLHelper(istek.text)

+        # Dil sekmelerini bul (Dublaj, Altyazı vb.)
+        # Fragman vb. linkleri dahil etmemek için sadece 'a.nav-link' bakıyoruz
+        lang_tabs = [
+            tab for tab in secici.select("ul.nav-tabs.nav-slider a.nav-link")
+            if "fragman" not in tab.text().lower()
+        ]
+
+        # Player panellerini bul
+        tab_panes = secici.select("div.tab-pane")
+
         sources = [] # (name, url, is_active)
-        if nav_links := secici.select("nav.card-nav a.nav-link"):
-            seen_urls = set()
-            for link in nav_links:
-                if link.attrs.get("href") == "#":
-                    continue # Sinema Modu vb.
-
-                name = link.text(strip=True)
-                href = link.attrs.get("href")
-                is_active = "active" in link.attrs.get("class", "")
-
-                if href and href not in seen_urls:
-                    seen_urls.add(href)
-                    sources.append((name, href, is_active))
-        else:
-            # Nav yoksa mevcut sayfayı (Varsayılan/VIP) al
-            sources.append(("VIP", url, True))
+
+        # Eğer dil sekmeleri ve paneller eşleşiyorsa (ideal durum)
+        if lang_tabs and tab_panes:
+            for i, pane in enumerate(tab_panes):
+                if i >= len(lang_tabs):
+                    break
+
+                lang_name = lang_tabs[i].text(strip=True)
+                player_links = secici.select("a.nav-link", element=pane)
+
+                for link in player_links:
+                    p_name = link.text(strip=True)
+                    if not p_name or any(x in p_name.lower() for x in ["paylaş", "indir", "hata"]):
+                        continue
+
+                    href = link.attrs.get("href")
+                    if not href or href == "#":
+                        continue
+
+                    # Yeni isim "Moly | Türkçe Dublaj"
+                    full_name = f"{p_name} | {lang_name}"
+                    is_active = "active" in link.attrs.get("class", "")
+
+                    sources.append((full_name, self.fix_url(href), is_active))
+
+        # Eğer panel yapısı beklediğimizden farklıysa eski mantığa dön
+        if not sources:
+            if nav_links := secici.select("nav.card-nav a.nav-link"):
+                seen_urls = set()
+                for link in nav_links:
+                    if link.attrs.get("href") == "#":
+                        continue # Sinema Modu vb.
+
+                    name = link.text(strip=True)
+                    href = link.attrs.get("href")
+                    is_active = "active" in link.attrs.get("class", "")
+
+                    if href and href not in seen_urls:
+                        seen_urls.add(href)
+                        sources.append((name, self.fix_url(href), is_active))
+            else:
+                # Nav yoksa mevcut sayfayı (Varsayılan/VIP) al
+                sources.append(("VIP", url, True))

         tasks = []
         for name, link_url, is_active in sources:
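
FilmEkseni now pairs the language tabs (Dublaj, Altyazı; trailer tabs are filtered out) with the div.tab-pane panels by index and labels each player link as "Player | Language", falling back to the old nav-link scan only when that structure yields nothing. A minimal sketch of the pairing idea, with plain lists standing in for the parsed HTML (sample data is illustrative):

    lang_tabs = ["Türkçe Dublaj", "Türkçe Altyazı"]   # "Fragman" already filtered out
    tab_panes = [["Moly", "Close"], ["Moly"]]          # player names found in each pane

    sources = []
    for i, players in enumerate(tab_panes):
        if i >= len(lang_tabs):
            break
        for p_name in players:
            sources.append(f"{p_name} | {lang_tabs[i]}")

    print(sources)  # ['Moly | Türkçe Dublaj', 'Close | Türkçe Dublaj', 'Moly | Türkçe Altyazı']
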
@@ -78,7 +78,7 @@ class FilmMakinesi(PluginBase):

         title = self.clean_title(secici.select_text("h1.title"))
         poster = secici.select_poster("img.cover-img")
-        description = secici.select_text("div.info-description p")
+        description = secici.select_text("div.info-description")
         rating = secici.select_text("div.info div.imdb b")
         year = secici.select_text("span.date a")
         actors = secici.select_texts("div.cast-name")
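
The description selector is widened from div.info-description p to the container itself, presumably so text that is not wrapped in a single <p> is still captured. An illustration of the difference using BeautifulSoup rather than the package's HTMLHelper (the markup is made up):

    from bs4 import BeautifulSoup

    html = '<div class="info-description">Kısa açıklama.<br>İkinci satır.</div>'
    soup = BeautifulSoup(html, "html.parser")

    print(soup.select_one("div.info-description p"))  # None: there is no <p> wrapper
    print(soup.select_one("div.info-description").get_text(" ", strip=True))
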
@@ -5,7 +5,7 @@ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo
 class FilmciBaba(PluginBase):
     name = "FilmciBaba"
     language = "tr"
-    main_url = "https://izlehdfilm.cc"
+    main_url = "https://4kizle.live"
     favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
     description = "Filmci Baba, film izleme sitesi 4k Full film izle, 1080p ve 4k kalite de sinema filmleri ve dizileri, tek parça hd kalitede türkçe dublajlı filmler seyret."

@@ -1,7 +1,7 @@
 # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

-from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, Subtitle, HTMLHelper
-import base64
+from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, Subtitle, HTMLHelper
+import base64, asyncio, contextlib

 class KultFilmler(PluginBase):
     name = "KultFilmler"
@@ -99,91 +99,159 @@ class KultFilmler(PluginBase):
                 episodes.append(Episode(season=s or 1, episode=e or 1, title=name, url=self.fix_url(href)))

             return SeriesInfo(
-                url=url, poster=poster, title=title, description=description,
-                tags=tags, year=year, actors=actors, rating=rating, episodes=episodes
+                url = url,
+                poster = poster,
+                title = title,
+                description = description,
+                tags = tags,
+                year = year,
+                actors = actors,
+                rating = rating,
+                episodes = episodes
             )

         return MovieInfo(
-            url=url, poster=poster, title=title, description=description,
-            tags=tags, year=year, rating=rating, actors=actors, duration=duration
+            url = url,
+            poster = poster,
+            title = title,
+            description = description,
+            tags = tags,
+            year = year,
+            rating = rating,
+            actors = actors,
+            duration = duration
         )

-    def _get_iframe(self, source_code: str) -> str:
-        """Base64 kodlu iframe'i çözümle"""
-        atob = HTMLHelper(source_code).regex_first(r"PHA\+[0-9a-zA-Z+/=]*")
-        if not atob:
-            return ""
+    def _decode_iframe(self, content: str) -> str | None:
+        """Base64 kodlanmış iframe verisini çözer"""
+        match = HTMLHelper(content).regex_first(r"PHA\+[0-9a-zA-Z+/=]*")
+        if not match:
+            return None

-        # Padding düzelt
-        padding = 4 - len(atob) % 4
-        if padding < 4:
-            atob = atob + "=" * padding
+        # Base64 Padding Fix
+        pad = len(match) % 4
+        if pad:
+            match += "=" * (4 - pad)

         try:
-            decoded = base64.b64decode(atob).decode("utf-8")
-            secici = HTMLHelper(decoded)
-            iframe_src = secici.select_attr("iframe", "src")
-            return self.fix_url(iframe_src) if iframe_src else ""
+            decoded = base64.b64decode(match).decode("utf-8")
+            src = HTMLHelper(decoded).select_attr("iframe", "src")
+            return self.fix_url(src) if src else None
         except Exception:
-            return ""
+            return None

-    def _extract_subtitle_url(self, source_code: str) -> str | None:
-        """Altyazı URL'sini çıkar"""
-        return HTMLHelper(source_code).regex_first(r"(https?://[^\s\"]+\.srt)")
+    async def _resolve_alt_page(self, url: str, title: str) -> tuple[str | None, str]:
+        """Alternatif sayfa kaynak kodunu indirip iframe'i bulur"""
+        try:
+            res = await self.httpx.get(url)
+            return self._decode_iframe(res.text), title
+        except Exception:
+            return None, title
+
+    async def _extract_stream(self, iframe_url: str, title: str, subtitles: list[Subtitle]) -> list[ExtractResult]:
+        """Iframe üzerinden stream linklerini ayıklar"""
+        results = []
+
+        # 1. VidMoly Özel Çözümleme(M3U)
+        if "vidmoly" in iframe_url:
+            with contextlib.suppress(Exception):
+                res = await self.httpx.get(
+                    url = iframe_url,
+                    headers = {
+                        "User-Agent" : "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36",
+                        "Sec-Fetch-Dest" : "iframe"
+                    }
+                )
+                m3u = HTMLHelper(res.text).regex_first(r'file:"([^"]+)"')
+
+                if m3u:
+                    results.append(ExtractResult(
+                        name = title or "VidMoly",
+                        url = m3u,
+                        referer = self.main_url,
+                        subtitles = subtitles
+                    ))
+
+            return results
+
+        # 2. Genel Extractor Kullanımı
+        with contextlib.suppress(Exception):
+            extracted = await self.extract(iframe_url)
+            if not extracted:
+                return []
+
+            items = extracted if isinstance(extracted, list) else [extracted]
+            for item in items:
+                # İsim ve altyazı bilgilerini güncelle
+                # Orijinal extractor ismini ezmek için title kullan
+                if title:
+                    item.name = title
+
+                # Varsa altyazıları ekle
+                if subtitles:
+                    # Copy update daha güvenli (Pydantic model)
+                    if hasattr(item, "model_copy"):
+                        item = item.model_copy(update={"subtitles": subtitles})
+                    else:
+                        item.subtitles = subtitles
+
+                results.append(item)
+
+        return results

     async def load_links(self, url: str) -> list[ExtractResult]:
-        istek = await self.httpx.get(url)
-        secici = HTMLHelper(istek.text)
+        response = await self.httpx.get(url)
+        source = response.text
+        helper = HTMLHelper(source)

-        iframes = set()
+        # Altyazı Bul
+        sub_url = helper.regex_first(r"(https?://[^\s\"]+\.srt)")
+        subtitles = [Subtitle(name="Türkçe", url=sub_url)] if sub_url else []

-        # Ana iframe
-        main_frame = self._get_iframe(istek.text)
-        if main_frame:
-            iframes.add(main_frame)
+        # İşlenecek kaynakları topla: (Iframe_URL, Başlık)
+        sources = []

-        # Alternatif player'lar
-        for player in secici.select("div.container#player"):
-            iframe_src = secici.select_attr("iframe", "src", player)
-            alt_iframe = self.fix_url(iframe_src) if iframe_src else None
-            if alt_iframe:
-                alt_istek = await self.httpx.get(alt_iframe)
-                alt_frame = self._get_iframe(alt_istek.text)
-                if alt_frame:
-                    iframes.add(alt_frame)
+        # A) Ana Player
+        main_iframe = self._decode_iframe(source)
+        if main_iframe:
+            p_name = helper.select_text("div.parts-middle div.part.active div.part-name") or None
+            p_lang = helper.select_attr("div.parts-middle div.part.active div.part-lang span", "title")
+            full_title = f"{p_name} | {p_lang}" if p_lang else p_name
+            sources.append((main_iframe, full_title))

-        results = []
+        # B) Alternatif Playerlar (Link Çözümleme Gerektirir)
+        alt_tasks = []
+        for link in helper.select("div.parts-middle a.post-page-numbers"):
+            href = link.attrs.get("href")
+            if not href:
+                continue

-        for iframe in iframes:
-            subtitles = []
+            a_name = helper.select_text("div.part-name", link) or "Alternatif"
+            a_lang = helper.select_attr("div.part-lang span", "title", link)
+            full_title = f"{a_name} | {a_lang}" if a_lang else a_name

-            # VidMoly özel işleme
-            if "vidmoly" in iframe:
-                headers = {
-                    "User-Agent" : "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36",
-                    "Sec-Fetch-Dest" : "iframe"
-                }
-                iframe_istek = await self.httpx.get(iframe, headers=headers)
-                m3u_match = HTMLHelper(iframe_istek.text).regex_first(r'file:"([^"]+)"')
+            alt_tasks.append(self._resolve_alt_page(self.fix_url(href), full_title))

-                if m3u_match:
-                    results.append(ExtractResult(
-                        name = "VidMoly",
-                        url = m3u_match,
-                        referer = self.main_url,
-                        subtitles = []
-                    ))
-                continue
+        if alt_tasks:
+            resolved_alts = await asyncio.gather(*alt_tasks)
+            for iframe, title in resolved_alts:
+                if iframe:
+                    sources.append((iframe, title))

-            # Altyazı çıkar
-            subtitle_url = self._extract_subtitle_url(url)
-            if subtitle_url:
-                subtitles.append(Subtitle(name="Türkçe", url=subtitle_url))
+        # 3. Tüm kaynakları paralel işle (Extract)
+        if not sources:
+            return []

-            data = await self.extract(iframe)
-            if data:
-                # ExtractResult objesi immutable, yeni bir kopya oluştur
-                updated_data = data.model_copy(update={"subtitles": subtitles}) if subtitles else data
-                results.append(updated_data)
+        extract_tasks = [
+            self._extract_stream(iframe, title, subtitles)
+            for iframe, title in sources
+        ]

-        return results
+        results_groups = await asyncio.gather(*extract_tasks)
+
+        # Sonuçları düzleştir
+        final_results = []
+        for group in results_groups:
+            final_results.extend(group)
+
+        return final_results
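
The KultFilmler refactor splits iframe decoding, alternative-page resolution, and stream extraction into helpers and runs them concurrently with asyncio.gather, and it normalises base64 padding before decoding. Two standalone sketches of those patterns (all inputs below are made up):

    import asyncio, base64

    # a) Padding fix: restore the "=" characters a stripped base64 blob needs before decoding.
    blob = base64.b64encode(b"<p><iframe src='https://example.com/embed'></iframe></p>").decode().rstrip("=")
    pad = len(blob) % 4
    if pad:
        blob += "=" * (4 - pad)
    print(base64.b64decode(blob).decode("utf-8"))

    # b) Resolving several alternative pages concurrently instead of one request at a time.
    async def fake_resolve(url: str) -> tuple[str | None, str]:
        await asyncio.sleep(0)          # stands in for the HTTP round trip
        return f"{url}/iframe", "Alternatif"

    async def main():
        tasks = [fake_resolve(u) for u in ("https://a.example", "https://b.example")]
        print(await asyncio.gather(*tasks))

    asyncio.run(main())
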
@@ -2,7 +2,7 @@

 from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, Episode, SeriesInfo, ExtractResult, HTMLHelper
 from json import dumps, loads
-import re
+import re, contextlib

 class RecTV(PluginBase):
     name = "RecTV"
@@ -73,11 +73,10 @@ class RecTV(PluginBase):
         duration_raw = veri.get("duration")
         duration = None
         if duration_raw:
-            try:
+            with contextlib.suppress(Exception):
                 h = int(HTMLHelper(duration_raw).regex_first(r"(\d+)h") or 0)
                 m = int(HTMLHelper(duration_raw).regex_first(r"(\d+)min") or 0)
                 duration = h * 60 + m
-            except: pass

         common_info = {
             "url" : url,
@@ -110,10 +109,15 @@ class RecTV(PluginBase):
                     tag = " (Altyazı)"; clean_s = re.sub(r"\s*altyaz[ıi]\s*", "", s_title, flags=re.I).strip()

                 ep_data = {"url": self.fix_url(source.get("url")), "title": f"{veri.get('title')} | {s_title} {e_title} - {source.get('title')}", "is_episode": True}
-                episodes.append(Episode(season=s or 1, episode=e or 1, title=f"{clean_s} {e_title}{tag} - {source.get('title')}", url=dumps(ep_data)))
+                episodes.append(Episode(
+                    season = s or 1,
+                    episode = e or 1,
+                    title = f"{clean_s} {e_title}{tag} - {source.get('title')}",
+                    url = dumps(ep_data)
+                ))

             return SeriesInfo(**common_info, episodes=episodes, actors=[])
-
+
         return MovieInfo(**common_info, actors=[])

     async def load_links(self, url: str) -> list[ExtractResult]:
@@ -1,6 +1,6 @@
 # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

-from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, MovieInfo, HTMLHelper
+from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, MovieInfo, HTMLHelper
 import base64, json

 class RoketDizi(PluginBase):
@@ -26,8 +26,6 @@ class RoketDizi(PluginBase):
         secici = HTMLHelper(istek.text)

         results = []
-
-        # Use div.new-added-list to find the container, then get items
         for item in secici.select("div.new-added-list > span"):
             title = secici.select_text("span.line-clamp-1", item)
             href = secici.select_attr("a", "href", item)
@@ -52,7 +50,7 @@ class RoketDizi(PluginBase):
                 "Referer" : f"{self.main_url}/",
             }
         )
-
+
         try:
             veri = istek.json()
             encoded = veri.get("response", "")
@@ -87,39 +85,33 @@ class RoketDizi(PluginBase):
     async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
         resp = await self.httpx.get(url)
         sel = HTMLHelper(resp.text)
-
+
         next_data_text = sel.select_text("script#__NEXT_DATA__")
         if not next_data_text:
             return SeriesInfo(url=url, title=sel.select_text("h1") or "Bilinmeyen")

         try:
-            next_data = json.loads(next_data_text)
+            next_data = json.loads(next_data_text)
             secure_data_raw = next_data["props"]["pageProps"]["secureData"]
-            secure_data = json.loads(base64.b64decode(secure_data_raw).decode('utf-8'))
-
+            secure_data = json.loads(base64.b64decode(secure_data_raw).decode('utf-8'))
+
             content_item = secure_data.get("contentItem", {})
             content = secure_data.get("content", {}).get("result", {})
-
+
             title = content_item.get("original_title") or content_item.get("culture_title")
             poster = content_item.get("poster_url") or content_item.get("face_url")
             description = content_item.get("description")
             rating = str(content_item.get("imdb_point") or "")
             year = str(content_item.get("release_year") or "")
             tags = content_item.get("categories", "").split(",")
-
-            # Actors extraction from getSerieCastsById or getMovieCastsById
+
             actors = []
             casts_data = content.get("getSerieCastsById") or content.get("getMovieCastsById")
             if casts_data and casts_data.get("result"):
                 actors = [cast.get("name") for cast in casts_data["result"] if cast.get("name")]

-            # Episodes extraction
             episodes = []
             if "Series" in str(content.get("FindedType")):
-                # Check for episodes in SecureData -> RelatedResults -> getEpisodeSources (this might be for the current episode)
-                # Usually full episode list isn't in secureData, but we can get it from HTML or another API
-                # However, many times Next.js pages have them in props
-                # Let's fallback to the previous regex method for episodes if not in JSON
                 all_urls = HTMLHelper(resp.text).regex_all(r'"url":"([^"]*)"')
                 episodes_dict = {}
                 for u in all_urls:
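
Both load_item above and load_links in the next hunk read the page's __NEXT_DATA__ script, take props.pageProps.secureData, and base64-decode it into JSON before picking out the content fields. A minimal sketch of that decoding step with a fabricated payload (not real site data):

    import base64, json

    secure = {"contentItem": {"original_title": "Example Show", "release_year": 2024}}
    next_data = {"props": {"pageProps": {"secureData": base64.b64encode(json.dumps(secure).encode()).decode()}}}

    raw = next_data["props"]["pageProps"]["secureData"]
    decoded = json.loads(base64.b64decode(raw).decode("utf-8"))
    print(decoded["contentItem"]["original_title"])  # Example Show
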
@@ -169,17 +161,16 @@ class RoketDizi(PluginBase):
     async def load_links(self, url: str) -> list[ExtractResult]:
         resp = await self.httpx.get(url)
         sel = HTMLHelper(resp.text)
-
+
         next_data = sel.select_text("script#__NEXT_DATA__")
         if not next_data:
             return []

         try:
-            data = json.loads(next_data)
-            secure_data = data["props"]["pageProps"]["secureData"]
+            data = json.loads(next_data)
+            secure_data = data["props"]["pageProps"]["secureData"]
             decoded_json = json.loads(base64.b64decode(secure_data).decode('utf-8'))

-            # secureData içindeki RelatedResults -> getEpisodeSources -> result dizisini al
             sources = decoded_json.get("RelatedResults", {}).get("getEpisodeSources", {}).get("result", [])

             seen_urls = set()
@@ -200,8 +191,8 @@ class RoketDizi(PluginBase):
                 iframe_url = "https://" + iframe_url

             iframe_url = self.fix_url(iframe_url)
-
-            # Deduplicate
+
+            # Deduplicate
             if iframe_url in seen_urls:
                 continue
             seen_urls.add(iframe_url)