KekikStream 2.4.9__py3-none-any.whl → 2.5.0__py3-none-any.whl

This diff shows the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release.


This version of KekikStream might be problematic. Click here for more details.

@@ -1,7 +1,7 @@
1
1
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
2
 
3
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
4
- import asyncio
3
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, ExtractResult, HTMLHelper
4
+ import asyncio, contextlib
5
5
 
6
6
  class SezonlukDizi(PluginBase):
7
7
  name = "SezonlukDizi"
@@ -40,10 +40,10 @@ class SezonlukDizi(PluginBase):
40
40
 
41
41
  async def _get_asp_data(self) -> dict:
42
42
  js_req = await self.httpx.get(f"{self.main_url}/js/site.min.js")
43
- js = HTMLHelper(js_req.text)
44
- alt = js.regex_first(r"dataAlternatif(.*?)\.asp")
45
- emb = js.regex_first(r"dataEmbed(.*?)\.asp")
46
-
43
+ js = HTMLHelper(js_req.text)
44
+ alt = js.regex_first(r"dataAlternatif(.*?)\.asp")
45
+ emb = js.regex_first(r"dataEmbed(.*?)\.asp")
46
+
47
47
  return {
48
48
  "alternatif": alt or "",
49
49
  "embed": emb or ""
@@ -116,7 +116,12 @@ class SezonlukDizi(PluginBase):
116
116
  href = e_sel.select_attr("a", "href", tds[3])
117
117
  if name and href:
118
118
  s, e = e_sel.extract_season_episode(f"{tds[1].text(strip=True)} {tds[2].text(strip=True)}")
119
- episodes.append(Episode(season=s or 1, episode=e or 1, title=name, url=self.fix_url(href)))
119
+ episodes.append(Episode(
120
+ season = s or 1,
121
+ episode = e or 1,
122
+ title = name,
123
+ url = self.fix_url(href)
124
+ ))
120
125
 
121
126
  return SeriesInfo(
122
127
  url = url,
@@ -131,10 +136,10 @@ class SezonlukDizi(PluginBase):
131
136
  )
132
137
 
133
138
  async def load_links(self, url: str) -> list[ExtractResult]:
134
- istek = await self.httpx.get(url)
135
- secici = HTMLHelper(istek.text)
139
+ istek = await self.httpx.get(url)
140
+ secici = HTMLHelper(istek.text)
136
141
  asp_data = await self._get_asp_data()
137
-
142
+
138
143
  bid = secici.select_attr("div#dilsec", "data-id")
139
144
  if not bid:
140
145
  return []
@@ -142,41 +147,64 @@ class SezonlukDizi(PluginBase):
142
147
  semaphore = asyncio.Semaphore(5)
143
148
  tasks = []
144
149
 
145
- async def fetch_and_extract(veri, dil_etiketi):
150
+ async def fetch_and_extract(veri, dil_etiketi) -> list[ExtractResult]:
146
151
  async with semaphore:
147
152
  try:
148
153
  embed_resp = await self.httpx.post(
149
- f"{self.main_url}/ajax/dataEmbed{asp_data['embed']}.asp",
154
+ url = f"{self.main_url}/ajax/dataEmbed{asp_data['embed']}.asp",
150
155
  headers = {"X-Requested-With": "XMLHttpRequest"},
151
156
  data = {"id": str(veri.get("id"))}
152
157
  )
153
158
  embed_secici = HTMLHelper(embed_resp.text)
154
- iframe_src = embed_secici.select_attr("iframe", "src")
155
-
156
- if iframe_src:
157
- if "link.asp" in iframe_src:
158
- return None
159
-
160
- iframe_url = self.fix_url(iframe_src)
161
- return await self.extract(iframe_url, referer=f"{self.main_url}/", prefix=f"{dil_etiketi} - {veri.get('baslik')}")
162
- except:
163
- pass
164
- return None
159
+ iframe_src = embed_secici.select_attr("iframe", "src") or embed_secici.regex_first(r'src="(.*?)"')
160
+
161
+ if not iframe_src:
162
+ return []
163
+
164
+ iframe_url = self.fix_url(iframe_src)
165
+
166
+ real_url = iframe_url
167
+ if "url=" in iframe_url:
168
+ real_url = HTMLHelper(iframe_url).regex_first(r"url=([^&]+)")
169
+ if real_url:
170
+ real_url = self.fix_url(real_url)
171
+
172
+ source_name = veri.get('baslik') or "SezonlukDizi"
173
+ full_name = f"{dil_etiketi} - {source_name}"
174
+
175
+ extracted = await self.extract(real_url, referer=f"{self.main_url}/")
176
+
177
+ if not extracted:
178
+ return []
179
+
180
+ results = []
181
+ items = extracted if isinstance(extracted, list) else [extracted]
182
+ for item in items:
183
+ item.name = full_name
184
+ results.append(item)
185
+ return results
186
+
187
+ except Exception:
188
+ return []
165
189
 
166
190
  for dil_kodu, dil_etiketi in [("1", "Altyazı"), ("0", "Dublaj")]:
167
- altyazi_resp = await self.httpx.post(
168
- f"{self.main_url}/ajax/dataAlternatif{asp_data['alternatif']}.asp",
169
- headers = {"X-Requested-With": "XMLHttpRequest"},
170
- data = {"bid": bid, "dil": dil_kodu}
171
- )
172
-
173
- try:
191
+ with contextlib.suppress(Exception):
192
+ altyazi_resp = await self.httpx.post(
193
+ url = f"{self.main_url}/ajax/dataAlternatif{asp_data['alternatif']}.asp",
194
+ headers = {"X-Requested-With": "XMLHttpRequest"},
195
+ data = {"bid": bid, "dil": dil_kodu}
196
+ )
197
+
174
198
  data_json = altyazi_resp.json()
175
199
  if data_json.get("status") == "success" and data_json.get("data"):
176
200
  for veri in data_json["data"]:
177
201
  tasks.append(fetch_and_extract(veri, dil_etiketi))
178
- except:
179
- continue
180
202
 
181
- results = await asyncio.gather(*tasks)
182
- return [r for r in results if r]
203
+ results_groups = await asyncio.gather(*tasks)
204
+
205
+ final_results = []
206
+ for group in results_groups:
207
+ if group:
208
+ final_results.extend(group)
209
+
210
+ return final_results
@@ -1,7 +1,7 @@
1
1
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
2
 
3
- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, MovieInfo, ExtractResult, HTMLHelper
4
- import json, urllib.parse
3
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode, MovieInfo, ExtractResult, HTMLHelper
4
+ import json, contextlib, asyncio
5
5
 
6
6
  class Sinefy(PluginBase):
7
7
  name = "Sinefy"
@@ -42,14 +42,14 @@ class Sinefy(PluginBase):
42
42
  else:
43
43
  full_url = f"{url}&page={page}"
44
44
 
45
- resp = await self.httpx.get(full_url)
46
- sel = HTMLHelper(resp.text)
45
+ istek = await self.httpx.get(full_url)
46
+ secici = HTMLHelper(istek.text)
47
47
 
48
48
  results = []
49
- for item in sel.select("div.poster-with-subject, div.dark-segment div.poster-md.poster"):
50
- title = sel.select_text("h2", item)
51
- href = sel.select_attr("a", "href", item)
52
- poster = sel.select_attr("img", "data-srcset", item)
49
+ for item in secici.select("div.poster-with-subject, div.dark-segment div.poster-md.poster"):
50
+ title = secici.select_text("h2", item)
51
+ href = secici.select_attr("a", "href", item)
52
+ poster = secici.select_attr("img", "data-srcset", item)
53
53
 
54
54
  if poster:
55
55
  poster = poster.split(",")[0].split(" ")[0]
@@ -69,89 +69,84 @@ class Sinefy(PluginBase):
69
69
  c_key = "ca1d4a53d0f4761a949b85e51e18f096"
70
70
  c_value = "MTc0NzI2OTAwMDU3ZTEwYmZjMDViNWFmOWIwZDViODg0MjU4MjA1ZmYxOThmZTYwMDdjMWQzMzliNzY5NzFlZmViMzRhMGVmNjgwODU3MGIyZA=="
71
71
 
72
- try:
73
- resp = await self.httpx.get(self.main_url)
74
- sel = HTMLHelper(resp.text)
72
+ with contextlib.suppress(Exception):
73
+ istek = await self.httpx.get(self.main_url)
74
+ secici = HTMLHelper(istek.text)
75
75
 
76
- cke = sel.select_attr("input[name='cKey']", "value")
77
- cval = sel.select_attr("input[name='cValue']", "value")
76
+ cke = secici.select_attr("input[name='cKey']", "value")
77
+ cval = secici.select_attr("input[name='cValue']", "value")
78
78
 
79
79
  if cke and cval:
80
80
  c_key = cke
81
81
  c_value = cval
82
82
 
83
- except Exception:
84
- pass
83
+ response = await self.httpx.post(
84
+ url = f"{self.main_url}/bg/searchcontent",
85
+ headers = {
86
+ "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0",
87
+ "Accept" : "application/json, text/javascript, */*; q=0.01",
88
+ "X-Requested-With" : "XMLHttpRequest",
89
+ "Content-Type" : "application/x-www-form-urlencoded; charset=UTF-8"
90
+ },
91
+ data = {
92
+ "cKey" : c_key,
93
+ "cValue" : c_value,
94
+ "searchTerm" : query
95
+ }
96
+ )
85
97
 
86
- post_url = f"{self.main_url}/bg/searchcontent"
87
- data = {
88
- "cKey" : c_key,
89
- "cValue" : c_value,
90
- "searchTerm" : query
91
- }
92
-
93
- headers = {
94
- "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0",
95
- "Accept" : "application/json, text/javascript, */*; q=0.01",
96
- "X-Requested-With" : "XMLHttpRequest",
97
- "Content-Type" : "application/x-www-form-urlencoded; charset=UTF-8"
98
- }
99
-
100
- response = await self.httpx.post(post_url, data=data, headers=headers)
101
-
102
- try:
98
+ with contextlib.suppress(Exception):
103
99
  # Extract JSON data from response (might contain garbage chars at start)
104
100
  raw = response.text
105
101
  json_start = raw.find('{')
106
102
  if json_start != -1:
107
103
  clean_json = raw[json_start:]
108
- data = json.loads(clean_json)
109
-
104
+ data = json.loads(clean_json)
105
+
110
106
  results = []
111
107
  # Result array is in data['data']['result']
112
108
  res_array = data.get("data", {}).get("result", [])
113
-
109
+
114
110
  if not res_array:
115
111
  # Fallback manual parsing ?
116
112
  pass
117
113
 
118
114
  for item in res_array:
119
- name = item.get("object_name")
120
- slug = item.get("used_slug")
115
+ name = item.get("object_name")
116
+ slug = item.get("used_slug")
121
117
  poster = item.get("object_poster_url")
122
-
118
+
123
119
  if name and slug:
124
120
  if "cdn.ampproject.org" in poster:
125
121
  poster = "https://images.macellan.online/images/movie/poster/180/275/80/" + poster.split("/")[-1]
126
-
122
+
127
123
  results.append(SearchResult(
128
- title=name,
129
- url=self.fix_url(slug),
130
- poster=self.fix_url(poster)
124
+ title = name,
125
+ url = self.fix_url(slug),
126
+ poster = self.fix_url(poster)
131
127
  ))
132
128
  return results
133
129
 
134
- except Exception:
135
- pass
136
130
  return []
137
131
 
138
132
  async def load_item(self, url: str) -> SeriesInfo | MovieInfo:
139
- resp = await self.httpx.get(url)
140
- sel = HTMLHelper(resp.text)
133
+ istek = await self.httpx.get(url)
134
+ secici = HTMLHelper(istek.text)
141
135
 
142
- title = sel.select_direct_text("h1")
143
- poster_attr = sel.select_attr("img.series-profile-thumb", "data-srcset") or sel.select_attr("img.series-profile-thumb", "srcset")
136
+ title = secici.select_direct_text("h1")
137
+ poster_attr = secici.select_attr("img.series-profile-thumb", "data-srcset") or secici.select_attr("img.series-profile-thumb", "srcset")
144
138
  if poster_attr:
145
139
  # "url 1x, url 2x" -> en sondakini (en yüksek kalite) al
146
140
  poster = poster_attr.split(",")[-1].strip().split(" ")[0]
147
141
  else:
148
- poster = sel.select_poster("img.series-profile-thumb")
149
- description = sel.select_text("p#tv-series-desc")
150
- tags = sel.select_texts("div.item.categories a")
151
- rating = sel.select_text("span.color-imdb")
152
- actors = sel.select_texts("div.content h5")
153
- year = sel.extract_year("div.truncate")
154
- duration = sel.regex_first(r"(\d+)", sel.select_text(".media-meta td:last-child"))
142
+ poster = secici.select_poster("img.series-profile-thumb")
143
+
144
+ description = secici.select_text("p#tv-series-desc")
145
+ tags = secici.select_texts("div.item.categories a")
146
+ rating = secici.select_text("span.color-imdb")
147
+ actors = secici.select_texts("div.content h5")
148
+ year = secici.extract_year("div.truncate")
149
+ duration = secici.regex_first(r"(\d+)", secici.select_text(".media-meta td:last-child"))
155
150
  if duration == year or int(duration) < 40:
156
151
  duration = None
157
152
 
@@ -168,40 +163,136 @@ class Sinefy(PluginBase):
168
163
  }
169
164
 
170
165
  episodes = []
171
- for tab in sel.select("div.ui.tab"):
172
- for link in sel.select("a[href*='bolum']", tab):
166
+ for tab in secici.select("div.ui.tab"):
167
+ for link in secici.select("a[href*='bolum']", tab):
173
168
  href = link.attrs.get("href")
174
169
  if href:
175
- s, e = sel.extract_season_episode(href)
176
- name = sel.select_text("div.content div.header", link) or link.text(strip=True)
170
+ s, e = secici.extract_season_episode(href)
171
+ name = secici.select_text("div.content div.header", link) or link.text(strip=True)
177
172
  episodes.append(Episode(season=s or 1, episode=e or 1, title=name, url=self.fix_url(href)))
178
-
173
+
179
174
  if episodes:
180
175
  return SeriesInfo(**common_info, episodes=episodes)
181
176
 
182
177
  return MovieInfo(**common_info)
183
178
 
184
- async def load_links(self, url: str) -> list[ExtractResult]:
185
- resp = await self.httpx.get(url)
186
- sel = HTMLHelper(resp.text)
187
-
188
- iframe = sel.select_attr("iframe", "src")
179
+ def _find_iframe(self, secici: HTMLHelper) -> str | None:
180
+ """Sayfa kaynağındaki video iframe adresini bulur."""
181
+ src = secici.select_attr("iframe", "src") or \
182
+ secici.select_attr("iframe", "data-src") or \
183
+ secici.regex_first(r'<iframe[^>]+src="([^"]+)"')
184
+ return self.fix_url(src) if src else None
189
185
 
190
- if not iframe:
191
- return []
192
-
193
- iframe_url = self.fix_url(iframe)
194
-
195
- # Try to extract actual video URL, fallback to raw iframe if fails
186
+ async def _process_source(self, source: dict, subtitles: list) -> list[ExtractResult]:
187
+ """Tekil bir kaynağı işleyip sonucu döndürür."""
188
+ target_url = source["url"]
189
+ name = source["name"]
190
+
191
+ # Eğer direkt iframe değilse (Sayfa linki ise), önce iframe'i bul
192
+ if not source.get("is_main"):
193
+ try:
194
+ resp = await self.httpx.get(target_url)
195
+ temp_sel = HTMLHelper(resp.text)
196
+
197
+ if not (iframe_url := self._find_iframe(temp_sel)):
198
+ return []
199
+
200
+ target_url = iframe_url
201
+
202
+ # Tab (Dil Seçeneği) ise, gittiğimiz sayfadaki aktif player ismini ekle
203
+ if source.get("is_tab"):
204
+ p_name = temp_sel.select_text("div.alternatives-for-this div.playeritems.active") or "PUB"
205
+ name = f"{name} | {p_name}"
206
+ except Exception:
207
+ return []
208
+
209
+ # Linki Extract Et
196
210
  try:
197
- result = await self.extract(iframe_url)
198
- if result:
199
- return [result] if not isinstance(result, list) else result
211
+ extracted = await self.extract(target_url, referer=self.main_url)
212
+ if not extracted:
213
+ return []
214
+
215
+ items = extracted if isinstance(extracted, list) else [extracted]
216
+
217
+ # Sonuçları işle (İsim ver, altyazı ekle)
218
+ copy_subtitles = list(subtitles) # Her item için kopyasını kullan
219
+ for item in items:
220
+ item.name = name
221
+ if copy_subtitles:
222
+ if not item.subtitles:
223
+ item.subtitles = copy_subtitles
224
+ else:
225
+ item.subtitles.extend(copy_subtitles)
226
+
227
+ return items
200
228
  except Exception:
201
- pass
202
-
203
- # Fallback: return raw iframe URL
204
- return [ExtractResult(
205
- url = iframe_url,
206
- name = "Sinefy Player"
207
- )]
229
+ return []
230
+
231
+ async def load_links(self, url: str) -> list[ExtractResult]:
232
+ istek = await self.httpx.get(url)
233
+ secici = HTMLHelper(istek.text)
234
+
235
+ # 1. Altyazıları Topla
236
+ subtitles = []
237
+ for track in secici.select("track"):
238
+ if track.attrs.get("kind") in ("subtitles", "captions"):
239
+ if src := track.attrs.get("src"):
240
+ lang = track.attrs.get("label") or track.attrs.get("srclang") or "Altyazı"
241
+ subtitles.append(self.new_subtitle(src, lang))
242
+
243
+ sources = []
244
+
245
+ # Aktif Sayfa Bilgileri
246
+ active_tab_name = secici.select_text("div#series-tabs a.active") or "Sinefy"
247
+ active_player = secici.select_text("div.alternatives-for-this div.playeritems.active") or "PUB"
248
+
249
+ # A) Ana Video (Main Iframe)
250
+ if main_iframe := self._find_iframe(secici):
251
+ sources.append({
252
+ "url" : main_iframe,
253
+ "name" : f"{active_tab_name} | {active_player}",
254
+ "is_main" : True,
255
+ "is_tab" : False
256
+ })
257
+
258
+ # B) Alternatif Playerlar (Mevcut Sayfa Player Butonları)
259
+ for btn in secici.select("div.alternatives-for-this div.playeritems:not(.active) a"):
260
+ if href := btn.attrs.get("href"):
261
+ if "javascript" not in href:
262
+ sources.append({
263
+ "url" : self.fix_url(href),
264
+ "name" : f"{active_tab_name} | {btn.text(strip=True)}",
265
+ "is_main" : False,
266
+ "is_tab" : False
267
+ })
268
+
269
+ # C) Diğer Dil Seçenekleri (Tabs - Sekmeler)
270
+ for tab in secici.select("div#series-tabs a:not(.active)"):
271
+ if href := tab.attrs.get("href"):
272
+ sources.append({
273
+ "url" : self.fix_url(href),
274
+ "name" : tab.text(strip=True),
275
+ "is_main" : False,
276
+ "is_tab" : True
277
+ })
278
+
279
+ # 2. Kaynakları Paralel İşle
280
+ tasks = [self._process_source(src, subtitles) for src in sources]
281
+ results_groups = await asyncio.gather(*tasks)
282
+
283
+ # 3. Sonuçları Birleştir
284
+ final_results = []
285
+ for group in results_groups:
286
+ if group:
287
+ final_results.extend(group)
288
+
289
+ # 4. Duplicate Temizle (URL + İsim Kombinasyonu)
290
+ unique_results = []
291
+ seen = set()
292
+ for res in final_results:
293
+ key = (res.url, res.name)
294
+ if res.url and key not in seen:
295
+ unique_results.append(res)
296
+ seen.add(key)
297
+
298
+ return unique_results