KekikStream 2.3.9-py3-none-any.whl → 2.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -113,8 +113,8 @@ class HTMLHelper:
  return int(m.group(1)), int(m.group(2))

  # Search for season and episode separately
- s = re.search(r"(\d+)\.\s*[Ss]ezon|[Ss]ezon[- ]?(\d+)|-(\d+)-sezon", text, re.I)
- e = re.search(r"(\d+)\.\s*[Bb]ölüm|[Bb]olum[- ]?(\d+)|-(\d+)-bolum|[Ee](\d+)", text, re.I)
+ s = re.search(r"(\d+)\.\s*[Ss]ezon|[Ss]ezon[- ]?(\d+)|-(\d+)-sezon|S(\d+)|(\d+)\.[Ss]", text, re.I)
+ e = re.search(r"(\d+)\.\s*[Bb][öo]l[üu]m|[Bb][öo]l[üu]m[- ]?(\d+)|-(\d+)-bolum|[Ee](\d+)", text, re.I)

  # Take the first non-None captured group
  s_val = next((int(g) for g in s.groups() if g), None) if s else None
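The new season alternations also catch bare "S2" / "2.S" style markers, and the episode pattern now accepts both ö/o and ü/u spellings in every branch. A minimal standalone check of the new patterns (the sample strings and the first_group helper below are illustrative, not part of the package):

    import re

    # New season/episode patterns copied from the 2.4.1 hunk above
    SEZON = r"(\d+)\.\s*[Ss]ezon|[Ss]ezon[- ]?(\d+)|-(\d+)-sezon|S(\d+)|(\d+)\.[Ss]"
    BOLUM = r"(\d+)\.\s*[Bb][öo]l[üu]m|[Bb][öo]l[üu]m[- ]?(\d+)|-(\d+)-bolum|[Ee](\d+)"

    def first_group(match):
        # Mirrors the "first non-None captured group" step shown in the hunk
        return next((int(g) for g in match.groups() if g), None) if match else None

    for text in ("2.Sezon 5.Bölüm", "dizi-1-sezon-3-bolum", "S2E7", "3.S 4.Bolum"):
        s = first_group(re.search(SEZON, text, re.I))
        e = first_group(re.search(BOLUM, text, re.I))
        print(text, "->", s, e)   # e.g. "S2E7 -> 2 7"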
@@ -51,9 +51,6 @@ class Episode(BaseModel):
  if not self.title:
      self.title = ""

- if any(keyword in self.title.lower() for keyword in ["bölüm", "sezon", "episode"]):
-     self.title = ""
-
  return self

  class SeriesInfo(BaseModel):
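The effect of the removal: a title such as "1.Sezon 2.Bölüm" previously tripped the keyword check and was reset to an empty string; from 2.4.1 it is kept as-is. A rough before/after illustration of just that condition (plain strings, not the pydantic model itself):

    title = "1.Sezon 2.Bölüm"

    # 2.3.9 behaviour: keyword match blanked the title
    old_title = "" if any(k in title.lower() for k in ["bölüm", "sezon", "episode"]) else title   # -> ""
    # 2.4.1 behaviour: no keyword filter, the title survives
    new_title = title                                                                             # -> "1.Sezon 2.Bölüm"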
@@ -0,0 +1,62 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, HTMLHelper
+ from Kekik.Sifreleme import AESManager
+ import re
+ import json
+
+ class HDMomPlayer(ExtractorBase):
+     name     = "HDMomPlayer"
+     main_url = "hdmomplayer.com"
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult | None:
+         if referer:
+             self.httpx.headers.update({"Referer": referer})
+
+         try:
+             response    = await self.httpx.get(url)
+             page_source = response.text
+
+             m3u_link = None
+
+             # Regex for bePlayer matches
+             # Matches: bePlayer('PASS', '{DATA}');
+             helper     = HTMLHelper(page_source)
+             be_matches = helper.regex_all(r"bePlayer\('([^']+)',\s*'(\{[^\}]+\})'\);")
+
+             if be_matches:
+                 pass_val, data_val = be_matches[0]
+
+                 try:
+                     # Use Kekik.Sifreleme.AESManager as requested
+                     decrypted = AESManager.decrypt(data_val, pass_val).replace("\\", "")
+
+                     # Search for video_location in the decrypted string
+                     # Kotlin: video_location":"([^"]+)
+                     m_loc = re.search(r'video_location":"([^"]+)"', decrypted)
+                     if m_loc:
+                         m3u_link = m_loc.group(1).replace(r"\/", "/")
+                 except Exception:
+                     pass
+
+             if not m3u_link:
+                 # Fallback regex
+                 # file:"..."
+                 m_file = re.search(r'file:"([^"]+)"', page_source)
+                 if m_file:
+                     m3u_link = m_file.group(1)
+
+             if not m3u_link:
+                 return None
+
+             # Fix URL if needed
+             if m3u_link.startswith("//"):
+                 m3u_link = "https:" + m3u_link
+
+             return ExtractResult(
+                 name    = "HDMomPlayer",
+                 url     = m3u_link,
+                 referer = url,
+             )
+         except Exception:
+             return None
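For reference, the core of this extractor is two regex passes: capture the bePlayer('PASS', '{DATA}') arguments, then pull video_location out of the AES-decrypted payload. A sketch against a synthetic page (the decryption step is stubbed here; the real code calls AESManager.decrypt(data_val, pass_val) as shown above):

    import re

    page_source = """<script>bePlayer('s3cret', '{"ct":"...","iv":"...","s":"..."}');</script>"""

    # Same capture pattern as HDMomPlayer.extract
    be = re.search(r"bePlayer\('([^']+)',\s*'(\{[^\}]+\})'\);", page_source)
    pass_val, data_val = be.groups()

    # Stand-in for AESManager.decrypt(data_val, pass_val) -- assume it yields plain JSON
    decrypted = '{"video_location":"https:\\/\\/cdn.example\\/video.m3u8"}'

    m_loc = re.search(r'video_location":"([^"]+)"', decrypted)
    m3u_link = m_loc.group(1).replace("\\/", "/") if m_loc else None
    print(m3u_link)   # https://cdn.example/video.m3u8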
@@ -0,0 +1,45 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper
+ from Kekik.Sifreleme import AESManager
+ import re
+ import json
+
+ class HotStream(ExtractorBase):
+     name     = "HotStream"
+     main_url = "https://hotstream.club"
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult | None:
+         if referer:
+             self.httpx.headers.update({"Referer": referer})
+
+         istek  = await self.httpx.get(url)
+         html   = istek.text
+         helper = HTMLHelper(html)
+
+         m = re.search(r"bePlayer\('([^']+)',\s*'(\{[^']+\})'\)", html)
+         if not m:
+             # Try double quotes just in case
+             m = re.search(r'bePlayer\("([^"]+)",\s*"(\{[^"]+\})"\)', html)
+
+         if m:
+             pass_val = m.group(1)
+             data_val = m.group(2)
+
+             try:
+                 decrypted = AESManager.decrypt(data_val, pass_val)
+                 if decrypted:
+                     decrypted = decrypted.replace("\\", "")
+                     # Search for video_location in the decrypted string
+                     m_loc = re.search(r'"video_location":"([^"]+)"', decrypted)
+                     if m_loc:
+                         video_url = m_loc.group(1).replace(r"\/", "/")
+                         return ExtractResult(
+                             name    = self.name,
+                             url     = video_url,
+                             referer = url
+                         )
+             except Exception:
+                 pass
+
+         return None
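HotStream uses the same bePlayer scheme as HDMomPlayer above; the only wrinkle is trying both quoting styles. A compact way to express that fallback, functionally equivalent to the two re.search calls and shown only as a sketch:

    import re

    PATTERNS = (
        r"bePlayer\('([^']+)',\s*'(\{[^']+\})'\)",      # single-quoted arguments
        r'bePlayer\("([^"]+)",\s*"(\{[^"]+\})"\)',      # double-quoted fallback
    )

    def find_beplayer_args(html: str):
        # Return (pass_val, data_val) from the first pattern that matches, else None
        for pattern in PATTERNS:
            if m := re.search(pattern, html):
                return m.group(1), m.group(2)
        return None

    print(find_beplayer_args("""bePlayer('key', '{"a":1}')"""))   # ('key', '{"a":1}')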
@@ -0,0 +1,115 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper, Subtitle
+ from urllib.parse import quote
+ from json import loads
+ import re
+
+ class Videostr(ExtractorBase):
+     name     = "Videostr"
+     main_url = "videostr.net"
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult | None:
+         headers = {
+             "Accept"           : "*/*",
+             "X-Requested-With" : "XMLHttpRequest",
+             "Referer"          : "https://videostr.net",
+         }
+
+         # 1. Get nonce page
+         # Kotlin: url.substringAfterLast("/").substringBefore("?")
+         id = url.split("?")[0].split("/")[-1]
+
+         istek = await self.httpx.get(url, headers=headers)
+         if istek.status_code != 200:
+             return None
+
+         responsenonce = istek.text
+
+         # Find nonce
+         # Regex: \b[a-zA-Z0-9]{48}\b
+         # or 3 blocks of 16 chars
+         helper = HTMLHelper(responsenonce)
+         nonce  = helper.regex_first(r"\b[a-zA-Z0-9]{48}\b")
+         if not nonce:
+             # Fallback regex as per Kotlin: \b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b
+             # In Python we search for the three 16-character blocks and concatenate them
+             m = re.search(r"\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b", responsenonce, re.DOTALL)
+             if m:
+                 nonce = m.group(1) + m.group(2) + m.group(3)
+
+         if not nonce:
+             return None
+
+         # 2. Get Sources
+         api_url = f"https://videostr.net/embed-1/v3/e-1/getSources?id={id}&_k={nonce}"
+
+         api_resp = await self.httpx.get(api_url, headers=headers)
+         if api_resp.status_code != 200:
+             return None
+
+         # Parse JSON
+         try:
+             data = api_resp.json()
+         except Exception:
+             return None
+
+         sources = data.get("sources", [])
+         if not sources:
+             return None
+
+         encrypted_file = sources[0].get("file")
+         if not encrypted_file:
+             return None
+
+         m3u8_url = None
+
+         if ".m3u8" in encrypted_file:
+             m3u8_url = encrypted_file
+         else:
+             # Decrypt
+             # Need the key from GitHub
+             key_url  = "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"
+             key_resp = await self.httpx.get(key_url)
+             if key_resp.status_code == 200:
+                 try:
+                     keys       = key_resp.json()
+                     vidstr_key = keys.get("vidstr")  # As per Kotlin code: gson.fromJson(keyJson, Megakey::class.java)?.vidstr
+
+                     if vidstr_key:
+                         # Use Google Script to decrypt
+                         decode_url = "https://script.google.com/macros/s/AKfycbxHbYHbrGMXYD2-bC-C43D3njIbU-wGiYQuJL61H4vyy6YVXkybMNNEPJNPPuZrD1gRVA/exec"
+
+                         full_url = f"{decode_url}?encrypted_data={quote(encrypted_file)}&nonce={quote(nonce)}&secret={quote(vidstr_key)}"
+
+                         decrypted_resp = await self.httpx.get(full_url)
+                         if decrypted_resp.status_code == 200:
+                             # Response usually contains JSON like {"file": "..."}
+                             # Kotlin: Regex("\"file\":\"(.*?)\"").find(decryptedResponse)
+                             m_file = re.search(r'"file":"(.*?)"', decrypted_resp.text)
+                             if m_file:
+                                 m3u8_url = m_file.group(1).replace(r"\/", "/")
+                 except Exception as e:
+                     # print(f"Decryption error: {e}")
+                     pass
+
+         if not m3u8_url:
+             return None
+
+         # Subtitles
+         # Kotlin: response.tracks
+         subtitles = []
+         tracks    = data.get("tracks", [])
+         for t in tracks:
+             if t.get("kind") in ["captions", "subtitles"]:
+                 subtitles.append(Subtitle(
+                     name = t.get("label", "Altyazı"),
+                     url  = t.get("file")
+                 ))
+
+         return ExtractResult(
+             name      = "Videostr",
+             url       = m3u8_url,
+             referer   = "https://videostr.net/",
+             subtitles = subtitles
+         )
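The nonce handling is the subtle part: prefer a single 48-character token, otherwise stitch together three 16-character blocks found anywhere in the page. A standalone sketch of just that fallback (the HTML snippet is made up; the real code then calls the getSources endpoint with ?id=...&_k=<nonce>):

    import re

    html = '<script>window.a="AAAABBBBCCCCDDDD";b="EEEEFFFFGGGGHHHH";c="IIIIJJJJKKKKLLLL";</script>'

    # Preferred form: one 48-character alphanumeric token
    m48 = re.search(r"\b[a-zA-Z0-9]{48}\b", html)
    nonce = m48.group(0) if m48 else None

    if not nonce:
        # Fallback mirrored from the extractor: three 16-character blocks, concatenated
        m16 = re.search(r"\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b", html, re.DOTALL)
        if m16:
            nonce = "".join(m16.groups())

    print(nonce)   # AAAABBBBCCCCDDDDEEEEFFFFGGGGHHHHIIIIJJJJKKKKLLLL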
@@ -0,0 +1,25 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper
+
+ class Vidoza(ExtractorBase):
+     name     = "Vidoza"
+     main_url = "https://vidoza.net"
+
+     async def extract(self, url: str, referer: str = None) -> ExtractResult | None:
+         if referer:
+             self.httpx.headers.update({"Referer": referer})
+
+         istek  = await self.httpx.get(url)
+         helper = HTMLHelper(istek.text)
+
+         video_url = helper.select_attr("source", "src")
+
+         if video_url:
+             return ExtractResult(
+                 name    = self.name,
+                 url     = video_url,
+                 referer = url
+             )
+
+         return None
@@ -0,0 +1,247 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, HTMLHelper
+ from json import dumps, loads
+ import re
+
+ class DiziMom(PluginBase):
+     name        = "DiziMom"
+     language    = "tr"
+     main_url    = "https://www.dizimom.one"
+     favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+     description = "Binlerce yerli yabancı dizi arşivi, tüm sezonlar, kesintisiz bölümler. Sadece dizi izle, Dizimom heryerde seninle!"
+
+     main_page = {
+         f"{main_url}/tum-bolumler/page"       : "Son Bölümler",
+         f"{main_url}/yerli-dizi-izle/page"    : "Yerli Diziler",
+         f"{main_url}/yabanci-dizi-izle/page"  : "Yabancı Diziler",
+         f"{main_url}/tv-programlari-izle/page" : "TV Programları",
+     }
+
+     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         istek  = await self.httpx.get(f"{url}/{page}/")
+         helper = HTMLHelper(istek.text)
+
+         if "/tum-bolumler/" in url:
+             items   = helper.select("div.episode-box")
+             results = []
+             for item in items:
+                 name_el = helper.select_first("div.episode-name a", item)
+                 if not name_el: continue
+                 name  = name_el.text(strip=True).split(" izle")[0]
+                 title = name.replace(".Sezon ", "x").replace(".Bölüm", "")
+
+                 ep_href = self.fix_url(name_el.attrs.get("href"))
+                 pass
+
+             # Revert to standard categories if "tum-bolumler" is complex
+             return []
+         else:
+             items = helper.select("div.single-item")
+             return [
+                 MainPageResult(
+                     category = category,
+                     title    = helper.select_text("div.categorytitle a", item).split(" izle")[0],
+                     url      = self.fix_url(helper.select_attr("div.categorytitle a", "href", item)),
+                     poster   = self.fix_url(helper.select_attr("div.cat-img img", "src", item))
+                 )
+                 for item in items
+             ]
+
+     async def search(self, query: str) -> list[SearchResult]:
+         url    = f"{self.main_url}/?s={query}"
+         istek  = await self.httpx.get(url)
+         helper = HTMLHelper(istek.text)
+         items  = helper.select("div.single-item")
+
+         return [
+             SearchResult(
+                 title  = helper.select_text("div.categorytitle a", item).split(" izle")[0],
+                 url    = self.fix_url(helper.select_attr("div.categorytitle a", "href", item)),
+                 poster = self.fix_url(helper.select_attr("div.cat-img img", "src", item))
+             )
+             for item in items
+         ]
+
+     async def load_item(self, url: str) -> SeriesInfo:
+         istek  = await self.httpx.get(url)
+         helper = HTMLHelper(istek.text)
+
+         title_raw = helper.select_text("div.title h1")
+         title     = title_raw.split(" izle")[0] if title_raw else "Bilinmiyor"
+
+         poster = self.fix_url(helper.select_attr("div.category_image img", "src"))
+
+         # Custom extraction for fields that were using xpath_text
+         year   = None
+         rating = None
+         actors = None
+
+         # Regex approach based on debug output (multiline support)
+         # Context: <span class="dizimeta"><i class="fas fa-globe"></i> Yapım Yılı : </span>\n 2022
+         year_val_all = helper.regex_all(r"Yapım Yılı\s*:\s*(?:</span>)?\s*(\d{4})", flags=re.DOTALL)
+         if year_val_all:
+             year = int(year_val_all[0])
+
+         # Context: <span class="dizimeta"><i class="fas fa-star"></i> IMDB : </span>\n 4.5
+         rating_val_all = helper.regex_all(r"IMDB\s*:\s*(?:</span>)?\s*([\d\.]+)", flags=re.DOTALL)
+         if rating_val_all:
+             rating = rating_val_all[0]
+
+         actors_val = helper.regex_first(r"Oyuncular\s*:\s*(.+?)(?:</div>|<br|$)")
+         if not actors_val:
+             # Try selecting the div text directly if regex fails due to HTML tags
+             # Find div containing "Oyuncular :"
+             all_divs = helper.select("div")
+             for div in all_divs:
+                 txt = div.text()
+                 if "Oyuncular :" in txt:
+                     actors_val = txt.split("Oyuncular :")[1].strip()
+                     break
+
+         if actors_val:
+             # Remove footer / junk from actors
+             if "IMDB :" in actors_val:
+                 actors_val = actors_val.split("IMDB :")[0].strip()
+
+             if "IMDB :" in actors_val:
+                 actors_val = actors_val.split("IMDB :")[0].strip()
+
+             # Remove '×' and other junk if present at end
+             if "×" in actors_val:
+                 actors_val = actors_val.split("×")[0].strip()
+
+             # Remove simple tags if any remaining
+             clean_actors = [a.strip() for a in actors_val.split(",")]
+             # Filter empty
+             clean_actors = [a for a in clean_actors if a]
+
+             actors = ", ".join(clean_actors)
+
+         description_raw = helper.select_text("div.category_desc")
+         description     = None
+         if description_raw:
+             # Clean header "The Librarians izle..." etc. if present, usually it is fine.
+             # Clean "IMDB :" if attached
+             if "IMDB :" in description_raw:
+                 description_raw = description_raw.split("IMDB :")[0].strip()
+
+             # Clean footer text start
+             # The footer block usually starts with "Dizimom, dizi ve film..."
+             if "Dizimom," in description_raw:
+                 description = description_raw.split("Dizimom,")[0].strip()
+             elif "dizi izle film izle" in description_raw:
+                 description = description_raw.split("dizi izle film izle")[0].strip()
+             else:
+                 description = description_raw
+
+         # Fallback cleanup for JSON
+         if description and "{" in description:
+             description = description.split("{")[0].strip()
+
+         tags = helper.select_all_text("div.genres a")
+
+         # Improved year regex
+         if not year:
+             # Look for "Yapım Yılı : 2014" pattern in ANY text
+             # Get all text from category_text which usually contains it
+             meta_text = helper.select_text("div.category_text")
+             if meta_text:
+                 match = re.search(r"Yapım Yılı\s*:\s*(\d{4})", meta_text)
+                 if match:
+                     year = int(match.group(1))
+
+         episodes = []
+         ep_items = helper.select("div.bolumust")
+         for item in ep_items:
+             ep_name_raw = helper.select_text("div.baslik", item)
+             ep_href     = self.fix_url(helper.select_attr("a", "href", item))
+
+             if ep_name_raw:
+                 # 1.Sezon 1.Bölüm
+                 s_m = re.search(r"(\d+)\.Sezon", ep_name_raw)
+                 e_m = re.search(r"(\d+)\.Bölüm", ep_name_raw)
+
+                 season  = int(s_m.group(1)) if s_m else 1
+                 episode = int(e_m.group(1)) if e_m else 1
+
+                 name = ep_name_raw.split(" izle")[0].replace(title, "").strip()
+
+                 episodes.append(Episode(
+                     season  = season,
+                     episode = episode,
+                     title   = name,
+                     url     = ep_href
+                 ))
+
+         return SeriesInfo(
+             url         = url,
+             poster      = poster,
+             title       = title,
+             description = description,
+             tags        = tags,
+             rating      = rating,
+             year        = str(year) if year else None,
+             actors      = actors,
+             episodes    = episodes
+         )
+
+     async def load_links(self, url: str) -> list[ExtractResult]:
+         # Login simulation headers
+         headers = {
+             "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
+             "sec-ch-ua": 'Not/A)Brand";v="8", "Chromium";v="137", "Google Chrome";v="137"',
+             "sec-ch-ua-mobile": "?1",
+             "sec-ch-ua-platform": "Android"
+         }
+
+         # Simulate login (as seen in Kotlin)
+         login_url  = f"{self.main_url}/wp-login.php"
+         login_data = {
+             "log": "keyiflerolsun",
+             "pwd": "12345",
+             "rememberme": "forever",
+             "redirect_to": self.main_url
+         }
+
+         await self.httpx.post(login_url, headers=headers, data=login_data)
+
+         istek  = await self.httpx.get(url, headers=headers)
+         helper = HTMLHelper(istek.text)
+
+         iframes = []
+
+         main_iframe = helper.select_attr("iframe[src]", "src")
+         if main_iframe:
+             iframes.append(main_iframe)
+
+         sources = helper.select("div.sources a")
+         for source in sources:
+             href = source.attrs.get("href")
+             if href:
+                 sub_istek  = await self.httpx.get(href, headers=headers)
+                 sub_helper = HTMLHelper(sub_istek.text)
+                 sub_iframe = sub_helper.select_attr("div.video p iframe", "src")
+                 if sub_iframe:
+                     iframes.append(sub_iframe)
+
+         results = []
+         for iframe_url in iframes:
+             # Check for known extractors
+             if iframe_url.startswith("//"):
+                 iframe_url = f"https:{iframe_url}"
+
+             extract_result = await self.extract(iframe_url)
+             if extract_result:
+                 if isinstance(extract_result, list):
+                     results.extend(extract_result)
+                 else:
+                     results.append(extract_result)
+             else:
+                 results.append(ExtractResult(
+                     url     = iframe_url,
+                     name    = f"{self.name} | External",
+                     referer = self.main_url
+                 ))
+
+         return results
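DiziMom's episode list parsing boils down to two fixed-format regexes plus a title cleanup. A standalone illustration with a made-up listing title (the real values come from div.baslik elements as shown in load_item above):

    import re

    title       = "Örnek Dizi"                     # series title, as taken from div.title h1
    ep_name_raw = "Örnek Dizi 3.Sezon 7.Bölüm izle"

    s_m = re.search(r"(\d+)\.Sezon", ep_name_raw)
    e_m = re.search(r"(\d+)\.Bölüm", ep_name_raw)

    season  = int(s_m.group(1)) if s_m else 1
    episode = int(e_m.group(1)) if e_m else 1

    # Same cleanup as load_item(): drop the " izle" suffix and the series name
    name = ep_name_raw.split(" izle")[0].replace(title, "").strip()

    print(season, episode, repr(name))   # 3 7 '3.Sezon 7.Bölüm'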
@@ -0,0 +1,140 @@
+ # This tool was written by @keyiflerolsun | for @KekikAkademi.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, ExtractResult, HTMLHelper
+ from json import dumps, loads
+ import re
+
+ class FilmEkseni(PluginBase):
+     name        = "FilmEkseni"
+     language    = "tr"
+     main_url    = "https://filmekseni.cc"
+     favicon     = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+     description = "Film Ekseni ⚡️ Vizyonda ki, en güncel ve en yeni filmleri full hd kalitesinde türkçe dublaj ve altyazı seçenekleriyle 1080p olarak izleyebileceğiniz adresiniz."
+
+     main_page = {
+         f"{main_url}/tur/aile-filmleri/page"       : "Aile Filmleri",
+         f"{main_url}/tur/aksiyon-filmleri/page"    : "Aksiyon Filmleri",
+         f"{main_url}/tur/animasyon-film-izle/page" : "Animasyon Filmleri",
+         f"{main_url}/tur/bilim-kurgu-filmleri/page": "Bilim Kurgu Filmleri",
+         f"{main_url}/tur/biyografi-filmleri/page"  : "Biyografi Filmleri",
+         f"{main_url}/tur/dram-filmleri-izle/page"  : "Dram Filmleri",
+         f"{main_url}/tur/fantastik-filmler/page"   : "Fantastik Filmleri",
+         f"{main_url}/tur/gerilim-filmleri/page"    : "Gerilim Filmleri",
+         f"{main_url}/tur/gizem-filmleri/page"      : "Gizem Filmleri",
+         f"{main_url}/tur/komedi-filmleri/page"     : "Komedi Filmleri",
+         f"{main_url}/tur/korku-filmleri/page"      : "Korku Filmleri",
+         f"{main_url}/tur/macera-filmleri/page"     : "Macera Filmleri",
+         f"{main_url}/tur/romantik-filmler/page"    : "Romantik Filmleri",
+         f"{main_url}/tur/savas-filmleri/page"      : "Savaş Filmleri",
+         f"{main_url}/tur/suc-filmleri/page"        : "Suç Filmleri",
+         f"{main_url}/tur/tarih-filmleri/page"      : "Tarih Filmleri",
+     }
+
+     async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+         istek   = await self.httpx.get(f"{url}/{page}/")
+         helper  = HTMLHelper(istek.text)
+         posters = helper.select("div.poster")
+
+         return [
+             MainPageResult(
+                 category = category,
+                 title    = self.clean_title(helper.select_text("h2", veri)),
+                 url      = helper.select_attr("a", "href", veri),
+                 poster   = helper.select_attr("img", "data-src", veri)
+             )
+             for veri in posters
+         ]
+
+     async def search(self, query: str) -> list[SearchResult]:
+         url     = f"{self.main_url}/search/"
+         headers = {
+             "X-Requested-With" : "XMLHttpRequest",
+             "Content-Type"     : "application/x-www-form-urlencoded; charset=UTF-8",
+             "Referer"          : self.main_url,
+         }
+         data = {"query": query}
+
+         istek   = await self.httpx.post(url, headers=headers, data=data)
+         veriler = istek.json().get("result", [])
+
+         return [
+             SearchResult(
+                 title  = veri.get("title"),
+                 url    = f"{self.main_url}/{veri.get('slug')}",
+                 poster = f"{self.main_url}/uploads/poster/{veri.get('cover')}" if veri.get('cover') else None,
+             )
+             for veri in veriler
+         ]
+
+     async def load_item(self, url: str) -> MovieInfo:
+         istek  = await self.httpx.get(url)
+         helper = HTMLHelper(istek.text)
+
+         raw_title = helper.select_text("div.page-title h1")
+         title     = raw_title.replace(" izle", "").strip() if raw_title else "Bilinmiyor"
+
+         poster      = helper.select_attr("picture.poster-auto > source:nth-child(2)", "data-srcset")
+         description = helper.select_text("article.text-white")
+         year        = helper.select_text("strong a")
+
+         tags_raw = helper.select_all_text("div.pb-2")
+         tags     = []
+         for tag_str in tags_raw:
+             if tag_str.startswith("Tür:"):
+                 tags.extend([t.strip() for t in tag_str.replace("Tür:", "").split(",")])
+
+         rating = helper.select_text("div.rate")
+
+         duration      = None
+         duration_text = helper.select_text("div.d-flex.flex-column.text-nowrap")
+         if duration_text:
+             m = re.search(r"(\d+)", duration_text)
+             if m:
+                 duration = int(m.group(1))
+
+         actors_raw = helper.select("div.card-body.p-0.pt-2 .story-item")
+         actors     = []
+         for actor in actors_raw:
+             name = helper.select_text(".story-item-title", actor)
+             if name:
+                 actors.append(name)
+
+         return MovieInfo(
+             url         = url,
+             poster      = self.fix_url(poster),
+             title       = title,
+             description = description,
+             tags        = tags,
+             rating      = rating,
+             year        = year,
+             actors      = actors if actors else None,
+             duration    = duration
+         )
+
+     async def load_links(self, url: str) -> list[ExtractResult]:
+         istek  = await self.httpx.get(url)
+         helper = HTMLHelper(istek.text)
+
+         iframe = helper.select_first("div.card-video iframe")
+         if not iframe:
+             return []
+
+         iframe_url = iframe.attrs.get("data-src") or iframe.attrs.get("src")
+         if not iframe_url:
+             return []
+
+         if iframe_url.startswith("//"):
+             iframe_url = f"https:{iframe_url}"
+
+         video_id   = iframe_url.split("/")[-1]
+         master_url = f"https://eksenload.site/uploads/encode/{video_id}/master.m3u8"
+
+         results = [
+             ExtractResult(
+                 url     = master_url,
+                 name    = f"{self.name} | 1080p",
+                 referer = self.main_url
+             )
+         ]
+
+         return results
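FilmEkseni does not run a separate extractor; load_links derives the HLS master playlist directly from the embed iframe's last path segment. A small sketch with a hypothetical embed URL:

    iframe_url = "//eksenload.site/embed/abc123"     # hypothetical data-src value

    # Normalize protocol-relative embeds, as load_links() does
    if iframe_url.startswith("//"):
        iframe_url = f"https:{iframe_url}"

    video_id   = iframe_url.split("/")[-1]
    master_url = f"https://eksenload.site/uploads/encode/{video_id}/master.m3u8"
    print(master_url)   # https://eksenload.site/uploads/encode/abc123/master.m3u8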