KekikStream 2.3.8__py3-none-any.whl → 2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of KekikStream might be problematic; see the package's advisory page on the registry for more details.

@@ -113,8 +113,8 @@ class HTMLHelper:
113
113
  return int(m.group(1)), int(m.group(2))
114
114
 
115
115
  # Ayrı ayrı ara
116
- s = re.search(r"(\d+)\.\s*[Ss]ezon|[Ss]ezon[- ]?(\d+)|-(\d+)-sezon", text, re.I)
117
- e = re.search(r"(\d+)\.\s*[Bb]ölüm|[Bb]olum[- ]?(\d+)|-(\d+)-bolum|[Ee](\d+)", text, re.I)
116
+ s = re.search(r"(\d+)\.\s*[Ss]ezon|[Ss]ezon[- ]?(\d+)|-(\d+)-sezon|S(\d+)|(\d+)\.[Ss]", text, re.I)
117
+ e = re.search(r"(\d+)\.\s*[Bb][öo]l[üu]m|[Bb][öo]l[üu]m[- ]?(\d+)|-(\d+)-bolum|[Ee](\d+)", text, re.I)
118
118
 
119
119
  # İlk bulunan grubu al (None değilse)
120
120
  s_val = next((int(g) for g in s.groups() if g), None) if s else None
@@ -51,9 +51,6 @@ class Episode(BaseModel):
51
51
  if not self.title:
52
52
  self.title = ""
53
53
 
54
- if any(keyword in self.title.lower() for keyword in ["bölüm", "sezon", "episode"]):
55
- self.title = ""
56
-
57
54
  return self
58
55
 
59
56
  class SeriesInfo(BaseModel):
@@ -0,0 +1,62 @@
1
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
+
3
+ from KekikStream.Core import ExtractorBase, ExtractResult, Subtitle, HTMLHelper
4
+ from Kekik.Sifreleme import AESManager
5
+ import re
6
+ import json
7
+
8
+ class HDMomPlayer(ExtractorBase):
9
+ name = "HDMomPlayer"
10
+ main_url = "hdmomplayer.com"
11
+
12
+ async def extract(self, url: str, referer: str = None) -> ExtractResult | None:
13
+ if referer:
14
+ self.httpx.headers.update({"Referer": referer})
15
+
16
+ try:
17
+ response = await self.httpx.get(url)
18
+ page_source = response.text
19
+
20
+ m3u_link = None
21
+
22
+ # Regex for bePlayer matches
23
+ # Matches: bePlayer('PASS', '{DATA}');
24
+ helper = HTMLHelper(page_source)
25
+ be_matches = helper.regex_all(r"bePlayer\('([^']+)',\s*'(\{[^\}]+\})'\);")
26
+
27
+ if be_matches:
28
+ pass_val, data_val = be_matches[0]
29
+
30
+ try:
31
+ # Use Kekik.Sifreleme.AESManager as requested
32
+ decrypted = AESManager.decrypt(data_val, pass_val).replace("\\", "")
33
+
34
+ # Search for video_location in decrypted string
35
+ # Kotlin: video_location":"([^"]+)
36
+ m_loc = re.search(r'video_location":"([^"]+)"', decrypted)
37
+ if m_loc:
38
+ m3u_link = m_loc.group(1).replace(r"\/", "/")
39
+ except Exception:
40
+ pass
41
+
42
+ if not m3u_link:
43
+ # Fallback regex
44
+ # file:"..."
45
+ m_file = re.search(r'file:"([^"]+)"', page_source)
46
+ if m_file:
47
+ m3u_link = m_file.group(1)
48
+
49
+ if not m3u_link:
50
+ return None
51
+
52
+ # Fix URL if needed
53
+ if m3u_link.startswith("//"):
54
+ m3u_link = "https:" + m3u_link
55
+
56
+ return ExtractResult(
57
+ name = "HDMomPlayer",
58
+ url = m3u_link,
59
+ referer = url,
60
+ )
61
+ except Exception:
62
+ return None
@@ -19,54 +19,13 @@ class MolyStream(ExtractorBase):
19
19
  return any(domain in url for domain in self.supported_domains)
20
20
 
21
21
  async def extract(self, url, referer=None) -> ExtractResult:
22
- if "doctype html" in url.lower():
23
- text = url
22
+ if "doctype html" in url:
23
+ secici = HTMLHelper(url)
24
+ video = secici.select_attr("video#sheplayer source", "src")
24
25
  else:
25
- # Sheila-style referer fix
26
- if "/embed/sheila/" in url:
27
- referer = url.replace("/embed/sheila/", "/embed/")
26
+ video = url
28
27
 
29
- self.httpx.headers.update({
30
- "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
31
- "Referer" : referer or self.main_url
32
- })
33
- istek = await self.httpx.get(url, follow_redirects=True)
34
- text = istek.text
35
-
36
- # 1. Sheila-style links often have the m3u8 directly as the first http line or in script
37
- m3u8 = None
38
- if "#EXTM3U" in text:
39
- for line in text.splitlines():
40
- line = line.strip().replace('"', '').replace("'", "")
41
- if line.startswith("http"):
42
- m3u8 = line
43
- break
44
-
45
- if not m3u8:
46
- for line in text.splitlines():
47
- line = line.strip().replace('"', '').replace("'", "")
48
- if line.startswith("http") and ".m3u8" in line:
49
- m3u8 = line
50
- break
51
-
52
- if not m3u8:
53
- secici = HTMLHelper(text)
54
- # 2. Try video tag
55
- m3u8 = secici.select_attr("video#sheplayer source", "src") or secici.select_attr("video source", "src")
56
-
57
- if not m3u8:
58
- # 3. Try regex
59
- m3u8 = HTMLHelper(text).regex_first(r'["\'](https?://[^"\']+\.m3u8[^"\']*)["\']')
60
-
61
- if not m3u8:
62
- # Fallback to any http link in a script if it looks like a video link
63
- m3u8 = HTMLHelper(text).regex_first(r'["\'](https?://[^"\']+/q/\d+)["\']')
64
-
65
- if not m3u8:
66
- m3u8 = url # Final fallback
67
-
68
- # Subtitles (Sheila style addSrtFile)
69
- resp_sec = HTMLHelper(text)
28
+ resp_sec = HTMLHelper(url)
70
29
  matches = resp_sec.regex_all(r"addSrtFile\(['\"]([^'\"]+\.srt)['\"]\s*,\s*['\"][a-z]{2}['\"]\s*,\s*['\"]([^'\"]+)['\"]")
71
30
 
72
31
  subtitles = [
@@ -76,8 +35,8 @@ class MolyStream(ExtractorBase):
76
35
 
77
36
  return ExtractResult(
78
37
  name = self.name,
79
- url = m3u8,
80
- referer = url,
81
- user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
38
+ url = video,
39
+ referer = video.replace("/sheila", "") if video else None,
40
+ user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0",
82
41
  subtitles = subtitles
83
42
  )
@@ -0,0 +1,115 @@
1
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
+
3
+ from KekikStream.Core import ExtractorBase, ExtractResult, HTMLHelper, Subtitle
4
+ from urllib.parse import quote
5
+ from json import loads
6
+
7
+ class Videostr(ExtractorBase):
8
+ name = "Videostr"
9
+ main_url = "videostr.net"
10
+
11
+ async def extract(self, url: str, referer: str = None) -> ExtractResult | None:
12
+ headers = {
13
+ "Accept": "*/*",
14
+ "X-Requested-With": "XMLHttpRequest",
15
+ "Referer": "https://videostr.net",
16
+ }
17
+
18
+ # 1. Get nonce page
19
+ # Kotlin: url.substringAfterLast("/").substringBefore("?")
20
+ id = url.split("?")[0].split("/")[-1]
21
+
22
+ istek = await self.httpx.get(url, headers=headers)
23
+ if istek.status_code != 200:
24
+ return None
25
+
26
+ responsenonce = istek.text
27
+
28
+ # Find nonce
29
+ # Regex: \b[a-zA-Z0-9]{48}\b
30
+ # Or 3 blocks of 16 chars
31
+ helper = HTMLHelper(responsenonce)
32
+ nonce = helper.regex_first(r"\b[a-zA-Z0-9]{48}\b")
33
+ if not nonce:
34
+ # Fallback regex as per Kotlin: \b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b
35
+ # In Python we can just look for the combined matches if regex_first handles grouping poorly or try finding all
36
+ import re
37
+ m = re.search(r"\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b.*?\b([a-zA-Z0-9]{16})\b", responsenonce, re.DOTALL)
38
+ if m:
39
+ nonce = m.group(1) + m.group(2) + m.group(3)
40
+
41
+ if not nonce:
42
+ return None
43
+
44
+ # 2. Get Sources
45
+ api_url = f"https://videostr.net/embed-1/v3/e-1/getSources?id={id}&_k={nonce}"
46
+
47
+ api_resp = await self.httpx.get(api_url, headers=headers)
48
+ if api_resp.status_code != 200:
49
+ return None
50
+
51
+ # Parse JSON
52
+ try:
53
+ data = api_resp.json()
54
+ except:
55
+ return None
56
+
57
+ sources = data.get("sources", [])
58
+ if not sources:
59
+ return None
60
+
61
+ encrypted_file = sources[0].get("file")
62
+ if not encrypted_file:
63
+ return None
64
+
65
+ m3u8_url = None
66
+
67
+ if ".m3u8" in encrypted_file:
68
+ m3u8_url = encrypted_file
69
+ else:
70
+ # Decrypt
71
+ # Need key from github
72
+ key_url = "https://raw.githubusercontent.com/yogesh-hacker/MegacloudKeys/refs/heads/main/keys.json"
73
+ key_resp = await self.httpx.get(key_url)
74
+ if key_resp.status_code == 200:
75
+ try:
76
+ keys = key_resp.json()
77
+ vidstr_key = keys.get("vidstr") # As per Kotlin code: gson.fromJson(keyJson, Megakey::class.java)?.vidstr
78
+
79
+ if vidstr_key:
80
+ # Use Google Script to decrypt
81
+ decode_url = "https://script.google.com/macros/s/AKfycbxHbYHbrGMXYD2-bC-C43D3njIbU-wGiYQuJL61H4vyy6YVXkybMNNEPJNPPuZrD1gRVA/exec"
82
+
83
+ full_url = f"{decode_url}?encrypted_data={quote(encrypted_file)}&nonce={quote(nonce)}&secret={quote(vidstr_key)}"
84
+
85
+ decrypted_resp = await self.httpx.get(full_url)
86
+ if decrypted_resp.status_code == 200:
87
+ # Response is JSON {"file": "..."} usually or text?
88
+ # Kotlin says: Regex("\"file\":\"(.*?)\"").find(decryptedResponse)
89
+ m_file = re.search(r'"file":"(.*?)"', decrypted_resp.text)
90
+ if m_file:
91
+ m3u8_url = m_file.group(1).replace(r"\/", "/")
92
+ except Exception as e:
93
+ # print(f"Decryption error: {e}")
94
+ pass
95
+
96
+ if not m3u8_url:
97
+ return None
98
+
99
+ # Subtitles
100
+ # Kotlin: response.tracks
101
+ subtitles = []
102
+ tracks = data.get("tracks", [])
103
+ for t in tracks:
104
+ if t.get("kind") in ["captions", "subtitles"]:
105
+ subtitles.append(Subtitle(
106
+ name = t.get("label", "Altyazı"),
107
+ url = t.get("file")
108
+ ))
109
+
110
+ return ExtractResult(
111
+ name = "Videostr",
112
+ url = m3u8_url,
113
+ referer = "https://videostr.net/",
114
+ subtitles= subtitles
115
+ )
@@ -167,44 +167,34 @@ class DiziBox(PluginBase):
167
167
 
168
168
  crypt_data = iframe_secici.regex_first(r"CryptoJS\.AES\.decrypt\(\"(.*)\",\"", iframe_istek.text)
169
169
  crypt_pass = iframe_secici.regex_first(r"\",\"(.*)\"\);", iframe_istek.text)
170
- if crypt_data and crypt_pass:
171
- decode = CryptoJS.decrypt(crypt_pass, crypt_data)
172
- if video_match := iframe_secici.regex_first(r"file: '(.*)',", decode):
173
- results.append(video_match)
174
- else:
175
- results.append(decode)
170
+ decode = CryptoJS.decrypt(crypt_pass, crypt_data)
171
+
172
+ if video_match := iframe_secici.regex_first(r"file: '(.*)',", decode):
173
+ results.append(video_match)
174
+ else:
175
+ results.append(decode)
176
176
 
177
177
  elif "/player/moly/moly.php" in iframe_link:
178
178
  iframe_link = iframe_link.replace("moly.php?h=", "moly.php?wmode=opaque&h=")
179
- for _ in range(3): # Max 3 attempts
179
+ while True:
180
180
  await asyncio.sleep(.3)
181
181
  with contextlib.suppress(Exception):
182
182
  moly_istek = await self.httpx.get(iframe_link)
183
183
  moly_secici = HTMLHelper(moly_istek.text)
184
184
 
185
- if atob_data := moly_secici.regex_first(r"unescape\(\"(.*)\"\)"):
185
+ if atob_data := moly_secici.regex_first(r"unescape\(\"(.*)\"\)", moly_istek.text):
186
186
  decoded_atob = urllib.parse.unquote(atob_data)
187
187
  str_atob = base64.b64decode(decoded_atob).decode("utf-8")
188
188
 
189
- iframe_src = HTMLHelper(str_atob).select_attr("div#Player iframe", "src")
190
- if iframe_src:
191
- # ! Sheila replacement (Kotlin referansı)
192
- if "/embed/" in iframe_src:
193
- iframe_src = iframe_src.replace("/embed/", "/embed/sheila/").replace("vidmoly.me", "vidmoly.net")
194
-
195
- results.append(iframe_src)
196
- break
197
- elif embed_matches := moly_secici.regex_all(r'iframe.*?src="(.*?)"'):
198
- for src in embed_matches:
199
- if "/embed/" in src:
200
- src = src.replace("/embed/", "/embed/sheila/").replace("vidmoly.me", "vidmoly.net")
201
- results.append(src)
202
- break
189
+ iframe_src = HTMLHelper(str_atob).select_attr("div#Player iframe", "src")
190
+ if iframe_src:
191
+ results.append(iframe_src)
192
+
193
+ break
203
194
 
204
195
  elif "/player/haydi.php" in iframe_link:
205
- with contextlib.suppress(Exception):
206
- okru_url = base64.b64decode(iframe_link.split("?v=")[-1]).decode("utf-8")
207
- results.append(okru_url)
196
+ okru_url = base64.b64decode(iframe_link.split("?v=")[-1]).decode("utf-8")
197
+ results.append(okru_url)
208
198
 
209
199
  return results
210
200
 
@@ -0,0 +1,248 @@
1
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
+
3
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, SeriesInfo, Episode, ExtractResult, HTMLHelper
4
+ from json import dumps, loads
5
+ import re
6
+
7
+ class DiziMom(PluginBase):
8
+ name = "DiziMom"
9
+ language = "tr"
10
+ main_url = "https://www.dizimom.one"
11
+ favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
12
+ description = "Binlerce yerli yabancı dizi arşivi, tüm sezonlar, kesintisiz bölümler. Sadece dizi izle, Dizimom heryerde seninle!"
13
+
14
+ main_page = {
15
+ f"{main_url}/tum-bolumler/page" : "Son Bölümler",
16
+ f"{main_url}/yerli-dizi-izle/page" : "Yerli Diziler",
17
+ f"{main_url}/yabanci-dizi-izle/page" : "Yabancı Diziler",
18
+ f"{main_url}/tv-programlari-izle/page" : "TV Programları",
19
+ }
20
+
21
+ async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
22
+ istek = await self.httpx.get(f"{url}/{page}/")
23
+ helper = HTMLHelper(istek.text)
24
+
25
+ if "/tum-bolumler/" in url:
26
+ items = helper.select("div.episode-box")
27
+ results = []
28
+ for item in items:
29
+ name_el = helper.select_first("div.episode-name a", item)
30
+ if not name_el: continue
31
+ name = name_el.text(strip=True).split(" izle")[0]
32
+ title = name.replace(".Sezon ", "x").replace(".Bölüm", "")
33
+
34
+ ep_href = self.fix_url(name_el.attrs.get("href"))
35
+ pass
36
+
37
+ # Revert to standard categories if "tum-bolumler" is complex
38
+ return []
39
+ else:
40
+ items = helper.select("div.single-item")
41
+ return [
42
+ MainPageResult(
43
+ category = category,
44
+ title = helper.select_text("div.categorytitle a", item).split(" izle")[0],
45
+ url = self.fix_url(helper.select_attr("div.categorytitle a", "href", item)),
46
+ poster = self.fix_url(helper.select_attr("div.cat-img img", "src", item))
47
+ )
48
+ for item in items
49
+ ]
50
+
51
+ async def search(self, query: str) -> list[SearchResult]:
52
+ url = f"{self.main_url}/?s={query}"
53
+ istek = await self.httpx.get(url)
54
+ helper = HTMLHelper(istek.text)
55
+ items = helper.select("div.single-item")
56
+
57
+ return [
58
+ SearchResult(
59
+ title = helper.select_text("div.categorytitle a", item).split(" izle")[0],
60
+ url = self.fix_url(helper.select_attr("div.categorytitle a", "href", item)),
61
+ poster = self.fix_url(helper.select_attr("div.cat-img img", "src", item))
62
+ )
63
+ for item in items
64
+ ]
65
+
66
+ async def load_item(self, url: str) -> SeriesInfo:
67
+ istek = await self.httpx.get(url)
68
+ helper = HTMLHelper(istek.text)
69
+
70
+ title_raw = helper.select_text("div.title h1")
71
+ title = title_raw.split(" izle")[0] if title_raw else "Bilinmiyor"
72
+
73
+ poster = self.fix_url(helper.select_attr("div.category_image img", "src"))
74
+
75
+ # Custom extraction for fields that were using xpath_text
76
+ year = None
77
+ rating = None
78
+ actors = None
79
+
80
+ # Regex approach for specific fields might be safer/easier if structure varies
81
+ # Matches: Yapım Yılı : </span> 2025
82
+ year_val = helper.regex_first(r"Yapım Yılı\s*:\s*(?:</span>)?\s*(\d{4})")
83
+ if year_val:
84
+ year = int(year_val)
85
+
86
+ rating_val = helper.regex_first(r"IMDB\s*:\s*([\d\.]+)")
87
+ if rating_val:
88
+ rating = rating_val
89
+
90
+ actors_val = helper.regex_first(r"Oyuncular\s*:\s*(.+?)(?:</div>|<br|$)")
91
+ if not actors_val:
92
+ # Try selecting the div text directly if regex fails due to HTML tags
93
+ # Find div containing "Oyuncular :"
94
+ all_divs = helper.select("div")
95
+ for div in all_divs:
96
+ txt = div.text()
97
+ if "Oyuncular :" in txt:
98
+ actors_val = txt.split("Oyuncular :")[1].strip()
99
+ break
100
+
101
+ if actors_val:
102
+ # Remove footer / junk from actors
103
+ if "IMDB :" in actors_val:
104
+ actors_val = actors_val.split("IMDB :")[0].strip()
105
+
106
+ if "IMDB :" in actors_val:
107
+ actors_val = actors_val.split("IMDB :")[0].strip()
108
+
109
+ # Remove '×' and other junk if present at end
110
+ if "×" in actors_val:
111
+ actors_val = actors_val.split("×")[0].strip()
112
+
113
+ # Remove simple tags if any remaining
114
+ clean_actors = [a.strip() for a in actors_val.split(",")]
115
+ # Filter empty
116
+ clean_actors = [a for a in clean_actors if a]
117
+
118
+ actors = ", ".join(clean_actors)
119
+
120
+ description_raw = helper.select_text("div.category_desc")
121
+ description = None
122
+ if description_raw:
123
+ # Clean header "The Librarians izle..." etc. if present, usually it is fine.
124
+ # Clean "IMDB :" if attached
125
+ if "IMDB :" in description_raw:
126
+ description_raw = description_raw.split("IMDB :")[0].strip()
127
+
128
+ # Clean footer text start
129
+ # The footer block usually starts with "Dizimom, dizi ve film..."
130
+ # If we find "Dizimom," and it's not at the start (meaning it's part of the footer appended), split there.
131
+ # Note: The description might legitimately start with "Dizimom," strictly speaking, but unlikely for a series description.
132
+ if "Dizimom," in description_raw:
133
+ description = description_raw.split("Dizimom,")[0].strip()
134
+ elif "dizi izle film izle" in description_raw:
135
+ description = description_raw.split("dizi izle film izle")[0].strip()
136
+ else:
137
+ description = description_raw
138
+
139
+ # Fallback cleanup for JSON
140
+ if description and "{" in description:
141
+ description = description.split("{")[0].strip()
142
+
143
+ tags = helper.select_all_text("div.genres a")
144
+
145
+ # Improved year regex
146
+ if not year:
147
+ # Look for "Yapım Yılı : 2014" pattern in ANY text
148
+ # Get all text from category_text which usually contains it
149
+ meta_text = helper.select_text("div.category_text")
150
+ if meta_text:
151
+ match = re.search(r"Yapım Yılı\s*:\s*(\d{4})", meta_text)
152
+ if match:
153
+ year = int(match.group(1))
154
+
155
+ episodes = []
156
+ ep_items = helper.select("div.bolumust")
157
+ for item in ep_items:
158
+ ep_name_raw = helper.select_text("div.baslik", item)
159
+ ep_href = self.fix_url(helper.select_attr("a", "href", item))
160
+
161
+ if ep_name_raw:
162
+ # 1.Sezon 1.Bölüm
163
+ s_m = re.search(r"(\d+)\.Sezon", ep_name_raw)
164
+ e_m = re.search(r"(\d+)\.Bölüm", ep_name_raw)
165
+
166
+ season = int(s_m.group(1)) if s_m else 1
167
+ episode = int(e_m.group(1)) if e_m else 1
168
+
169
+ name = ep_name_raw.split(" izle")[0].replace(title, "").strip()
170
+
171
+ episodes.append(Episode(
172
+ season = season,
173
+ episode = episode,
174
+ title = name,
175
+ url = ep_href
176
+ ))
177
+
178
+ return SeriesInfo(
179
+ url = url,
180
+ poster = poster,
181
+ title = title,
182
+ description = description,
183
+ tags = tags,
184
+ rating = rating,
185
+ year = str(year) if year else None,
186
+ actors = actors,
187
+ episodes = episodes
188
+ )
189
+
190
+ async def load_links(self, url: str) -> list[ExtractResult]:
191
+ # Login simulation headers
192
+ headers = {
193
+ "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
194
+ "sec-ch-ua": 'Not/A)Brand";v="8", "Chromium";v="137", "Google Chrome";v="137"',
195
+ "sec-ch-ua-mobile": "?1",
196
+ "sec-ch-ua-platform": "Android"
197
+ }
198
+
199
+ # Simulate login (as seen in Kotlin)
200
+ login_url = f"{self.main_url}/wp-login.php"
201
+ login_data = {
202
+ "log": "keyiflerolsun",
203
+ "pwd": "12345",
204
+ "rememberme": "forever",
205
+ "redirect_to": self.main_url
206
+ }
207
+
208
+ await self.httpx.post(login_url, headers=headers, data=login_data)
209
+
210
+ istek = await self.httpx.get(url, headers=headers)
211
+ helper = HTMLHelper(istek.text)
212
+
213
+ iframes = []
214
+
215
+ main_iframe = helper.select_attr("iframe[src]", "src")
216
+ if main_iframe:
217
+ iframes.append(main_iframe)
218
+
219
+ sources = helper.select("div.sources a")
220
+ for source in sources:
221
+ href = source.attrs.get("href")
222
+ if href:
223
+ sub_istek = await self.httpx.get(href, headers=headers)
224
+ sub_helper = HTMLHelper(sub_istek.text)
225
+ sub_iframe = sub_helper.select_attr("div.video p iframe", "src")
226
+ if sub_iframe:
227
+ iframes.append(sub_iframe)
228
+
229
+ results = []
230
+ for iframe_url in iframes:
231
+ # Check for known extractors
232
+ if iframe_url.startswith("//"):
233
+ iframe_url = f"https:{iframe_url}"
234
+
235
+ extract_result = await self.extract(iframe_url)
236
+ if extract_result:
237
+ if isinstance(extract_result, list):
238
+ results.extend(extract_result)
239
+ else:
240
+ results.append(extract_result)
241
+ else:
242
+ results.append(ExtractResult(
243
+ url = iframe_url,
244
+ name = f"{self.name} | External",
245
+ referer = self.main_url
246
+ ))
247
+
248
+ return results