KekikStream 2.2.8-py3-none-any.whl → 2.3.9-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (62)
  1. KekikStream/Core/HTMLHelper.py +134 -0
  2. KekikStream/Core/Plugin/PluginBase.py +22 -4
  3. KekikStream/Core/Plugin/PluginLoader.py +3 -2
  4. KekikStream/Core/Plugin/PluginManager.py +2 -2
  5. KekikStream/Core/__init__.py +2 -0
  6. KekikStream/Extractors/CloseLoad.py +12 -13
  7. KekikStream/Extractors/ContentX.py +33 -31
  8. KekikStream/Extractors/DonilasPlay.py +10 -10
  9. KekikStream/Extractors/DzenRu.py +3 -3
  10. KekikStream/Extractors/ExPlay.py +10 -10
  11. KekikStream/Extractors/Filemoon.py +47 -37
  12. KekikStream/Extractors/JetTv.py +4 -4
  13. KekikStream/Extractors/MixPlayHD.py +10 -11
  14. KekikStream/Extractors/MolyStream.py +16 -9
  15. KekikStream/Extractors/Odnoklassniki.py +4 -4
  16. KekikStream/Extractors/PeaceMakerst.py +3 -3
  17. KekikStream/Extractors/PixelDrain.py +6 -5
  18. KekikStream/Extractors/PlayerFilmIzle.py +6 -10
  19. KekikStream/Extractors/RapidVid.py +8 -7
  20. KekikStream/Extractors/SetPlay.py +10 -10
  21. KekikStream/Extractors/SetPrime.py +3 -6
  22. KekikStream/Extractors/SibNet.py +4 -5
  23. KekikStream/Extractors/Sobreatsesuyp.py +5 -5
  24. KekikStream/Extractors/TRsTX.py +5 -5
  25. KekikStream/Extractors/TurboImgz.py +3 -4
  26. KekikStream/Extractors/TurkeyPlayer.py +5 -5
  27. KekikStream/Extractors/VidHide.py +4 -7
  28. KekikStream/Extractors/VidMoly.py +37 -25
  29. KekikStream/Extractors/VidMoxy.py +8 -9
  30. KekikStream/Extractors/VidPapi.py +5 -7
  31. KekikStream/Extractors/VideoSeyred.py +3 -3
  32. KekikStream/Plugins/BelgeselX.py +40 -51
  33. KekikStream/Plugins/DiziBox.py +53 -81
  34. KekikStream/Plugins/DiziPal.py +50 -72
  35. KekikStream/Plugins/DiziYou.py +96 -83
  36. KekikStream/Plugins/Dizilla.py +101 -86
  37. KekikStream/Plugins/FilmBip.py +29 -50
  38. KekikStream/Plugins/FilmMakinesi.py +84 -46
  39. KekikStream/Plugins/FilmModu.py +27 -41
  40. KekikStream/Plugins/FullHDFilm.py +57 -62
  41. KekikStream/Plugins/FullHDFilmizlesene.py +32 -57
  42. KekikStream/Plugins/HDFilmCehennemi.py +51 -65
  43. KekikStream/Plugins/JetFilmizle.py +38 -51
  44. KekikStream/Plugins/KultFilmler.py +43 -67
  45. KekikStream/Plugins/RecTV.py +34 -9
  46. KekikStream/Plugins/RoketDizi.py +89 -111
  47. KekikStream/Plugins/SelcukFlix.py +102 -93
  48. KekikStream/Plugins/SetFilmIzle.py +110 -117
  49. KekikStream/Plugins/SezonlukDizi.py +88 -106
  50. KekikStream/Plugins/Sinefy.py +70 -70
  51. KekikStream/Plugins/SinemaCX.py +31 -55
  52. KekikStream/Plugins/Sinezy.py +27 -54
  53. KekikStream/Plugins/SuperFilmGeldi.py +25 -44
  54. KekikStream/Plugins/UgurFilm.py +23 -48
  55. KekikStream/Plugins/YabanciDizi.py +285 -0
  56. {kekikstream-2.2.8.dist-info → kekikstream-2.3.9.dist-info}/METADATA +1 -1
  57. kekikstream-2.3.9.dist-info/RECORD +84 -0
  58. kekikstream-2.2.8.dist-info/RECORD +0 -82
  59. {kekikstream-2.2.8.dist-info → kekikstream-2.3.9.dist-info}/WHEEL +0 -0
  60. {kekikstream-2.2.8.dist-info → kekikstream-2.3.9.dist-info}/entry_points.txt +0 -0
  61. {kekikstream-2.2.8.dist-info → kekikstream-2.3.9.dist-info}/licenses/LICENSE +0 -0
  62. {kekikstream-2.2.8.dist-info → kekikstream-2.3.9.dist-info}/top_level.txt +0 -0
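
The recurring change across the plugin and extractor files below is the move from selectolax.parser.HTMLParser (plus ad-hoc re.search calls) to a shared KekikStream.Core.HTMLHelper wrapper. The helper's own implementation (KekikStream/Core/HTMLHelper.py, +134 lines) is not reproduced in these hunks; what follows is a minimal sketch of the interface its call sites imply, written against selectolax, which the removed imports suggest it wraps. Method names and argument order come from the diffs; defaults and return types are guesses.

# Hypothetical sketch of the HTMLHelper surface, reconstructed from its call
# sites in the hunks below; the real KekikStream/Core/HTMLHelper.py may differ.
import re
from selectolax.parser import HTMLParser, Node

class HTMLHelper:
    def __init__(self, html: str):
        self.html   = html                  # raw markup, used as regex_first(..., secici.html)
        self.parser = HTMLParser(html)

    def _root(self, node: Node | None):
        # Fall back to the whole document when no sub-node is given.
        return node if node is not None else self.parser

    def select(self, css: str, node: Node | None = None) -> list[Node]:
        return self._root(node).css(css)

    def select_first(self, css: str, node: Node | None = None) -> Node | None:
        return self._root(node).css_first(css)

    def select_text(self, css: str, node: Node | None = None) -> str | None:
        el = self.select_first(css, node)
        if not el:
            return None
        return el.text(strip=True) or None

    def select_attr(self, css: str, attr: str, node: Node | None = None) -> str | None:
        el = self.select_first(css, node)
        return el.attrs.get(attr) if el else None

    def select_all_text(self, css: str, node: Node | None = None) -> list[str]:
        return [el.text(strip=True) for el in self.select(css, node) if el.text(strip=True)]

    def select_all_attr(self, css: str, attr: str, node: Node | None = None) -> list[str]:
        return [el.attrs.get(attr) for el in self.select(css, node) if el.attrs.get(attr)]

    def regex_first(self, pattern: str, text: str | None = None) -> str | None:
        # Default haystack guessed as the document's visible text; callers pass
        # secici.html explicitly when the raw markup should be searched.
        haystack = text if text is not None else (self.parser.root.text() if self.parser.root else "")
        match = re.search(pattern, haystack)
        if not match:
            return None
        return match.group(1) if match.groups() else match.group(0)

    def extract_year(self, css: str, node: Node | None = None) -> int | None:
        # int return type guessed from UgurFilm's str(year_val) wrapper below.
        match = re.search(r"(19\d{2}|20\d{2})", self.select_text(css, node) or "")
        return int(match.group(1)) if match else None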
KekikStream/Plugins/Sinezy.py

@@ -1,8 +1,7 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult
- from selectolax.parser import HTMLParser
- import re, base64
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult, HTMLHelper
+ import base64

  class Sinezy(PluginBase):
  name = "Sinezy"
@@ -45,16 +44,13 @@ class Sinezy(PluginBase):
  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
  full_url = f"{url}page/{page}/"
  resp = await self.httpx.get(full_url)
- sel = HTMLParser(resp.text)
+ secici = HTMLHelper(resp.text)

  results = []
- for item in sel.css("div.container div.content div.movie_box.move_k"):
- link_el = item.css_first("a")
- img_el = item.css_first("img")
-
- title = link_el.attrs.get("title") if link_el else None
- href = link_el.attrs.get("href") if link_el else None
- poster = img_el.attrs.get("data-src") if img_el else None
+ for item in secici.select("div.container div.content div.movie_box.move_k"):
+ title = secici.select_attr("a", "title", item)
+ href = secici.select_attr("a", "href", item)
+ poster = secici.select_attr("img", "data-src", item)

  if title and href:
  results.append(MainPageResult(
@@ -69,16 +65,13 @@ class Sinezy(PluginBase):
  async def search(self, query: str) -> list[SearchResult]:
  url = f"{self.main_url}/arama/?s={query}"
  resp = await self.httpx.get(url)
- sel = HTMLParser(resp.text)
+ secici = HTMLHelper(resp.text)

  results = []
- for item in sel.css("div.movie_box.move_k"):
- link_el = item.css_first("a")
- img_el = item.css_first("img")
-
- title = link_el.attrs.get("title") if link_el else None
- href = link_el.attrs.get("href") if link_el else None
- poster = img_el.attrs.get("data-src") if img_el else None
+ for item in secici.select("div.movie_box.move_k"):
+ title = secici.select_attr("a", "title", item)
+ href = secici.select_attr("a", "href", item)
+ poster = secici.select_attr("img", "data-src", item)

  if title and href:
  results.append(SearchResult(
@@ -91,38 +84,19 @@ class Sinezy(PluginBase):

  async def load_item(self, url: str) -> MovieInfo:
  resp = await self.httpx.get(url)
- sel = HTMLParser(resp.text)
-
- detail_el = sel.css_first("div.detail")
- title = detail_el.attrs.get("title") if detail_el else None
-
- poster_el = sel.css_first("div.move_k img")
- poster = poster_el.attrs.get("data-src") if poster_el else None
+ secici = HTMLHelper(resp.text)

- desc_el = sel.css_first("div.desc.yeniscroll p")
- description = desc_el.text(strip=True) if desc_el else None
+ title = secici.select_attr("div.detail", "title")
+ poster = secici.select_attr("div.move_k img", "data-src")
+ description = secici.select_text("div.desc.yeniscroll p")
+ rating = secici.select_text("span.info span.imdb")

- rating_el = sel.css_first("span.info span.imdb")
- rating = rating_el.text(strip=True) if rating_el else None
+ tags = secici.select_all_text("div.detail span a")
+ actors = secici.select_all_text("span.oyn p")

- tags = [a.text(strip=True) for a in sel.css("div.detail span a") if a.text(strip=True)]
- actors = [p.text(strip=True) for p in sel.css("span.oyn p") if p.text(strip=True)]
-
- year = None
- info_el = sel.css_first("span.info")
- info_text = info_el.text(strip=True) if info_el else ""
- if info_text:
- year_match = re.search(r'\b(19\d{2}|20\d{2})\b', info_text)
- if year_match:
- year = year_match.group(1)
-
- # Bulunamadıysa tüm sayfada ara
+ year = secici.regex_first(r"\b(19\d{2}|20\d{2})\b")
  if not year:
- all_text = sel.body.text() if sel.body else ""
- year_match = re.search(r'\b(19\d{2}|20\d{2})\b', all_text)
- if year_match:
- year = year_match.group(1)
-
+ year = secici.regex_first(r"\b(19\d{2}|20\d{2})\b", secici.html)
  return MovieInfo(
  title = title,
  url = url,
@@ -136,18 +110,17 @@ class Sinezy(PluginBase):

  async def load_links(self, url: str) -> list[ExtractResult]:
  resp = await self.httpx.get(url)
+ secici = HTMLHelper(resp.text)

- match = re.search(r"ilkpartkod\s*=\s*'([^']+)'", resp.text, re.IGNORECASE)
- if match:
- encoded = match.group(1)
+ encoded = secici.regex_first(r"ilkpartkod\s*=\s*'([^']+)'", secici.html)
+ if encoded:
  try:
  decoded = base64.b64decode(encoded).decode('utf-8')
- iframe_match = re.search(r'src="([^"]*)"', decoded)
+ decoded_sec = HTMLHelper(decoded)
+ iframe = decoded_sec.select_attr('iframe', 'src')

- if iframe_match:
- iframe = iframe_match.group(1)
+ if iframe:
  iframe = self.fix_url(iframe)
-
  data = await self.extract(iframe)
  if data:
  return [data]
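
Sinezy's load_links keeps the same underlying flow: pull the base64 ilkpartkod value out of the page, decode it, and read the iframe src from the decoded markup (now via HTMLHelper instead of a raw src regex). A self-contained round-trip for reference, with a made-up snippet and URL:

# Round-trip illustration of the ilkpartkod pattern; snippet and URL are invented.
import base64, re

snippet = '<iframe src="https://example.com/embed/abc"></iframe>'
page    = f"var ilkpartkod = '{base64.b64encode(snippet.encode()).decode()}';"

encoded = re.search(r"ilkpartkod\s*=\s*'([^']+)'", page).group(1)
decoded = base64.b64decode(encoded).decode("utf-8")

print(re.search(r'src="([^"]*)"', decoded).group(1))  # https://example.com/embed/abc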
KekikStream/Plugins/SuperFilmGeldi.py

@@ -1,8 +1,6 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult
- from selectolax.parser import HTMLParser
- import re
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult, HTMLHelper

  class SuperFilmGeldi(PluginBase):
  name = "SuperFilmGeldi"
@@ -36,21 +34,16 @@ class SuperFilmGeldi(PluginBase):

  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
  istek = await self.httpx.get(url.replace("SAYFA", str(page)))
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)

  results = []
- for veri in secici.css("div.movie-preview-content"):
- link_el = veri.css_first("span.movie-title a")
- if not link_el:
- continue
-
- title_text = link_el.text(strip=True)
+ for veri in secici.select("div.movie-preview-content"):
+ title_text = secici.select_text("span.movie-title a", veri)
  if not title_text:
  continue

- img_el = veri.css_first("img")
- href = link_el.attrs.get("href")
- poster = img_el.attrs.get("src") if img_el else None
+ href = secici.select_attr("span.movie-title a", "href", veri)
+ poster = secici.select_attr("img", "src", veri)

  results.append(MainPageResult(
  category = category,
@@ -63,21 +56,16 @@ class SuperFilmGeldi(PluginBase):

  async def search(self, query: str) -> list[SearchResult]:
  istek = await self.httpx.get(f"{self.main_url}?s={query}")
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)

  results = []
- for veri in secici.css("div.movie-preview-content"):
- link_el = veri.css_first("span.movie-title a")
- if not link_el:
- continue
-
- title_text = link_el.text(strip=True)
+ for veri in secici.select("div.movie-preview-content"):
+ title_text = secici.select_text("span.movie-title a", veri)
  if not title_text:
  continue

- img_el = veri.css_first("img")
- href = link_el.attrs.get("href")
- poster = img_el.attrs.get("src") if img_el else None
+ href = secici.select_attr("span.movie-title a", "href", veri)
+ poster = secici.select_attr("img", "src", veri)

  results.append(SearchResult(
  title = self.clean_title(title_text.split(" izle")[0]),
@@ -89,26 +77,19 @@ class SuperFilmGeldi(PluginBase):

  async def load_item(self, url: str) -> MovieInfo:
  istek = await self.httpx.get(url)
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)

- title_el = secici.css_first("div.title h1")
- title = title_el.text(strip=True) if title_el else ""
- title = self.clean_title(title.split(" izle")[0]) if title else ""
+ title = secici.select_text("div.title h1") or ""
+ title = self.clean_title(title.split(" izle")[0]) if title else ""

- poster_el = secici.css_first("div.poster img")
- poster = poster_el.attrs.get("src") if poster_el else None
+ poster = secici.select_attr("div.poster img", "src")

- # year: re_first kullanılamaz, re.search kullanıyoruz
- year_el = secici.css_first("div.release a")
- year_text = year_el.text(strip=True) if year_el else ""
- year_match = re.search(r"(\d{4})", year_text)
- year = year_match.group(1) if year_match else None
+ year = secici.extract_year("div.release a")

- desc_el = secici.css_first("div.excerpt p")
- description = desc_el.text(strip=True) if desc_el else None
+ description = secici.select_text("div.excerpt p")

- tags = [a.text(strip=True) for a in secici.css("div.categories a") if a.text(strip=True)]
- actors = [a.text(strip=True) for a in secici.css("div.actor a") if a.text(strip=True)]
+ tags = secici.select_all_text("div.categories a")
+ actors = secici.select_all_text("div.actor a")

  return MovieInfo(
  url = url,
@@ -122,11 +103,10 @@ class SuperFilmGeldi(PluginBase):

  async def load_links(self, url: str) -> list[ExtractResult]:
  istek = await self.httpx.get(url)
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)

- iframe_el = secici.css_first("div#vast iframe")
- iframe = iframe_el.attrs.get("src") if iframe_el else None
- iframe = self.fix_url(iframe) if iframe else None
+ iframe = secici.select_attr("div#vast iframe", "src")
+ iframe = self.fix_url(iframe) if iframe else None

  if not iframe:
  return []
@@ -136,10 +116,11 @@ class SuperFilmGeldi(PluginBase):
  # Mix player özel işleme
  if "mix" in iframe and "index.php?data=" in iframe:
  iframe_istek = await self.httpx.get(iframe, headers={"Referer": f"{self.main_url}/"})
- mix_point = re.search(r'videoUrl"\s*:\s*"(.*?)"\s*,\s*"videoServer', iframe_istek.text)
+ iframe_sec = HTMLHelper(iframe_istek.text)
+ mix_point = iframe_sec.regex_first(r'videoUrl"\s*:\s*"(.*?)"\s*,\s*"videoServer')

  if mix_point:
- mix_point = mix_point[1].replace("\\", "")
+ mix_point = mix_point.replace("\\", "")

  # Endpoint belirleme
  if "mixlion" in iframe:
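
The one behavioral subtlety in the SuperFilmGeldi hunk is the dropped [1] index: re.search returns a Match object (group 1 via match[1]), whereas regex_first, as sketched above, appears to hand back the captured string itself. For comparison, with an invented payload:

# Old style: index into the Match object returned by re.search. Payload is made up.
import re

body  = '"videoUrl":"https:\\/\\/cdn.example\\/video.m3u8","videoServer":"1"'
match = re.search(r'videoUrl"\s*:\s*"(.*?)"\s*,\s*"videoServer', body)
print(match[1].replace("\\", ""))  # https://cdn.example/video.m3u8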
KekikStream/Plugins/UgurFilm.py

@@ -1,8 +1,6 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

- from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult
- from selectolax.parser import HTMLParser
- import re
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, ExtractResult, HTMLHelper

  class UgurFilm(PluginBase):
  name = "UgurFilm"
@@ -26,21 +24,17 @@ class UgurFilm(PluginBase):

  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
  istek = await self.httpx.get(f"{url}{page}", follow_redirects=True)
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)

  results = []
- for veri in secici.css("div.icerik div"):
+ for veri in secici.select("div.icerik div"):
  # Title is in the second span (a.baslik > span), not the first span (class="sol" which is empty)
- title_el = veri.css_first("a.baslik span")
- title = title_el.text(strip=True) if title_el else None
+ title = secici.select_text("a.baslik span", veri)
  if not title:
  continue

- link_el = veri.css_first("a")
- img_el = veri.css_first("img")
-
- href = link_el.attrs.get("href") if link_el else None
- poster = img_el.attrs.get("src") if img_el else None
+ href = secici.select_attr("a", "href", veri)
+ poster = secici.select_attr("img", "src", veri)

  results.append(MainPageResult(
  category = category,
@@ -53,19 +47,13 @@ class UgurFilm(PluginBase):

  async def search(self, query: str) -> list[SearchResult]:
  istek = await self.httpx.get(f"{self.main_url}/?s={query}")
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)

  results = []
- for film in secici.css("div.icerik div"):
- # Title is in a.baslik > span, not the first span
- title_el = film.css_first("a.baslik span")
- title = title_el.text(strip=True) if title_el else None
-
- link_el = film.css_first("a")
- img_el = film.css_first("img")
-
- href = link_el.attrs.get("href") if link_el else None
- poster = img_el.attrs.get("src") if img_el else None
+ for film in secici.select("div.icerik div"):
+ title = secici.select_text("a.baslik span", film)
+ href = secici.select_attr("a", "href", film)
+ poster = secici.select_attr("img", "src", film)

  if title and href:
  results.append(SearchResult(
@@ -78,30 +66,18 @@ class UgurFilm(PluginBase):

  async def load_item(self, url: str) -> MovieInfo:
  istek = await self.httpx.get(url)
- secici = HTMLParser(istek.text)
-
- title_el = secici.css_first("div.bilgi h2")
- title = title_el.text(strip=True) if title_el else ""
-
- poster_el = secici.css_first("div.resim img")
- poster = poster_el.attrs.get("src", "").strip() if poster_el else ""
+ secici = HTMLHelper(istek.text)

- desc_el = secici.css_first("div.slayt-aciklama")
- description = desc_el.text(strip=True) if desc_el else ""
+ title = secici.select_text("div.bilgi h2") or ""
+ poster = secici.select_attr("div.resim img", "src") or ""
+ description = secici.select_text("div.slayt-aciklama") or ""

- tags = [a.text(strip=True) for a in secici.css("p.tur a[href*='/category/']") if a.text(strip=True)]
+ tags = secici.select_all_text("p.tur a[href*='/category/']")

- # re_first yerine re.search
- year_el = secici.css_first("a[href*='/yil/']")
- year_text = year_el.text(strip=True) if year_el else ""
- year_match = re.search(r"\d+", year_text)
- year = year_match.group() if year_match else None
+ year_val = secici.extract_year("a[href*='/yil/']")
+ year = str(year_val) if year_val else None

- actors = []
- for actor in secici.css("li.oyuncu-k"):
- span_el = actor.css_first("span")
- if span_el and span_el.text(strip=True):
- actors.append(span_el.text(strip=True))
+ actors = secici.select_all_text("li.oyuncu-k span")

  return MovieInfo(
  url = self.fix_url(url),
@@ -115,17 +91,16 @@ class UgurFilm(PluginBase):

  async def load_links(self, url: str) -> list[ExtractResult]:
  istek = await self.httpx.get(url)
- secici = HTMLParser(istek.text)
+ secici = HTMLHelper(istek.text)
  results = []

- part_links = [a.attrs.get("href") for a in secici.css("li.parttab a") if a.attrs.get("href")]
+ part_links = secici.select_all_attr("li.parttab a", "href")

  for part_link in part_links:
  sub_response = await self.httpx.get(part_link)
- sub_selector = HTMLParser(sub_response.text)
+ sub_selector = HTMLHelper(sub_response.text)

- iframe_el = sub_selector.css_first("div#vast iframe")
- iframe = iframe_el.attrs.get("src") if iframe_el else None
+ iframe = sub_selector.select_attr("div#vast iframe", "src")

  if iframe and self.main_url in iframe:
  post_data = {
KekikStream/Plugins/YabanciDizi.py (new file)

@@ -0,0 +1,285 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, MovieInfo, Episode, ExtractResult, HTMLHelper
+ import json, asyncio, time
+
+ class YabanciDizi(PluginBase):
+ name = "YabanciDizi"
+ language = "tr"
+ main_url = "https://yabancidizi.so"
+ favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+ description = "Yabancidizi.so platformu üzerinden en güncel yabancı dizileri ve filmleri izleyebilir, favori içeriklerinizi takip edebilirsiniz."
+
+ main_page = {
+ f"{main_url}/kesfet/eyJvcmRlciI6ImRhdGVfYm90dG9tIiwia2F0ZWdvcnkiOlsiMTciXX0=" : "Diziler",
+ f"{main_url}/kesfet/eyJvcmRlciI6ImRhdGVfYm90dG9tIiwia2F0ZWdvcnkiOlsiMTgiXX0=" : "Filmler",
+ f"{main_url}/kesfet/eyJvcmRlciI6ImRhdGVfYm90dG9tIiwiY291bnRyeSI6eyJLUiI6IktSIn19" : "Kdrama",
+ f"{main_url}/kesfet/eyJvcmRlciI6ImRhdGVfYm90dG9tIiwiY291bnRyeSI6eyJKUCI6IkpQIn0sImNhdGVnb3J5IjpbXX0=" : "Jdrama",
+ f"{main_url}/kesfet/eyJvcmRlciI6ImRhdGVfYm90dG9tIiwiY2F0ZWdvcnkiOnsiMyI6IjMifX0=" : "Animasyon",
+ }
+
+ async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+ full_url = url if page == 1 else f"{url}/{page}"
+
+ resp = await self.httpx.get(full_url, headers={"Referer": f"{self.main_url}/"})
+ sel = HTMLHelper(resp.text)
+
+ results = []
+ for item in sel.select("li.mb-lg, li.segment-poster"):
+ title = sel.select_text("h2", item)
+ href = sel.select_attr("a", "href", item)
+ poster = sel.select_attr("img", "src", item)
+ score = sel.select_text("span.rating", item)
+
+ if title and href:
+ results.append(MainPageResult(
+ category = category,
+ title = title,
+ url = self.fix_url(href),
+ poster = self.fix_url(poster) if poster else None,
+ ))
+
+ return results
+
+ async def search(self, query: str) -> list[SearchResult]:
+ search_url = f"{self.main_url}/search?qr={query}"
+
+ headers = {
+ "X-Requested-With" : "XMLHttpRequest",
+ "Referer" : f"{self.main_url}/"
+ }
+
+ resp = await self.httpx.post(search_url, headers=headers)
+
+ try:
+ raw = resp.json()
+ # Kotlin mapping: JsonResponse -> Data -> ResultItem
+ res_array = raw.get("data", {}).get("result", [])
+
+ results = []
+ for item in res_array:
+ title = item.get("s_name")
+ image = item.get("s_image")
+ slug = item.get("s_link")
+ s_type = item.get("s_type") # 0: dizi, 1: film
+
+ poster = f"{self.main_url}/uploads/series/{image}" if image else None
+
+ if s_type == "1":
+ href = f"{self.main_url}/film/{slug}"
+ else:
+ href = f"{self.main_url}/dizi/{slug}"
+
+ if title and slug:
+ results.append(SearchResult(
+ title = title,
+ url = self.fix_url(href),
+ poster = self.fix_url(poster) if poster else None
+ ))
+ return results
+ except Exception:
+ return []
+
+ async def load_item(self, url: str) -> SeriesInfo | MovieInfo:
+ resp = await self.httpx.get(url, follow_redirects=True)
+ sel = HTMLHelper(resp.text)
+
+ og_title = sel.select_attr("meta[property='og:title']", "content")
+ title = og_title.split("|")[0].strip() if og_title else sel.select_text("h1")
+
+ poster = sel.select_attr("meta[property='og:image']", "content")
+ description = sel.select_text("p#tv-series-desc")
+
+ # Try to extract year from table first
+ year_cell = sel.select_text("td div.truncate")
+ year = None
+ if year_cell:
+ year_match = sel.regex_first(r"(\d{4})", year_cell)
+ if year_match:
+ year = year_match
+
+ tags = []
+ rating = None
+ duration = None
+ actors = []
+ for item in sel.select("div.item"):
+ text = item.text(strip=True)
+ if "T\u00fcr\u00fc:" in text:
+ tags = [t.strip() for t in text.replace("T\u00fcr\u00fc:", "").split(",")]
+ elif "IMDb Puan\u0131" in text:
+ rating = text.replace("IMDb Puan\u0131", "").strip()
+ elif "Yap\u0131m Y\u0131l\u0131" in text:
+ year_match = sel.regex_first(r"(\d{4})", text)
+ if year_match:
+ year = year_match
+ elif "Takip\u00e7iler" in text:
+ continue
+ elif "S\u00fcre" in text:
+ dur_match = sel.regex_first(r"(\d+)", text)
+ if dur_match:
+ duration = dur_match
+ elif "Oyuncular:" in text:
+ actors = [a.text(strip=True) for a in sel.select("a", item)]
+
+ if not actors:
+ actors = [a.text(strip=True) for a in sel.select("div#common-cast-list div.item h5")]
+
+ trailer_match = sel.regex_first(r"embed\/(.*)\?rel", resp.text)
+ trailer = f"https://www.youtube.com/embed/{trailer_match}" if trailer_match else None
+
+ if "/film/" in url:
+ return MovieInfo(
+ title = title,
+ url = url,
+ poster = self.fix_url(poster) if poster else None,
+ description = description,
+ rating = rating,
+ tags = tags,
+ actors = actors,
+ year = year,
+ duration = int(duration) if duration and duration.isdigit() else None
+ )
+ else:
+ episodes = []
+ for bolum_item in sel.select("div.episodes-list div.ui td:has(h6)"):
+ link_el = sel.select_first("a", bolum_item)
+ if not link_el: continue
+
+ bolum_href = link_el.attrs.get("href")
+ bolum_name = sel.select_text("h6", bolum_item) or link_el.text(strip=True)
+
+ season = sel.regex_first(r"sezon-(\d+)", bolum_href)
+ episode = sel.regex_first(r"bolum-(\d+)", bolum_href)
+
+ ep_season = int(season) if season and season.isdigit() else None
+ ep_episode = int(episode) if episode and episode.isdigit() else None
+
+ episodes.append(Episode(
+ season = ep_season,
+ episode = ep_episode,
+ title = bolum_name,
+ url = self.fix_url(bolum_href)
+ ))
+
+ if episodes and (episodes[0].episode or 0) > (episodes[-1].episode or 0):
+ episodes.reverse()
+
+ return SeriesInfo(
+ title = title,
+ url = url,
+ poster = self.fix_url(poster) if poster else None,
+ description = description,
+ rating = rating,
+ tags = tags,
+ actors = actors,
+ year = year,
+ episodes = episodes
+ )
+
+ async def load_links(self, url: str) -> list[ExtractResult]:
+ # Use cloudscraper to bypass Cloudflare
+ resp = self.cloudscraper.get(url, headers={"Referer": f"{self.main_url}/"})
+ sel = HTMLHelper(resp.text)
+
+ results = []
+
+ # Method 1: alternatives-for-this (include active too)
+ for alt in sel.select("div.alternatives-for-this div.item"):
+ data_hash = alt.attrs.get("data-hash")
+ data_link = alt.attrs.get("data-link")
+ q_type = alt.attrs.get("data-querytype")
+
+ if not data_hash or not data_link: continue
+
+ try:
+ post_resp = self.cloudscraper.post(
+ f"{self.main_url}/ajax/service",
+ data = {
+ "link" : data_link,
+ "hash" : data_hash,
+ "querytype" : q_type,
+ "type" : "videoGet"
+ },
+ headers = {
+ "X-Requested-With" : "XMLHttpRequest",
+ "Referer" : f"{self.main_url}/"
+ },
+ cookies = {"udys": "1760709729873", "level": "1"}
+ )
+
+ service_data = post_resp.json()
+ api_iframe = service_data.get("api_iframe")
+ if api_iframe:
+ extract_res = await self._fetch_and_extract(api_iframe, prefix="Alt")
+ if extract_res:
+ results.extend(extract_res if isinstance(extract_res, list) else [extract_res])
+ except Exception:
+ continue
+
+ # Method 2: pointing[data-eid]
+ for id_el in sel.select("a.ui.pointing[data-eid]"):
+ dil = id_el.text(strip=True)
+ v_lang = "tr" if "Dublaj" in dil else "en"
+ data_eid = id_el.attrs.get("data-eid")
+
+ try:
+ post_resp = self.cloudscraper.post(
+ f"{self.main_url}/ajax/service",
+ data = {
+ "e_id" : data_eid,
+ "v_lang" : v_lang,
+ "type" : "get_whatwehave"
+ },
+ headers = {
+ "X-Requested-With" : "XMLHttpRequest",
+ "Referer" : f"{self.main_url}/"
+ },
+ cookies = {"udys": "1760709729873", "level": "1"}
+ )
+
+ service_data = post_resp.json()
+ api_iframe = service_data.get("api_iframe")
+ if api_iframe:
+ extract_res = await self._fetch_and_extract(api_iframe, prefix=dil)
+ if extract_res:
+ results.extend(extract_res if isinstance(extract_res, list) else [extract_res])
+ except Exception:
+ continue
+
+ return results
+
+ def _fetch_and_extract_sync(self, iframe_url, prefix=""):
+ """Synchronous helper for _fetch_and_extract using cloudscraper."""
+ # Initial fetch
+ resp = self.cloudscraper.get(
+ iframe_url,
+ headers = {"Referer": f"{self.main_url}/"},
+ cookies = {"udys": "1760709729873", "level": "1"}
+ )
+
+ # Handle "Lütfen bekleyiniz" check from Kotlin
+ if "Lütfen bekleyiniz" in resp.text:
+ import time as time_module
+ time_module.sleep(1)
+ timestamp = int(time_module.time())
+ # Retry with t=timestamp as in Kotlin
+ sep = "&" if "?" in iframe_url else "?"
+ resp = self.cloudscraper.get(
+ f"{iframe_url}{sep}t={timestamp}",
+ headers = {"Referer": f"{self.main_url}/"},
+ cookies = resp.cookies # Use cookies from first response
+ )
+
+ sel = HTMLHelper(resp.text)
+ final_iframe = sel.select_attr("iframe", "src")
+
+ return final_iframe
+
+ async def _fetch_and_extract(self, iframe_url, prefix=""):
+ final_iframe = self._fetch_and_extract_sync(iframe_url, prefix)
+
+ if final_iframe:
+ final_url = self.fix_url(final_iframe)
+ return await self.extract(final_url, referer=f"{self.main_url}/", prefix=prefix)
+
+ return None
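
A side note on the new plugin: _fetch_and_extract calls the blocking cloudscraper helper directly from async code, so each request stalls the event loop for its full duration (the module-level asyncio and time imports go unused). A sketch only, keeping the names above, of how the same call could be pushed onto a worker thread with asyncio.to_thread:

# Sketch: same _fetch_and_extract, but the blocking cloudscraper round-trip is
# offloaded with asyncio.to_thread (Python 3.9+) so the loop stays responsive.
import asyncio

async def _fetch_and_extract(self, iframe_url, prefix=""):
    final_iframe = await asyncio.to_thread(self._fetch_and_extract_sync, iframe_url, prefix)

    if final_iframe:
        final_url = self.fix_url(final_iframe)
        return await self.extract(final_url, referer=f"{self.main_url}/", prefix=prefix)

    return None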