KekikStream 1.4.4__py3-none-any.whl → 2.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. KekikStream/CLI/pypi_kontrol.py +6 -6
  2. KekikStream/Core/Extractor/ExtractorBase.py +13 -12
  3. KekikStream/Core/Extractor/ExtractorLoader.py +25 -17
  4. KekikStream/Core/Extractor/ExtractorManager.py +53 -9
  5. KekikStream/Core/Extractor/ExtractorModels.py +5 -7
  6. KekikStream/Core/Extractor/YTDLPCache.py +35 -0
  7. KekikStream/Core/Media/MediaHandler.py +52 -31
  8. KekikStream/Core/Media/MediaManager.py +0 -3
  9. KekikStream/Core/Plugin/PluginBase.py +47 -21
  10. KekikStream/Core/Plugin/PluginLoader.py +11 -7
  11. KekikStream/Core/Plugin/PluginModels.py +25 -25
  12. KekikStream/Core/__init__.py +1 -0
  13. KekikStream/Extractors/CloseLoad.py +6 -26
  14. KekikStream/Extractors/ContentX_.py +40 -0
  15. KekikStream/Extractors/DzenRu.py +38 -0
  16. KekikStream/Extractors/ExPlay.py +53 -0
  17. KekikStream/Extractors/FirePlayer.py +60 -0
  18. KekikStream/Extractors/HDPlayerSystem.py +41 -0
  19. KekikStream/Extractors/JetTv.py +45 -0
  20. KekikStream/Extractors/MailRu.py +2 -4
  21. KekikStream/Extractors/MixTiger.py +57 -0
  22. KekikStream/Extractors/MolyStream.py +25 -7
  23. KekikStream/Extractors/Odnoklassniki.py +16 -11
  24. KekikStream/Extractors/{OkRuHTTP.py → Odnoklassniki_.py} +5 -1
  25. KekikStream/Extractors/{HDStreamAble.py → PeaceMakerst_.py} +1 -1
  26. KekikStream/Extractors/PixelDrain.py +0 -1
  27. KekikStream/Extractors/PlayerFilmIzle.py +62 -0
  28. KekikStream/Extractors/RapidVid.py +30 -13
  29. KekikStream/Extractors/RapidVid_.py +7 -0
  30. KekikStream/Extractors/SetPlay.py +57 -0
  31. KekikStream/Extractors/SetPrime.py +45 -0
  32. KekikStream/Extractors/SibNet.py +0 -1
  33. KekikStream/Extractors/TurkeyPlayer.py +34 -0
  34. KekikStream/Extractors/VidHide.py +72 -0
  35. KekikStream/Extractors/VidMoly.py +20 -19
  36. KekikStream/Extractors/{VidMolyMe.py → VidMoly_.py} +1 -1
  37. KekikStream/Extractors/VidMoxy.py +0 -1
  38. KekikStream/Extractors/VidPapi.py +89 -0
  39. KekikStream/Extractors/YTDLP.py +177 -0
  40. KekikStream/Extractors/YildizKisaFilm.py +41 -0
  41. KekikStream/Plugins/DiziBox.py +28 -16
  42. KekikStream/Plugins/DiziPal.py +246 -0
  43. KekikStream/Plugins/DiziYou.py +58 -31
  44. KekikStream/Plugins/Dizilla.py +97 -68
  45. KekikStream/Plugins/FilmBip.py +145 -0
  46. KekikStream/Plugins/FilmMakinesi.py +61 -52
  47. KekikStream/Plugins/FilmModu.py +138 -0
  48. KekikStream/Plugins/FullHDFilm.py +164 -0
  49. KekikStream/Plugins/FullHDFilmizlesene.py +38 -37
  50. KekikStream/Plugins/HDFilmCehennemi.py +44 -54
  51. KekikStream/Plugins/JetFilmizle.py +68 -42
  52. KekikStream/Plugins/KultFilmler.py +219 -0
  53. KekikStream/Plugins/RecTV.py +41 -37
  54. KekikStream/Plugins/RoketDizi.py +232 -0
  55. KekikStream/Plugins/SelcukFlix.py +309 -0
  56. KekikStream/Plugins/SezonlukDizi.py +16 -14
  57. KekikStream/Plugins/SineWix.py +39 -30
  58. KekikStream/Plugins/Sinefy.py +238 -0
  59. KekikStream/Plugins/SinemaCX.py +157 -0
  60. KekikStream/Plugins/Sinezy.py +146 -0
  61. KekikStream/Plugins/SuperFilmGeldi.py +121 -0
  62. KekikStream/Plugins/UgurFilm.py +10 -10
  63. KekikStream/__init__.py +296 -319
  64. KekikStream/requirements.txt +3 -4
  65. kekikstream-2.0.2.dist-info/METADATA +309 -0
  66. kekikstream-2.0.2.dist-info/RECORD +82 -0
  67. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info}/WHEEL +1 -1
  68. KekikStream/Extractors/FourCX.py +0 -7
  69. KekikStream/Extractors/FourPichive.py +0 -7
  70. KekikStream/Extractors/FourPlayRu.py +0 -7
  71. KekikStream/Extractors/Hotlinger.py +0 -7
  72. KekikStream/Extractors/OkRuSSL.py +0 -7
  73. KekikStream/Extractors/Pichive.py +0 -7
  74. KekikStream/Extractors/PlayRu.py +0 -7
  75. KekikStream/Helpers/Unpack.py +0 -75
  76. KekikStream/Plugins/Shorten.py +0 -225
  77. kekikstream-1.4.4.dist-info/METADATA +0 -108
  78. kekikstream-1.4.4.dist-info/RECORD +0 -63
  79. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info}/entry_points.txt +0 -0
  80. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info/licenses}/LICENSE +0 -0
  81. {kekikstream-1.4.4.dist-info → kekikstream-2.0.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,309 @@
+ # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
+
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
+ from parsel import Selector
+ import re, base64, json, urllib.parse
+
+ class SelcukFlix(PluginBase):
+ name = "SelcukFlix"
+ lang = "tr"
+ main_url = "https://selcukflix.net"
+ favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
+ description = "Selcukflix'te her türden en yeni ve en popüler dizi ve filmleri izlemenin keyfini çıkarın. Aksiyondan romantiğe, bilim kurgudan dramaya, geniş kütüphanemizde herkes için bir şey var."
+
+ main_page = {
+ f"{main_url}/tum-bolumler" : "Yeni Eklenen Bölümler",
+ "" : "Yeni Diziler",
+ "" : "Kore Dizileri",
+ "" : "Yerli Diziler",
+ "15" : "Aile",
+ "17" : "Animasyon",
+ "9" : "Aksiyon",
+ "5" : "Bilim Kurgu",
+ "2" : "Dram",
+ "12" : "Fantastik",
+ "18" : "Gerilim",
+ "3" : "Gizem",
+ "8" : "Korku",
+ "4" : "Komedi",
+ "7" : "Romantik"
+ }
+
+ async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
+ results = []
+ if "tum-bolumler" in url:
+ try:
+ resp = await self.httpx.get(url)
+ sel = Selector(resp.text)
+
+ for item in sel.css("div.col-span-3 a"):
+ name = item.css("h2::text").get()
+ ep_info = item.css("div.opacity-80::text").get()
+ href = item.css("::attr(href)").get()
+ poster = item.css("div.image img::attr(src)").get()
+
+ if name and href:
+ title = f"{name} - {ep_info}" if ep_info else name
+ final_url = self.fix_url(href)
+
+ if "/dizi/" in final_url and "/sezon-" in final_url:
+ final_url = final_url.split("/sezon-")[0]
+
+ results.append(MainPageResult(
+ category = category,
+ title = title,
+ url = final_url,
+ poster = self.fix_url(poster)
+ ))
+ except Exception:
+ pass
+ return results
+
+ base_api = f"{self.main_url}/api/bg/findSeries"
+
+ params = {
+ "releaseYearStart" : "1900",
+ "releaseYearEnd" : "2026",
+ "imdbPointMin" : "1",
+ "imdbPointMax" : "10",
+ "categoryIdsComma" : "",
+ "countryIdsComma" : "",
+ "orderType" : "date_desc",
+ "languageId" : "-1",
+ "currentPage" : page,
+ "currentPageCount" : "24",
+ "queryStr" : "",
+ "categorySlugsComma" : "",
+ "countryCodesComma" : ""
+ }
+
+ if "Yerli Diziler" in category:
+ params["imdbPointMin"] = "5"
+ params["countryIdsComma"] = "29"
+ elif "Kore Dizileri" in category:
+ params["countryIdsComma"] = "21"
+ params["countryCodesComma"] = "KR"
+ else:
+ params["categoryIdsComma"] = url
+
+ full_url = f"{base_api}?{urllib.parse.urlencode(params)}"
+
+ headers = {
+ "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:137.0) Gecko/20100101 Firefox/137.0",
+ "Accept" : "application/json, text/plain, */*",
+ "Accept-Language" : "en-US,en;q=0.5",
+ "X-Requested-With" : "XMLHttpRequest",
+ "Sec-Fetch-Site" : "same-origin",
+ "Sec-Fetch-Mode" : "cors",
+ "Sec-Fetch-Dest" : "empty",
+ "Referer" : f"{self.main_url}/"
+ }
+
+ try:
+ post_resp = await self.httpx.post(full_url, headers=headers)
+ resp_json = post_resp.json()
+ response_data = resp_json.get("response")
+
+ raw_data = base64.b64decode(response_data)
+ try:
+ decoded_str = raw_data.decode('utf-8')
+ except UnicodeDecodeError:
+ decoded_str = raw_data.decode('iso-8859-1').encode('utf-8').decode('utf-8')
+
+ data = json.loads(decoded_str)
+
+ for item in data.get("result", []):
+ title = item.get("title")
+ slug = item.get("slug")
+ poster = item.get("poster")
+
+ if poster:
+ poster = self.clean_image_url(poster)
+
+ if slug:
+ results.append(MainPageResult(
+ category = category,
+ title = title,
+ url = self.fix_url(slug),
+ poster = poster
+ ))
+
+ except Exception:
+ pass
+
+ return results
+
+ async def search(self, query: str) -> list[SearchResult]:
+ search_url = f"{self.main_url}/api/bg/searchcontent?searchterm={query}"
+
+ headers = {
+ "Accept" : "application/json, text/plain, */*",
+ "X-Requested-With" : "XMLHttpRequest",
+ "Referer" : f"{self.main_url}/"
+ }
+
+ post_resp = await self.httpx.post(search_url, headers=headers)
+
+ try:
+ resp_json = post_resp.json()
+ response_data = resp_json.get("response")
+ raw_data = base64.b64decode(response_data)
+ try:
+ decoded_str = raw_data.decode('utf-8')
+ except UnicodeDecodeError:
+ decoded_str = raw_data.decode('iso-8859-1').encode('utf-8').decode('utf-8')
+
+ search_data = json.loads(decoded_str)
+
+ results = []
+ for item in search_data.get("result", []):
+ title = item.get("title")
+ slug = item.get("slug")
+ poster = item.get("poster")
+
+ if poster:
+ poster = self.clean_image_url(poster)
+
+ if slug and "/seri-filmler/" not in slug:
+ results.append(SearchResult(
+ title = title,
+ url = self.fix_url(slug),
+ poster = poster
+ ))
+
+ return results
+
+ except Exception:
+ return []
+
+ async def load_item(self, url: str) -> SeriesInfo:
+ resp = await self.httpx.get(url)
+ sel = Selector(resp.text)
+
+ next_data = sel.css("script#__NEXT_DATA__::text").get()
+ if not next_data:
+ return None
+
+ data = json.loads(next_data)
+ secure_data = data["props"]["pageProps"]["secureData"]
+ raw_data = base64.b64decode(secure_data.replace('"', ''))
+ try:
+ decoded_str = raw_data.decode('utf-8')
+ except UnicodeDecodeError:
+ decoded_str = raw_data.decode('iso-8859-1') # .encode('utf-8').decode('utf-8') implied
+
+ content_details = json.loads(decoded_str)
+ item = content_details.get("contentItem", {})
+
+ title = item.get("original_title") or item.get("originalTitle")
+ poster = self.clean_image_url(item.get("poster_url") or item.get("posterUrl"))
+ description = item.get("description") or item.get("used_description")
+ rating = str(item.get("imdb_point") or item.get("imdbPoint", ""))
+
+ series_data = content_details.get("relatedData", {}).get("seriesData")
+ if not series_data and "RelatedResults" in content_details:
+ series_data = content_details["RelatedResults"].get("getSerieSeasonAndEpisodes", {}).get("result")
+ if series_data and isinstance(series_data, list):
+ pass
+
+ episodes = []
+ if series_data:
+ seasons_list = []
+ if isinstance(series_data, dict):
+ seasons_list = series_data.get("seasons", [])
+ elif isinstance(series_data, list):
+ seasons_list = series_data
+
+ for season in seasons_list:
+ if not isinstance(season, dict): continue
+ s_no = season.get("season_no") or season.get("seasonNo") # Try snake_case too
+ ep_list = season.get("episodes", [])
+ for ep in ep_list:
+ episodes.append(Episode(
+ season = s_no,
+ episode = ep.get("episode_no") or ep.get("episodeNo"),
+ title = ep.get("ep_text") or ep.get("epText"),
+ url = self.fix_url(ep.get("used_slug") or ep.get("usedSlug"))
+ ))
+
+ return SeriesInfo(
+ title = title,
+ url = url,
+ poster = poster,
+ description = description,
+ rating = rating,
+ episodes = episodes
+ )
+
+ async def load_links(self, url: str) -> list[dict]:
+ resp = await self.httpx.get(url)
+ sel = Selector(resp.text)
+
+ next_data = sel.css("script#__NEXT_DATA__::text").get()
+ if not next_data: return []
+
+ try:
+ data = json.loads(next_data)
+ secure_data = data["props"]["pageProps"]["secureData"]
+ raw_data = base64.b64decode(secure_data.replace('"', ''))
+ try:
+ decoded_str = raw_data.decode('utf-8')
+ except UnicodeDecodeError:
+ decoded_str = raw_data.decode('iso-8859-1')
+
+ content_details = json.loads(decoded_str)
+ related_data = content_details.get("relatedData", {})
+
+ source_content = None
+
+ # Check if Series (episode) or Movie
+ if "/dizi/" in url:
+ if related_data.get("episodeSources", {}).get("state"):
+ res = related_data["episodeSources"].get("result", [])
+ if res:
+ source_content = res[0].get("sourceContent")
+ else:
+ # Movie
+ if related_data.get("movieParts", {}).get("state"):
+ # Looking for first part source
+ movie_parts = related_data["movieParts"].get("result", [])
+ if movie_parts:
+ first_part_id = movie_parts[0].get("id")
+ # RelatedResults -> getMoviePartSourcesById_ID
+ rr = content_details.get("RelatedResults", {})
+ key = f"getMoviePartSourcesById_{first_part_id}"
+ if key in rr:
+ res = rr[key].get("result", [])
+ if res:
+ source_content = res[0].get("source_content")
+
+ results = []
+ if source_content:
+ iframe_sel = Selector(source_content)
+ iframe_src = iframe_sel.css("iframe::attr(src)").get()
+ if iframe_src:
+ iframe_src = self.fix_url(iframe_src)
+ # Domain replace
+ if "sn.dplayer74.site" in iframe_src:
+ iframe_src = iframe_src.replace("sn.dplayer74.site", "sn.hotlinger.com")
+
+ extractor = self.ex_manager.find_extractor(iframe_src)
+ results.append({
+ "url": iframe_src,
+ "name": extractor.name if extractor else "Iframe"
+ })
+
+ return results
+
+ except Exception:
+ return []
+
+ def clean_image_url(self, url: str) -> str:
+ if not url: return None
+ url = url.replace("images-macellan-online.cdn.ampproject.org/i/s/", "")
+ url = url.replace("file.dizilla.club", "file.macellan.online")
+ url = url.replace("images.dizilla.club", "images.macellan.online")
+ url = url.replace("images.dizimia4.com", "images.macellan.online")
+ url = url.replace("file.dizimia4.com", "file.macellan.online")
+ url = url.replace("/f/f/", "/630/910/")
+ return self.fix_url(url)
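The new SelcukFlix plugin decodes every API payload the same way: the "response" field (or the secureData blob inside __NEXT_DATA__) is a base64-encoded JSON string, decoded as UTF-8 with an ISO-8859-1 fallback. A minimal standalone sketch of that step, using a locally built payload; the helper name decode_secure_payload is illustrative and not part of the package:

# Sketch of the base64 -> JSON decode used throughout SelcukFlix (illustrative helper name).
import base64, json

def decode_secure_payload(payload: str) -> dict:
    # Strip stray quotes, then base64-decode, as the plugin does for secureData
    raw = base64.b64decode(payload.replace('"', ''))
    try:
        text = raw.decode("utf-8")
    except UnicodeDecodeError:
        # Same fallback path the plugin uses for mis-encoded payloads
        text = raw.decode("iso-8859-1")
    return json.loads(text)

# Locally encoded example, no network involved
sample = base64.b64encode(json.dumps({"result": []}).encode("utf-8")).decode()
print(decode_secure_payload(sample))   # {'result': []}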
@@ -1,12 +1,12 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

- from KekikStream.Core import kekik_cache, PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, SeriesInfo, Episode
  from parsel import Selector

  class SezonlukDizi(PluginBase):
  name = "SezonlukDizi"
  language = "tr"
- main_url = "https://sezonlukdizi6.com"
+ main_url = "https://sezonlukdizi8.com"
  favicon = f"https://www.google.com/s2/favicons?domain={main_url}&sz=64"
  description = "Güncel ve eski dizileri en iyi görüntü kalitesiyle bulabileceğiniz yabancı dizi izleme siteniz."

@@ -21,7 +21,6 @@ class SezonlukDizi(PluginBase):
  f"{main_url}/diziler.asp?siralama_tipi=id&kat=6&s=" : "Belgeseller",
  }

- @kekik_cache(ttl=60*60)
  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
  istek = await self.httpx.get(f"{url}{page}")
  secici = Selector(istek.text)
@@ -36,7 +35,6 @@ class SezonlukDizi(PluginBase):
  for veri in secici.css("div.afis a") if veri.css("div.description::text").get()
  ]

- @kekik_cache(ttl=60*60)
  async def search(self, query: str) -> list[SearchResult]:
  istek = await self.httpx.get(f"{self.main_url}/diziler.asp?adi={query}")
  secici = Selector(istek.text)
@@ -50,7 +48,6 @@ class SezonlukDizi(PluginBase):
  for afis in secici.css("div.afis a.column")
  ]

- @kekik_cache(ttl=60*60)
  async def load_item(self, url: str) -> SeriesInfo:
  istek = await self.httpx.get(url)
  secici = Selector(istek.text)
@@ -102,8 +99,7 @@ class SezonlukDizi(PluginBase):
  actors = actors
  )

- @kekik_cache(ttl=15*60)
- async def load_links(self, url: str) -> list[str]:
+ async def load_links(self, url: str) -> list[dict]:
  istek = await self.httpx.get(url)
  secici = Selector(istek.text)

@@ -111,8 +107,8 @@ class SezonlukDizi(PluginBase):
  if not bid:
  return []

- links = []
- for dil, label in [("1", "AltYazı"), ("0", "Dublaj")]:
+ results = []
+ for dil, label in [("1", "Altyazı"), ("0", "Dublaj")]:
  dil_istek = await self.httpx.post(
  url = f"{self.main_url}/ajax/dataAlternatif22.asp",
  headers = {"X-Requested-With": "XMLHttpRequest"},
@@ -125,7 +121,7 @@ class SezonlukDizi(PluginBase):
  continue

  if dil_json.get("status") == "success":
- for veri in dil_json.get("data", []):
+ for idx, veri in enumerate(dil_json.get("data", [])):
  veri_response = await self.httpx.post(
  url = f"{self.main_url}/ajax/dataEmbed22.asp",
  headers = {"X-Requested-With": "XMLHttpRequest"},
@@ -134,7 +130,13 @@ class SezonlukDizi(PluginBase):
  secici = Selector(veri_response.text)

  if iframe := secici.css("iframe::attr(src)").get():
- video_url = self.fix_url(iframe)
- links.append(video_url)
-
- return links
+ if "link.asp" in iframe:
+ continue
+
+ extractor = self.ex_manager.find_extractor(self.fix_url(iframe))
+ results.append({
+ "url" : self.fix_url(iframe),
+ "name" : f"{extractor.name if extractor else f'{label} - Player {idx + 1}'}"
+ })
+
+ return results
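As in the other reworked plugins, load_links here now returns a list of dicts (a playable url plus a display name resolved via ex_manager.find_extractor) instead of bare URL strings. A small sketch of consuming that shape; the links value below is a stand-in for a real plugin's return value, and the labels are invented:

# Hypothetical consumer of the new load_links() return shape (list of dicts).
links = [
    {"url": "https://example.com/embed/abc", "name": "RapidVid"},
    {"url": "https://example.com/embed/def", "name": "Dublaj - Player 2"},
]

for idx, link in enumerate(links, start=1):
    # Fall back to a generic label when a link carries no extractor name
    name = link.get("name") or f"Player {idx}"
    print(f"{idx}. {name} -> {link['url']}")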
@@ -1,11 +1,12 @@
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.

- from KekikStream.Core import kekik_cache, PluginBase, MainPageResult, SearchResult, MovieInfo, Episode, SeriesInfo, ExtractResult, Subtitle
+ from KekikStream.Core import PluginBase, MainPageResult, SearchResult, MovieInfo, Episode, SeriesInfo, ExtractResult, Subtitle
+ import json

  class SineWix(PluginBase):
  name = "SineWix"
  language = "tr"
- main_url = "https://ythls.kekikakademi.org"
+ main_url = "http://10.0.0.2:2585"
  favicon = "https://play-lh.googleusercontent.com/brwGNmr7IjA_MKk_TTPs0va10hdKE_bD_a1lnKoiMuCayW98EHpRv55edA6aEoJlmwfX"
  description = "Sinewix | Ücretsiz Film - Dizi - Anime İzleme Uygulaması"

@@ -34,7 +35,6 @@ class SineWix(PluginBase):
  f"{main_url}/sinewix/movies/36" : "Tarih",
  }

- @kekik_cache(ttl=60*60)
  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
  istek = await self.httpx.get(f"{url}/{page}")
  veriler = istek.json()
@@ -49,7 +49,6 @@ class SineWix(PluginBase):
  for veri in veriler.get("data")
  ]

- @kekik_cache(ttl=60*60)
  async def search(self, query: str) -> list[SearchResult]:
  istek = await self.httpx.get(f"{self.main_url}/sinewix/search/{query}")

@@ -62,7 +61,6 @@ class SineWix(PluginBase):
  for veri in istek.json().get("search")
  ]

- @kekik_cache(ttl=60*60)
  async def load_item(self, url: str) -> MovieInfo | SeriesInfo:
  item_type = url.split("?type=")[-1].split("&id=")[0]
  item_id = url.split("&id=")[-1]
@@ -97,22 +95,22 @@ class SineWix(PluginBase):
  if not episode.get("videos"):
  continue

+ # Bölüm için gerekli bilgileri JSON olarak sakla
+ ep_data = {
+ "url" : self.fix_url(episode.get("videos")[0].get("link")),
+ "title" : self.name,
+ "is_episode" : True
+ }
+
  ep_model = Episode(
  season = season.get("season_number"),
  episode = episode.get("episode_number"),
  title = episode.get("name"),
- url = self.fix_url(episode.get("videos")[0].get("link")),
+ url = json.dumps(ep_data),
  )

  episodes.append(ep_model)

- self._data[ep_model.url] = {
- "ext_name" : self.name,
- "name" : f"{title} | {ep_model.season}x{ep_model.episode}",
- "referer" : self.main_url,
- "subtitles" : []
- }
-
  return SeriesInfo(
  url = self.fix_url(f"{self.main_url}/sinewix/{item_type}/{item_id}"),
  poster = self.fix_url(veri.get("poster_path")),
@@ -125,10 +123,21 @@ class SineWix(PluginBase):
  episodes = episodes,
  )

- @kekik_cache(ttl=15*60)
- async def load_links(self, url: str) -> list[str]:
- if not url.startswith(self.main_url):
- return [url]
+ async def load_links(self, url: str) -> list[dict]:
+ try:
+ veri = json.loads(url)
+ if veri.get("is_episode"):
+ return [{
+ "url" : veri.get("url"),
+ "name" : veri.get("title"),
+ "referer" : self.main_url
+ }]
+ except Exception:
+ pass
+
+ # Eğer JSON değilse ve direkt URL ise (eski yapı veya harici link)
+ if not url.startswith(self.main_url) and not url.startswith("{"):
+ return [{"url": url, "name": "Video"}]

  istek = await self.httpx.get(url)
  veri = istek.json()
@@ -137,21 +146,21 @@ class SineWix(PluginBase):
  alt_title = veri.get("original_name") or ""
  title = f"{org_title} - {alt_title}" if (alt_title and org_title != alt_title) else org_title

+ results = []
  for video in veri.get("videos"):
  video_link = video.get("link").split("_blank\">")[-1]
- self._data[video_link] = {
- "ext_name" : self.name,
- "name" : f"{self.name} | {title}",
- "referer" : self.main_url,
- "subtitles" : []
- }
-
- return list(self._data.keys())
-
- async def play(self, name: str, url: str, referer: str, subtitles: list[Subtitle]):
- extract_result = ExtractResult(name=name, url=url, referer=referer, subtitles=subtitles)
- self.media_handler.title = name
+ results.append({
+ "url" : video_link,
+ "name" : f"{self.name}",
+ "referer" : self.main_url
+ })
+
+ return results
+
+ async def play(self, **kwargs):
+ extract_result = ExtractResult(**kwargs)
+ self.media_handler.title = kwargs.get("name")
  if self.name not in self.media_handler.title:
  self.media_handler.title = f"{self.name} | {self.media_handler.title}"

- self.media_handler.play_media(extract_result)
+ self.media_handler.play_media(extract_result)
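The SineWix rework drops the shared self._data side channel: per-episode metadata is now serialized into Episode.url as a JSON string and unpacked again inside load_links. A standalone sketch of that round trip; the dict keys mirror the diff, while the surrounding function and the example referer are illustrative:

# Round trip of the JSON-in-url pattern introduced for SineWix episodes.
import json

ep_data = {
    "url"        : "https://example.com/video.m3u8",
    "title"      : "SineWix",
    "is_episode" : True,
}
packed = json.dumps(ep_data)                  # what gets stored in Episode.url

def load_links_sketch(url: str, main_url: str = "https://example.invalid") -> list[dict]:
    try:
        veri = json.loads(url)
        if veri.get("is_episode"):
            return [{"url": veri["url"], "name": veri["title"], "referer": main_url}]
    except json.JSONDecodeError:
        pass                                  # not JSON: treat it as a plain URL
    return [{"url": url, "name": "Video"}]

print(load_links_sketch(packed))
print(load_links_sketch("https://example.com/direct.mp4"))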