KekikStream 1.6.4__py3-none-any.whl → 1.6.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. KekikStream/Core/Extractor/ExtractorBase.py +6 -11
  2. KekikStream/Core/Plugin/PluginBase.py +6 -12
  3. KekikStream/Extractors/CloseLoad.py +2 -3
  4. KekikStream/Extractors/ContentX.py +4 -4
  5. KekikStream/Extractors/MailRu.py +2 -3
  6. KekikStream/Extractors/MixPlayHD.py +2 -2
  7. KekikStream/Extractors/Odnoklassniki.py +14 -10
  8. KekikStream/Extractors/PeaceMakerst.py +4 -4
  9. KekikStream/Extractors/PixelDrain.py +1 -2
  10. KekikStream/Extractors/RapidVid.py +2 -3
  11. KekikStream/Extractors/SibNet.py +2 -3
  12. KekikStream/Extractors/Sobreatsesuyp.py +4 -4
  13. KekikStream/Extractors/TRsTX.py +4 -4
  14. KekikStream/Extractors/TauVideo.py +2 -2
  15. KekikStream/Extractors/TurboImgz.py +2 -2
  16. KekikStream/Extractors/VidMoly.py +4 -6
  17. KekikStream/Extractors/VidMoxy.py +2 -3
  18. KekikStream/Extractors/VideoSeyred.py +3 -3
  19. KekikStream/Plugins/DiziBox.py +16 -16
  20. KekikStream/Plugins/DiziYou.py +4 -4
  21. KekikStream/Plugins/Dizilla.py +5 -5
  22. KekikStream/Plugins/FilmMakinesi.py +3 -3
  23. KekikStream/Plugins/FullHDFilmizlesene.py +3 -3
  24. KekikStream/Plugins/HDFilmCehennemi.py +9 -9
  25. KekikStream/Plugins/JetFilmizle.py +6 -6
  26. KekikStream/Plugins/RecTV.py +7 -7
  27. KekikStream/Plugins/SezonlukDizi.py +8 -8
  28. KekikStream/Plugins/SineWix.py +170 -0
  29. KekikStream/Plugins/UgurFilm.py +6 -6
  30. KekikStream/requirements.txt +1 -1
  31. {kekikstream-1.6.4.dist-info → kekikstream-1.6.7.dist-info}/METADATA +2 -2
  32. kekikstream-1.6.7.dist-info/RECORD +61 -0
  33. kekikstream-1.6.4.dist-info/RECORD +0 -60
  34. {kekikstream-1.6.4.dist-info → kekikstream-1.6.7.dist-info}/WHEEL +0 -0
  35. {kekikstream-1.6.4.dist-info → kekikstream-1.6.7.dist-info}/entry_points.txt +0 -0
  36. {kekikstream-1.6.4.dist-info → kekikstream-1.6.7.dist-info}/licenses/LICENSE +0 -0
  37. {kekikstream-1.6.4.dist-info → kekikstream-1.6.7.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,7 @@
1
1
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
2
 
3
3
  from abc import ABC, abstractmethod
4
- from httpx import AsyncClient, Timeout
4
+ from curl_cffi import AsyncSession
5
5
  from cloudscraper import CloudScraper
6
6
  from typing import Optional
7
7
  from .ExtractorModels import ExtractResult
@@ -14,15 +14,10 @@ class ExtractorBase(ABC):
14
14
 
15
15
  def __init__(self):
16
16
  # HTTP istekleri için oturum oluştur
17
- self.httpx = AsyncClient(
18
- headers = {
19
- "User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5)",
20
- "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
21
- },
22
- timeout = Timeout(10.0)
23
- )
24
- # CloudFlare korumalı siteler için scraper ayarla
25
- self.cloudscraper = CloudScraper()
17
+ self.cffi = AsyncSession(impersonate="firefox135")
18
+ self.cloudscraper = CloudScraper()
19
+ self.cffi.cookies.update(self.cloudscraper.cookies)
20
+ self.cffi.headers.update({"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 15.7; rv:135.0) Gecko/20100101 Firefox/135.0"})
26
21
 
27
22
  def can_handle_url(self, url: str) -> bool:
28
23
  # URL'nin bu çıkarıcı tarafından işlenip işlenemeyeceğini kontrol et
@@ -35,7 +30,7 @@ class ExtractorBase(ABC):
35
30
 
36
31
  async def close(self):
37
32
  # HTTP oturumunu güvenli bir şekilde kapat
38
- await self.httpx.aclose()
33
+ await self.cffi.close()
39
34
 
40
35
  def fix_url(self, url: str) -> str:
41
36
  # Eksik URL'leri düzelt ve tam URL formatına çevir
@@ -1,7 +1,7 @@
1
1
  # Bu araç @keyiflerolsun tarafından | @KekikAkademi için yazılmıştır.
2
2
 
3
3
  from abc import ABC, abstractmethod
4
- from httpx import AsyncClient, Timeout
4
+ from curl_cffi import AsyncSession
5
5
  from cloudscraper import CloudScraper
6
6
  from .PluginModels import MainPageResult, SearchResult, MovieInfo
7
7
  from ..Media.MediaHandler import MediaHandler
@@ -24,18 +24,12 @@ class PluginBase(ABC):
24
24
  self.main_url = new_url
25
25
 
26
26
  def __init__(self):
27
- self.httpx = AsyncClient(
28
- headers = {
29
- "User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5)",
30
- "Accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
31
- },
32
- timeout = Timeout(10.0),
33
- )
27
+ self.cffi = AsyncSession(impersonate="firefox135")
28
+ self.cloudscraper = CloudScraper()
29
+ self.cffi.cookies.update(self.cloudscraper.cookies)
30
+ self.cffi.headers.update({"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 15.7; rv:135.0) Gecko/20100101 Firefox/135.0"})
34
31
  self.media_handler = MediaHandler()
35
- self.cloudscraper = CloudScraper()
36
32
  self.ex_manager = ExtractorManager()
37
- self.httpx.headers.update(self.cloudscraper.headers)
38
- self.httpx.cookies.update(self.cloudscraper.cookies)
39
33
 
40
34
  # @abstractmethod
41
35
  # async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
@@ -78,7 +72,7 @@ class PluginBase(ABC):
78
72
  pass
79
73
 
80
74
  async def close(self):
81
- await self.httpx.aclose()
75
+ await self.cffi.close()
82
76
 
83
77
  def fix_url(self, url: str) -> str:
84
78
  if not url:
@@ -10,15 +10,14 @@ class CloseLoadExtractor(ExtractorBase):
10
10
 
11
11
  async def extract(self, url, referer=None) -> ExtractResult:
12
12
  if referer:
13
- self.httpx.headers.update({"Referer": referer})
13
+ self.cffi.headers.update({"Referer": referer})
14
14
 
15
- istek = await self.httpx.get(url)
15
+ istek = await self.cffi.get(url)
16
16
  istek.raise_for_status()
17
17
 
18
18
  eval_func = re.compile(r'\s*(eval\(function[\s\S].*)\s*').findall(istek.text)[0]
19
19
  m3u_link = StreamDecoder.extract_stream_url(Packer.unpack(eval_func))
20
20
 
21
- await self.close()
22
21
  return ExtractResult(
23
22
  name = self.name,
24
23
  url = m3u_link,
@@ -9,9 +9,9 @@ class ContentX(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> list[ExtractResult]:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
- istek = await self.httpx.get(url)
14
+ istek = await self.cffi.get(url)
15
15
  istek.raise_for_status()
16
16
  i_source = istek.text
17
17
 
@@ -39,7 +39,7 @@ class ContentX(ExtractorBase):
39
39
  )
40
40
  )
41
41
 
42
- vid_source_request = await self.httpx.get(f"{self.main_url}/source2.php?v={i_extract_value}", headers={"Referer": referer or self.main_url})
42
+ vid_source_request = await self.cffi.get(f"{self.main_url}/source2.php?v={i_extract_value}", headers={"Referer": referer or self.main_url})
43
43
  vid_source_request.raise_for_status()
44
44
 
45
45
  vid_source = vid_source_request.text
@@ -60,7 +60,7 @@ class ContentX(ExtractorBase):
60
60
 
61
61
  if i_dublaj := re.search(r',\"([^"]+)\",\"Türkçe"', i_source):
62
62
  dublaj_value = i_dublaj[1]
63
- dublaj_source_request = await self.httpx.get(f"{self.main_url}/source2.php?v={dublaj_value}", headers={"Referer": referer or self.main_url})
63
+ dublaj_source_request = await self.cffi.get(f"{self.main_url}/source2.php?v={dublaj_value}", headers={"Referer": referer or self.main_url})
64
64
  dublaj_source_request.raise_for_status()
65
65
 
66
66
  dublaj_source = dublaj_source_request.text
@@ -11,9 +11,9 @@ class MailRuExtractor(ExtractorBase):
11
11
  video_meta_url = f"{self.main_url}/+/video/meta/{vid_id}"
12
12
 
13
13
  if referer:
14
- self.httpx.headers.update({"Referer": referer})
14
+ self.cffi.headers.update({"Referer": referer})
15
15
 
16
- istek = await self.httpx.get(video_meta_url)
16
+ istek = await self.cffi.get(video_meta_url)
17
17
  istek.raise_for_status()
18
18
 
19
19
  video_key = istek.cookies.get("video_key")
@@ -30,7 +30,6 @@ class MailRuExtractor(ExtractorBase):
30
30
  if video_url.startswith("//"):
31
31
  video_url = f"https:{video_url}"
32
32
 
33
- await self.close()
34
33
  return ExtractResult(
35
34
  name = self.name,
36
35
  url = video_url,
@@ -10,9 +10,9 @@ class MixPlayHD(ExtractorBase):
10
10
 
11
11
  async def extract(self, url, referer=None) -> ExtractResult:
12
12
  if referer:
13
- self.httpx.headers.update({"Referer": referer})
13
+ self.cffi.headers.update({"Referer": referer})
14
14
 
15
- istek = await self.httpx.get(url)
15
+ istek = await self.cffi.get(url)
16
16
  istek.raise_for_status()
17
17
 
18
18
  be_player_match = re.search(r"bePlayer\('([^']+)',\s*'(\{[^\}]+\})'\);", istek.text)
@@ -8,16 +8,20 @@ class Odnoklassniki(ExtractorBase):
8
8
  main_url = "https://odnoklassniki.ru"
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
- if referer:
12
- self.httpx.headers.update({"Referer": referer})
13
-
14
- self.httpx.headers.update({
15
- "User-Agent": "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Mobile Safari/537.36"
16
- })
17
-
18
11
  if "/video/" in url:
19
12
  url = url.replace("/video/", "/videoembed/")
20
13
 
14
+ headers = {
15
+ "Accept" : "*/*",
16
+ "Connection" : "keep-alive",
17
+ "Sec-Fetch-Dest" : "empty",
18
+ "Sec-Fetch-Mode" : "cors",
19
+ "Sec-Fetch-Site" : "cross-site",
20
+ "Origin" : self.main_url,
21
+ "User-Agent" : "Mozilla/5.0 (X11; Linux x86_64; rv:101.0) Gecko/20100101 Firefox/101.0",
22
+ }
23
+ self.cffi.headers.update(headers)
24
+
21
25
  try:
22
26
  istek = await self.fetch_with_redirects(url)
23
27
  istek.raise_for_status()
@@ -80,8 +84,8 @@ class Odnoklassniki(ExtractorBase):
80
84
  return ExtractResult(
81
85
  name = self.name,
82
86
  url = best_video,
83
- referer = self.main_url,
84
- headers = {},
87
+ referer = referer,
88
+ headers = headers,
85
89
  subtitles = []
86
90
  )
87
91
 
@@ -89,7 +93,7 @@ class Odnoklassniki(ExtractorBase):
89
93
  """Yönlendirmeleri takip eden bir fonksiyon"""
90
94
  redirects = 0
91
95
  while redirects < max_redirects:
92
- istek = await self.httpx.get(url, follow_redirects=False)
96
+ istek = await self.cffi.get(url, allow_redirects=False)
93
97
 
94
98
  if istek.status_code not in [301, 302]:
95
99
  break # Yönlendirme yoksa çık
@@ -9,14 +9,14 @@ class PeaceMakerst(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
- self.httpx.headers.update({
14
+ self.cffi.headers.update({
15
15
  "Content-Type" : "application/x-www-form-urlencoded; charset=UTF-8",
16
16
  "X-Requested-With" : "XMLHttpRequest"
17
17
  })
18
18
 
19
- response = await self.httpx.post(
19
+ response = await self.cffi.post(
20
20
  url = f"{url}?do=getVideo",
21
21
  data = {
22
22
  "hash" : url.split("video/")[-1],
@@ -33,7 +33,7 @@ class PeaceMakerst(ExtractorBase):
33
33
  teve2_id = re.search(r"teve2\.com\.tr\\\/embed\\\/(\d+)", response_text)[1]
34
34
  teve2_url = f"https://www.teve2.com.tr/action/media/{teve2_id}"
35
35
 
36
- teve2_response = await self.httpx.get(teve2_url, headers={"Referer": f"https://www.teve2.com.tr/embed/{teve2_id}"})
36
+ teve2_response = await self.cffi.get(teve2_url, headers={"Referer": f"https://www.teve2.com.tr/embed/{teve2_id}"})
37
37
  teve2_response.raise_for_status()
38
38
  teve2_json = teve2_response.json()
39
39
 
@@ -9,7 +9,7 @@ class PixelDrain(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
14
  pixel_id_match = re.search(r"/u/([^/?]+)|([^\/]+)(?=\?download)", url)
15
15
  if not pixel_id_match:
@@ -19,7 +19,6 @@ class PixelDrain(ExtractorBase):
19
19
  download_link = f"{self.main_url}/api/file/{pixel_id}?download"
20
20
  referer_link = f"{self.main_url}/u/{pixel_id}?download"
21
21
 
22
- await self.close()
23
22
  return ExtractResult(
24
23
  name = f"{self.name} - {pixel_id}",
25
24
  url = download_link,
@@ -10,9 +10,9 @@ class RapidVid(ExtractorBase):
10
10
 
11
11
  async def extract(self, url, referer=None) -> ExtractResult:
12
12
  if referer:
13
- self.httpx.headers.update({"Referer": referer})
13
+ self.cffi.headers.update({"Referer": referer})
14
14
 
15
- istek = await self.httpx.get(url)
15
+ istek = await self.cffi.get(url)
16
16
  istek.raise_for_status()
17
17
 
18
18
  subtitles = []
@@ -45,7 +45,6 @@ class RapidVid(ExtractorBase):
45
45
  except Exception as hata:
46
46
  raise RuntimeError(f"Extraction failed: {hata}") from hata
47
47
 
48
- await self.close()
49
48
  return ExtractResult(
50
49
  name = self.name,
51
50
  url = decoded_url,
@@ -9,9 +9,9 @@ class SibNet(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
- response = await self.httpx.get(url)
14
+ response = await self.cffi.get(url)
15
15
  response.raise_for_status()
16
16
 
17
17
  match = re.search(r'player\.src\(\[\{src: \"([^\"]+)\"', response.text)
@@ -20,7 +20,6 @@ class SibNet(ExtractorBase):
20
20
 
21
21
  m3u_link = f"{self.main_url}{match[1]}"
22
22
 
23
- await self.close()
24
23
  return ExtractResult(
25
24
  name = self.name,
26
25
  url = m3u_link,
@@ -9,9 +9,9 @@ class Sobreatsesuyp(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
- istek = await self.httpx.get(url)
14
+ istek = await self.cffi.get(url)
15
15
  istek.raise_for_status()
16
16
 
17
17
  file_match = re.search(r'file\":\"([^\"]+)', istek.text)
@@ -21,7 +21,7 @@ class Sobreatsesuyp(ExtractorBase):
21
21
  file_path = file_match[1].replace("\\", "")
22
22
  post_link = f"{self.main_url}/{file_path}"
23
23
 
24
- post_istek = await self.httpx.post(post_link)
24
+ post_istek = await self.cffi.post(post_link)
25
25
  post_istek.raise_for_status()
26
26
 
27
27
  try:
@@ -41,7 +41,7 @@ class Sobreatsesuyp(ExtractorBase):
41
41
  continue
42
42
 
43
43
  playlist_url = f"{self.main_url}/playlist/{file.lstrip('/')}.txt"
44
- playlist_request = await self.httpx.post(playlist_url, headers={"Referer": referer or self.main_url})
44
+ playlist_request = await self.cffi.post(playlist_url, headers={"Referer": referer or self.main_url})
45
45
  playlist_request.raise_for_status()
46
46
 
47
47
  all_results.append(
@@ -9,9 +9,9 @@ class TRsTX(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> list[ExtractResult]:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
- istek = await self.httpx.get(url)
14
+ istek = await self.cffi.get(url)
15
15
  istek.raise_for_status()
16
16
 
17
17
  file_match = re.search(r'file\":\"([^\"]+)', istek.text)
@@ -21,7 +21,7 @@ class TRsTX(ExtractorBase):
21
21
  file_path = file_match[1].replace("\\", "")
22
22
  post_link = f"{self.main_url}/{file_path}"
23
23
 
24
- post_istek = await self.httpx.post(post_link)
24
+ post_istek = await self.cffi.post(post_link)
25
25
  post_istek.raise_for_status()
26
26
 
27
27
  try:
@@ -42,7 +42,7 @@ class TRsTX(ExtractorBase):
42
42
  continue
43
43
 
44
44
  playlist_url = f"{self.main_url}/playlist/{file.lstrip('/')}.txt"
45
- playlist_request = await self.httpx.post(playlist_url, headers={"Referer": referer or self.main_url})
45
+ playlist_request = await self.cffi.post(playlist_url, headers={"Referer": referer or self.main_url})
46
46
  playlist_request.raise_for_status()
47
47
 
48
48
  video_data = playlist_request.text
@@ -8,12 +8,12 @@ class TauVideo(ExtractorBase):
8
8
 
9
9
  async def extract(self, url, referer=None) -> list[ExtractResult]:
10
10
  if referer:
11
- self.httpx.headers.update({"Referer": referer})
11
+ self.cffi.headers.update({"Referer": referer})
12
12
 
13
13
  video_key = url.split("/")[-1]
14
14
  api_url = f"{self.main_url}/api/video/{video_key}"
15
15
 
16
- response = await self.httpx.get(api_url)
16
+ response = await self.cffi.get(api_url)
17
17
  response.raise_for_status()
18
18
 
19
19
  api_data = response.json()
@@ -9,9 +9,9 @@ class TurboImgz(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
- istek = await self.httpx.get(url)
14
+ istek = await self.cffi.get(url)
15
15
  istek.raise_for_status()
16
16
 
17
17
  if video_match := re.search(r'file: "(.*)",', istek.text):
@@ -11,10 +11,9 @@ class VidMoly(ExtractorBase):
11
11
 
12
12
  async def extract(self, url: str, referer: str = None) -> ExtractResult:
13
13
  if referer:
14
- self.httpx.headers.update({"Referer": referer})
14
+ self.cffi.headers.update({"Referer": referer})
15
15
 
16
- self.httpx.headers.update({
17
- "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
16
+ self.cffi.headers.update({
18
17
  "Sec-Fetch-Dest" : "iframe",
19
18
  })
20
19
 
@@ -22,10 +21,10 @@ class VidMoly(ExtractorBase):
22
21
  self.main_url = self.main_url.replace(".me", ".net")
23
22
  url = url.replace(".me", ".net")
24
23
 
25
- response = await self.httpx.get(url)
24
+ response = await self.cffi.get(url)
26
25
  if "Select number" in response.text:
27
26
  secici = Selector(response.text)
28
- response = await self.httpx.post(
27
+ response = await self.cffi.post(
29
28
  url = url,
30
29
  data = {
31
30
  "op" : secici.css("input[name='op']::attr(value)").get(),
@@ -77,7 +76,6 @@ class VidMoly(ExtractorBase):
77
76
  if not video_url:
78
77
  raise ValueError("Video URL bulunamadı.")
79
78
 
80
- await self.close()
81
79
  return ExtractResult(
82
80
  name = self.name,
83
81
  url = video_url,
@@ -10,9 +10,9 @@ class VidMoxy(ExtractorBase):
10
10
 
11
11
  async def extract(self, url, referer=None) -> ExtractResult:
12
12
  if referer:
13
- self.httpx.headers.update({"Referer": referer})
13
+ self.cffi.headers.update({"Referer": referer})
14
14
 
15
- istek = await self.httpx.get(url)
15
+ istek = await self.cffi.get(url)
16
16
  istek.raise_for_status()
17
17
 
18
18
  subtitles = []
@@ -41,7 +41,6 @@ class VidMoxy(ExtractorBase):
41
41
 
42
42
  m3u_link = HexCodec.decode(escaped_hex)
43
43
 
44
- await self.close()
45
44
  return ExtractResult(
46
45
  name = self.name,
47
46
  url = m3u_link,
@@ -9,18 +9,18 @@ class VideoSeyred(ExtractorBase):
9
9
 
10
10
  async def extract(self, url, referer=None) -> ExtractResult:
11
11
  if referer:
12
- self.httpx.headers.update({"Referer": referer})
12
+ self.cffi.headers.update({"Referer": referer})
13
13
 
14
14
  video_id = url.split("embed/")[1].split("?")[0]
15
15
  if len(video_id) > 10:
16
- kontrol = await self.httpx.get(url)
16
+ kontrol = await self.cffi.get(url)
17
17
  kontrol.raise_for_status()
18
18
 
19
19
  video_id = re.search(r"playlist\/(.*)\.json", kontrol.text)[1]
20
20
 
21
21
  video_url = f"{self.main_url}/playlist/{video_id}.json"
22
22
 
23
- response = await self.httpx.get(video_url)
23
+ response = await self.cffi.get(video_url)
24
24
  response.raise_for_status()
25
25
 
26
26
  try:
@@ -42,13 +42,13 @@ class DiziBox(PluginBase):
42
42
 
43
43
  #@kekik_cache(ttl=60*60)
44
44
  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
45
- self.httpx.cookies.update({
45
+ self.cffi.cookies.update({
46
46
  "isTrustedUser" : "true",
47
47
  "dbxu" : str(time.time() * 1000).split(".")[0]
48
48
  })
49
- istek = await self.httpx.get(
49
+ istek = await self.cffi.get(
50
50
  url = f"{url.replace('SAYFA', str(page))}",
51
- follow_redirects = True
51
+ allow_redirects = True
52
52
  )
53
53
  secici = Selector(istek.text)
54
54
 
@@ -64,11 +64,11 @@ class DiziBox(PluginBase):
64
64
 
65
65
  #@kekik_cache(ttl=60*60)
66
66
  async def search(self, query: str) -> list[SearchResult]:
67
- self.httpx.cookies.update({
67
+ self.cffi.cookies.update({
68
68
  "isTrustedUser" : "true",
69
69
  "dbxu" : str(time.time() * 1000).split(".")[0]
70
70
  })
71
- istek = await self.httpx.get(f"{self.main_url}/?s={query}")
71
+ istek = await self.cffi.get(f"{self.main_url}/?s={query}")
72
72
  secici = Selector(istek.text)
73
73
 
74
74
  return [
@@ -82,7 +82,7 @@ class DiziBox(PluginBase):
82
82
 
83
83
  #@kekik_cache(ttl=60*60)
84
84
  async def load_item(self, url: str) -> SeriesInfo:
85
- istek = await self.httpx.get(url)
85
+ istek = await self.cffi.get(url)
86
86
  secici = Selector(istek.text)
87
87
 
88
88
  title = secici.css("div.tv-overview h1 a::text").get()
@@ -96,7 +96,7 @@ class DiziBox(PluginBase):
96
96
  episodes = []
97
97
  for sezon_link in secici.css("div#seasons-list a::attr(href)").getall():
98
98
  sezon_url = self.fix_url(sezon_link)
99
- sezon_istek = await self.httpx.get(sezon_url)
99
+ sezon_istek = await self.cffi.get(sezon_url)
100
100
  sezon_secici = Selector(sezon_istek.text)
101
101
 
102
102
  for bolum in sezon_secici.css("article.grid-box"):
@@ -131,8 +131,8 @@ class DiziBox(PluginBase):
131
131
  async def _iframe_decode(self, name:str, iframe_link:str, referer:str) -> list[str]:
132
132
  results = []
133
133
 
134
- self.httpx.headers.update({"Referer": referer})
135
- self.httpx.cookies.update({
134
+ self.cffi.headers.update({"Referer": referer})
135
+ self.cffi.cookies.update({
136
136
  "isTrustedUser" : "true",
137
137
  "dbxu" : str(time.time() * 1000).split(".")[0]
138
138
  })
@@ -140,12 +140,12 @@ class DiziBox(PluginBase):
140
140
  if "/player/king/king.php" in iframe_link:
141
141
  iframe_link = iframe_link.replace("king.php?v=", "king.php?wmode=opaque&v=")
142
142
 
143
- istek = await self.httpx.get(iframe_link)
143
+ istek = await self.cffi.get(iframe_link)
144
144
  secici = Selector(istek.text)
145
145
  iframe = secici.css("div#Player iframe::attr(src)").get()
146
146
 
147
- self.httpx.headers.update({"Referer": self.main_url})
148
- istek = await self.httpx.get(iframe)
147
+ self.cffi.headers.update({"Referer": self.main_url})
148
+ istek = await self.cffi.get(iframe)
149
149
 
150
150
  crypt_data = re.search(r"CryptoJS\.AES\.decrypt\(\"(.*)\",\"", istek.text)[1]
151
151
  crypt_pass = re.search(r"\",\"(.*)\"\);", istek.text)[1]
@@ -161,7 +161,7 @@ class DiziBox(PluginBase):
161
161
  while True:
162
162
  await asyncio.sleep(.3)
163
163
  with contextlib.suppress(Exception):
164
- istek = await self.httpx.get(iframe_link)
164
+ istek = await self.cffi.get(iframe_link)
165
165
 
166
166
  if atob_data := re.search(r"unescape\(\"(.*)\"\)", istek.text):
167
167
  decoded_atob = urllib.parse.unquote(atob_data[1])
@@ -180,7 +180,7 @@ class DiziBox(PluginBase):
180
180
 
181
181
  #@kekik_cache(ttl=15*60)
182
182
  async def load_links(self, url: str) -> list[dict]:
183
- istek = await self.httpx.get(url)
183
+ istek = await self.cffi.get(url)
184
184
  secici = Selector(istek.text)
185
185
 
186
186
  results = []
@@ -200,8 +200,8 @@ class DiziBox(PluginBase):
200
200
  if not alt_link:
201
201
  continue
202
202
 
203
- self.httpx.headers.update({"Referer": url})
204
- alt_istek = await self.httpx.get(alt_link)
203
+ self.cffi.headers.update({"Referer": url})
204
+ alt_istek = await self.cffi.get(alt_link)
205
205
  alt_istek.raise_for_status()
206
206
 
207
207
  alt_secici = Selector(alt_istek.text)
@@ -31,7 +31,7 @@ class DiziYou(PluginBase):
31
31
 
32
32
  #@kekik_cache(ttl=60*60)
33
33
  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
34
- istek = await self.httpx.get(f"{url.replace('SAYFA', str(page))}")
34
+ istek = await self.cffi.get(f"{url.replace('SAYFA', str(page))}")
35
35
  secici = Selector(istek.text)
36
36
 
37
37
  return [
@@ -46,7 +46,7 @@ class DiziYou(PluginBase):
46
46
 
47
47
  #@kekik_cache(ttl=60*60)
48
48
  async def search(self, query: str) -> list[SearchResult]:
49
- istek = await self.httpx.get(f"{self.main_url}/?s={query}")
49
+ istek = await self.cffi.get(f"{self.main_url}/?s={query}")
50
50
  secici = Selector(istek.text)
51
51
 
52
52
  return [
@@ -60,7 +60,7 @@ class DiziYou(PluginBase):
60
60
 
61
61
  #@kekik_cache(ttl=60*60)
62
62
  async def load_item(self, url: str) -> SeriesInfo:
63
- istek = await self.httpx.get(url)
63
+ istek = await self.cffi.get(url)
64
64
  secici = Selector(istek.text)
65
65
 
66
66
  title = secici.css("h1::text").get().strip()
@@ -109,7 +109,7 @@ class DiziYou(PluginBase):
109
109
 
110
110
  #@kekik_cache(ttl=15*60)
111
111
  async def load_links(self, url: str) -> list[dict]:
112
- istek = await self.httpx.get(url)
112
+ istek = await self.cffi.get(url)
113
113
  secici = Selector(istek.text)
114
114
 
115
115
  item_title = secici.css("div.title h1::text").get()
@@ -26,7 +26,7 @@ class Dizilla(PluginBase):
26
26
 
27
27
  #@kekik_cache(ttl=60*60)
28
28
  async def get_main_page(self, page: int, url: str, category: str) -> list[MainPageResult]:
29
- istek = await self.httpx.get(url)
29
+ istek = await self.cffi.get(url)
30
30
  secici = Selector(istek.text)
31
31
 
32
32
  ana_sayfa = []
@@ -51,7 +51,7 @@ class Dizilla(PluginBase):
51
51
  ep_name = ep_name.replace(". Sezon", "x").replace(". Bölüm", "").replace("x ", "x")
52
52
  title = f"{name} - {ep_name}"
53
53
 
54
- ep_req = await self.httpx.get(self.fix_url(veri.css("::attr(href)").get()))
54
+ ep_req = await self.cffi.get(self.fix_url(veri.css("::attr(href)").get()))
55
55
  ep_secici = Selector(ep_req.text)
56
56
  href = self.fix_url(ep_secici.css("nav li:nth-of-type(3) a::attr(href)").get())
57
57
  poster = self.fix_url(ep_secici.css("img.imgt::attr(src)").get())
@@ -90,7 +90,7 @@ class Dizilla(PluginBase):
90
90
 
91
91
  #@kekik_cache(ttl=60*60)
92
92
  async def search(self, query: str) -> list[SearchResult]:
93
- arama_istek = await self.httpx.post(f"{self.main_url}/api/bg/searchcontent?searchterm={query}")
93
+ arama_istek = await self.cffi.post(f"{self.main_url}/api/bg/searchcontent?searchterm={query}")
94
94
  decrypted = await self.decrypt_response(arama_istek.json().get("response"))
95
95
  arama_veri = decrypted.get("result", [])
96
96
 
@@ -116,7 +116,7 @@ class Dizilla(PluginBase):
116
116
 
117
117
  #@kekik_cache(ttl=60*60)
118
118
  async def load_item(self, url: str) -> SeriesInfo:
119
- istek = await self.httpx.get(url)
119
+ istek = await self.cffi.get(url)
120
120
  secici = Selector(istek.text)
121
121
  veri = loads(secici.xpath("//script[@type='application/ld+json']/text()").getall()[-1])
122
122
 
@@ -156,7 +156,7 @@ class Dizilla(PluginBase):
156
156
 
157
157
  #@kekik_cache(ttl=15*60)
158
158
  async def load_links(self, url: str) -> list[dict]:
159
- istek = await self.httpx.get(url)
159
+ istek = await self.cffi.get(url)
160
160
  secici = Selector(istek.text)
161
161
 
162
162
  next_data = loads(secici.css("script#__NEXT_DATA__::text").get())