quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr might be problematic. Click here for more details.

Files changed (77) hide show
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,237 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import base64
6
+ from io import BytesIO
7
+
8
+ from Cryptodome.Cipher import AES
9
+ from PIL import Image, ImageChops
10
+
11
+ from quasarr.providers.log import info, debug
12
+
13
+
14
class CNL:
    """
    Click'n'Load decryptor for a single content item.

    Takes a dict shaped like `chosen_data`, i.e.
    {
        "links": [...],
        "cnl": {
            "jk": "<obfuscated_hex_string>",
            "crypted": "<base64_ciphertext>"
        }
    },
    decrypts the Base64 payload via AES-CBC, strips padding bytes, and
    returns the contained URLs as a list.
    """

    def __init__(self, chosen_data: dict):
        """
        chosen_data must contain at least:
          - "cnl": {
                "jk": "<hex-encoded string, length > 16>",
                "crypted": "<Base64-encoded ciphertext>"
            }
        """
        self.cnl_info = chosen_data.get("cnl", {})
        self.jk = self.cnl_info.get("jk")
        self.crypted_blob = self.cnl_info.get("crypted")

        if not self.jk or not self.crypted_blob:
            raise KeyError("Missing 'jk' or 'crypted' fields in JSON.")

        # The site obfuscates the AES key by swapping two hex digits
        # (positions 15 and 16); undo that swap to recover the real key.
        hex_chars = list(self.jk)
        if len(hex_chars) <= 16:
            raise ValueError("Invalid 'jk' string length; must be > 16 characters.")
        hex_chars[15], hex_chars[16] = hex_chars[16], hex_chars[15]
        self.fixed_key_hex = "".join(hex_chars)

    def _aes_decrypt(self, data_b64: str, key_hex: str) -> bytes:
        """
        Base64-decode the payload, parse key_hex as hex bytes, then run
        AES-CBC with IV equal to the key bytes.
        Returns the raw plaintext (padding bytes still included).
        """
        try:
            ciphertext = base64.b64decode(data_b64)
        except Exception as e:
            raise ValueError("Failed to decode base64 data") from e

        try:
            aes_key = bytes.fromhex(key_hex)
        except Exception as e:
            raise ValueError("Failed to convert key to bytes (invalid hex)") from e

        # Click'n'Load reuses the key bytes as the CBC initialization vector.
        cipher = AES.new(aes_key, AES.MODE_CBC, aes_key)

        try:
            return cipher.decrypt(ciphertext)
        except Exception as e:
            raise ValueError("AES decryption failed") from e

    def decrypt(self) -> list[str]:
        """
        Run the full decryption pipeline and return all non-empty URLs.

        Strips null (0x00) and backspace (0x08) padding bytes, decodes the
        result as UTF-8, and splits it into lines.
        """
        plaintext = self._aes_decrypt(self.crypted_blob, self.fixed_key_hex)

        try:
            stripped = plaintext.replace(b"\x00", b"").replace(b"\x08", b"")
            text = stripped.decode("utf-8")
        except Exception as e:
            raise ValueError("Failed to decode decrypted data to UTF-8") from e

        urls = []
        for entry in text.splitlines():
            if entry.strip():
                urls.append(entry)
        return urls
93
+
94
+
95
def decrypt_content(content_items: list[dict], mirror: str | None) -> list[str]:
    """
    Decrypt every CNL payload in `content_items` and return a flat list of URLs.

    If `mirror` is given, only items whose "hoster" field contains `mirror`
    are decrypted; if nothing matches that filter, all items are used as a
    fallback (with a log note).

    :param content_items: list of dicts, each carrying "hoster" and "cnl" keys.
    :param mirror: optional hoster-name filter (substring match), or None.
    :return: flat list of all decrypted URLs.
    """
    # Fix: the original built `filtered` across three separate branches
    # (an empty placeholder followed by a redundant `if not mirror`
    # re-assignment); collapsed into one if/else with identical behavior.
    if mirror:
        filtered = [item for item in content_items if mirror in item.get("hoster", "")]
        if not filtered:
            info(f"No items found for mirror='{mirror}'. Falling back to all content_items.")
            filtered = content_items.copy()
    else:
        filtered = content_items.copy()

    decrypted_links: list[str] = []

    # If 'filtered' is a dictionary, iterate over its values; otherwise, assume it's a list.
    # (Kept for backward compatibility with callers that pass a dict of items.)
    items_to_process = filtered.values() if isinstance(filtered, dict) else filtered

    for idx, item in enumerate(items_to_process):
        if not isinstance(item, dict):
            info(f"[Item {idx}] Invalid item format; expected dict, got {type(item).__name__}")
            continue

        hoster_name = item.get("hoster", "<unknown>")
        cnl_info = item.get("cnl", {})
        jnk = cnl_info.get("jk", "")
        crypted = cnl_info.get("crypted", "")

        if not jnk or not crypted:
            info(f"[Item {idx} | hoster={hoster_name}] Missing 'jk' or 'crypted' → skipping")
            continue

        try:
            decryptor = CNL(item)
            urls = decryptor.decrypt()
            decrypted_links.extend(urls)
            debug(f"[Item {idx} | hoster={hoster_name}] Decrypted {len(urls)} URLs")
        except Exception as e:
            # Log and keep going; one bad item won't stop the rest.
            info(f"[Item {idx} | hoster={hoster_name}] Error during decryption: {e}")

    return decrypted_links
144
+
145
+
146
def calculate_pixel_based_difference(img1, img2):
    """Return the percentage (0-100) of differing channel values between two images.

    Computes Pillow's absolute difference over all three RGB channels; a
    channel value counts as "different" whenever its per-pixel delta is
    non-zero. Both images are expected to share the same mode and size.
    """
    delta = ImageChops.difference(img1, img2).convert("RGB")
    width, height = delta.size
    # Histogram layout is channel-major: R fills bins 0-255, G 256-511, B 512-767,
    # so bin 0 of each channel counts the channel values with zero difference.
    bins = delta.histogram()
    unchanged = bins[0] + bins[256] + bins[512]
    channel_count = width * height * 3
    changed = channel_count - unchanged
    return (changed * 100) / channel_count
160
+
161
+
162
def solve_captcha(hostname, shared_state, fetch_via_flaresolverr, fetch_via_requests_session):
    """
    Solve the image-selection CAPTCHA for the given hostname.

    Requests a set of candidate image IDs, downloads each image, and picks
    the one that differs the most (pixel-wise) from all the others — that
    outlier is the expected answer. The pick is then submitted to the site.

    :param hostname: shorthand key of the host in the "Hostnames" config.
    :param shared_state: global state object providing config access.
    :param fetch_via_flaresolverr: callable routing requests through FlareSolverr.
    :param fetch_via_requests_session: callable performing plain session requests.
    :return: dict with the site's "response" text and the chosen "captcha_id".
    :raises RuntimeError: when captcha IDs cannot be obtained or an image fails to load.
    """
    al = shared_state.values["config"]("Hostnames").get(hostname)
    captcha_base = f"https://www.{al}/files/captcha"

    # rT=1 requests the list of candidate image hashes for challenge cID=0.
    result = fetch_via_flaresolverr(
        shared_state,
        method="POST",
        target_url=captcha_base,
        post_data={"cID": 0, "rT": 1},
        timeout=30
    )

    try:
        image_ids = result["json"]
    except (KeyError, ValueError):
        # Fix: a result without a "json" key raises KeyError, which the
        # original `except ValueError` never caught; .get() keeps the error
        # message itself safe even when "text" is missing as well.
        raise RuntimeError(f"Cannot decode captcha IDs: {result.get('text')}")

    if not isinstance(image_ids, list) or len(image_ids) < 2:
        raise RuntimeError("Unexpected captcha IDs format.")

    # Download each image
    images = []
    for img_id in image_ids:
        img_url = f"{captcha_base}?cid=0&hash={img_id}"
        r_img = fetch_via_requests_session(shared_state, method="GET", target_url=img_url, timeout=30)
        if r_img.status_code != 200:
            raise RuntimeError(f"Failed to download captcha image {img_id} (HTTP {r_img.status_code})")
        elif not r_img.content:
            raise RuntimeError(f"Captcha image {img_id} is empty or invalid.")
        images.append((img_id, r_img.content))

    # Normalize every image to plain RGB so they can be compared pixel-wise.
    image_objects = []
    for image_id, raw_bytes in images:
        img = Image.open(BytesIO(raw_bytes))

        # if it's a palette (P) image with an indexed transparency, go through RGBA
        if img.mode == "P" and "transparency" in img.info:
            img = img.convert("RGBA")

        # if it has an alpha channel, composite it over white
        if img.mode == "RGBA":
            background = Image.new("RGB", img.size, (255, 255, 255))
            background.paste(img, mask=img.split()[3])
            img = background
        else:
            # for all other modes, just convert to plain RGB
            img = img.convert("RGB")

        image_objects.append((image_id, img))

    # Sum each image's difference against every other image; the outlier
    # (the answer) accumulates the highest total difference.
    images_pixel_differences = []
    for idx_i, (img_id_i, img_i) in enumerate(image_objects):
        total_difference = 0.0
        for idx_j, (img_id_j, img_j) in enumerate(image_objects):
            if idx_i == idx_j:
                continue  # skip self-comparison
            total_difference += calculate_pixel_based_difference(img_i, img_j)
        images_pixel_differences.append((img_id_i, total_difference))

    identified_captcha_image, cumulated_percentage = max(images_pixel_differences, key=lambda x: x[1])
    different_pixels_percentage = int(cumulated_percentage / len(images)) if images else int(cumulated_percentage)
    info(f'CAPTCHA image "{identified_captcha_image}" - difference to others: {different_pixels_percentage}%')

    # rT=2 submits the picked image hash (pC) as the CAPTCHA answer.
    result = fetch_via_flaresolverr(
        shared_state,
        method="POST",
        target_url=captcha_base,
        post_data={"cID": 0, "pC": identified_captcha_image, "rT": 2},
        timeout=60
    )

    return {
        "response": result["text"],
        "captcha_id": identified_captcha_image
    }
@@ -0,0 +1,444 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import base64
6
+ import json
7
+ import random
8
+ import re
9
+ import xml.dom.minidom
10
+ from urllib.parse import urlparse
11
+
12
+ import dukpy
13
+ import requests
14
+ from Cryptodome.Cipher import AES
15
+ from bs4 import BeautifulSoup
16
+
17
+ from quasarr.providers.cloudflare import is_cloudflare_challenge, ensure_session_cf_bypassed
18
+ from quasarr.providers.log import info, debug
19
+
20
+
21
class CNL:
    """Click'n'Load decryptor fed by the raw onsubmit payload tuple
    (source, jk snippet, base64 ciphertext, title)."""

    def __init__(self, crypted_data):
        debug("Initializing CNL with crypted_data.")
        # crypted_data layout: [source, jk function body, base64 ciphertext, title]
        self.crypted_data = crypted_data

    def jk_eval(self, f_def):
        """Evaluate the JavaScript key function `f_def` and return its result."""
        debug("Evaluating JavaScript key function.")
        js_code = f"""
        {f_def}
        f();
        """

        key = dukpy.evaljs(js_code).strip()
        debug("JavaScript evaluation complete.")
        return key

    def aes_decrypt(self, data, key):
        """AES-CBC decrypt `data` (Base64) with `key` (hex); IV equals the key bytes."""
        debug("Starting AES decrypt.")
        try:
            ciphertext = base64.b64decode(data)
            debug("Base64 decode for AES decrypt successful.")
        except Exception as e:
            debug("Base64 decode for AES decrypt failed.")
            raise ValueError("Failed to decode base64 data") from e

        try:
            raw_key = bytes.fromhex(key)
            debug("Key successfully converted from hex.")
        except Exception as e:
            debug("Failed converting key from hex.")
            raise ValueError("Failed to convert key to bytes") from e

        # Click'n'Load reuses the key bytes as the CBC initialization vector.
        cipher = AES.new(raw_key, AES.MODE_CBC, raw_key)

        try:
            plaintext = cipher.decrypt(ciphertext)
            debug("AES decrypt operation successful.")
        except ValueError as e:
            debug("AES decrypt operation failed.")
            raise ValueError("Decryption failed") from e

        try:
            cleaned = plaintext.decode('utf-8').replace('\x00', '').replace('\x08', '')
            debug("Decoded AES output successfully.")
            return cleaned
        except UnicodeDecodeError as e:
            debug("Failed decoding decrypted AES output.")
            raise ValueError("Failed to decode decrypted data") from e

    def decrypt(self):
        """Derive the key via the embedded JS, decrypt the payload, return URLs."""
        debug("Starting Click'N'Load decrypt sequence.")
        payload = self.crypted_data[2]
        key_func = "function f(){ return \'" + self.crypted_data[1] + "';}"
        aes_key = self.jk_eval(key_func)
        plaintext = self.aes_decrypt(payload, aes_key)
        urls = [entry for entry in plaintext.split("\r\n") if entry]
        debug(f"Extracted {len(urls)} URLs from CNL decrypt.")
        return urls
80
+
81
+
82
class DLC:
    """
    Decrypts a JDownloader DLC container into a flat list of URLs.

    The DLC format stores an 88-character service key at the end of the
    payload; that key is exchanged with the jdownloader.org dlcrypt service
    for an <rc> block, which (after decryption with the hard-coded KEY/IV)
    yields the AES key/IV used to decrypt the actual XML link list.
    """

    def __init__(self, shared_state, dlc_file):
        """
        :param shared_state: global state object; provides the User-Agent string.
        :param dlc_file: raw DLC container content (must be bytes).
        """
        debug("Initializing DLC decrypt handler.")
        self.shared_state = shared_state
        self.data = dlc_file
        # Static AES key/IV used to decrypt the service's <rc> response block.
        self.KEY = b"cb99b5cbc24db398"
        self.IV = b"9bc24cb995cb8db3"
        # jdownloader.org endpoint that exchanges the trailing DLC key for <rc>.
        self.API_URL = "http://service.jdownloader.org/dlcrypt/service.php?srcType=dlc&destType=pylo&data="

    def parse_packages(self, start_node):
        """Return [(package_name, [urls...]), ...] from a decrypted DLC XML node."""
        debug("Parsing DLC packages from XML.")
        return [
            (
                # Package names are Base64-encoded in the "name" attribute.
                base64.b64decode(node.getAttribute("name")).decode("utf-8"),
                self.parse_links(node)
            )
            for node in start_node.getElementsByTagName("package")
        ]

    def parse_links(self, start_node):
        """Return the Base64-decoded <url> of every <file> in a package node."""
        debug("Parsing DLC links in package.")
        return [
            base64.b64decode(node.getElementsByTagName("url")[0].firstChild.data).decode("utf-8")
            for node in start_node.getElementsByTagName("file")
        ]

    def decrypt(self):
        """
        Run the full DLC decryption flow.

        :return: flat list of all URLs from all packages, or None on any error.
        :raises TypeError: if the container data is not bytes.
        """
        debug("Starting DLC decrypt flow.")
        if not isinstance(self.data, bytes):
            debug("DLC data type invalid.")
            raise TypeError("data must be bytes.")

        all_urls = []

        try:
            debug("Preparing DLC data buffer.")
            data = self.data.strip()
            # Re-pad to a multiple of 4 so Base64 decoding succeeds.
            data += b"=" * (-len(data) % 4)

            # The last 88 characters are the service key; the rest is the payload.
            dlc_key = data[-88:].decode("utf-8")
            dlc_data = base64.b64decode(data[:-88])
            debug("DLC base64 decode successful.")

            headers = {'User-Agent': self.shared_state.values["user_agent"]}

            debug("Requesting DLC decryption service.")
            dlc_content = requests.get(self.API_URL + dlc_key, headers=headers, timeout=10).content.decode("utf-8")

            # Only the first 16 bytes of the decoded <rc> block are used.
            rc = base64.b64decode(re.search(r"<rc>(.+)</rc>", dlc_content, re.S).group(1))[:16]
            debug("Received DLC RC block.")

            # First pass: recover the real key material; it doubles as key AND IV.
            cipher = AES.new(self.KEY, AES.MODE_CBC, self.IV)
            key = iv = cipher.decrypt(rc)
            debug("Decrypted DLC key material.")

            # Second pass: decrypt the payload, which is itself Base64-wrapped XML.
            cipher = AES.new(key, AES.MODE_CBC, iv)
            xml_data = base64.b64decode(cipher.decrypt(dlc_data)).decode("utf-8")
            debug("Final DLC decrypt successful.")

            root = xml.dom.minidom.parseString(xml_data).documentElement
            content_node = root.getElementsByTagName("content")[0]
            debug("Parsed DLC XML content.")

            packages = self.parse_packages(content_node)
            debug(f"Found {len(packages)} DLC packages.")

            for package in packages:
                urls = package[1]
                all_urls.extend(urls)

        except Exception as e:
            # Best-effort: any failure (network, regex miss, bad XML) yields None.
            info("DLC Error: " + str(e))
            return None

        debug(f"DLC decrypt yielded {len(all_urls)} URLs.")
        return all_urls
158
+
159
+
160
def get_filecrypt_links(shared_state, token, title, url, password=None, mirror=None):
    """
    Decrypt a Filecrypt container page into download links.

    Flow: bypass Cloudflare, optionally submit a folder password, get past the
    circle/token CAPTCHA, then per mirror try Click'n'Load decryption, falling
    back to a DLC container, and finally to scraping the per-link download
    buttons (which requires the caller to solve another CAPTCHA).

    :param shared_state: global state object (user agent, helpers).
    :param token: solved CAPTCHA token to submit to Filecrypt.
    :param title: release title; used for season/episode detection and naming.
    :param url: Filecrypt container URL.
    :param password: optional folder password.
    :param mirror: optional preselected mirror URL; if None, mirrors are discovered.
    :return: dict with "status"/"links" (or "replaced" + session data for the
             button fallback), or False on any unrecoverable failure.
    """
    info("Attempting to decrypt Filecrypt link: " + url)
    debug("Initializing Filecrypt session & headers.")
    session = requests.Session()
    headers = {'User-Agent': shared_state.values["user_agent"]}

    debug("Ensuring Cloudflare bypass is ready.")
    session, headers, output = ensure_session_cf_bypassed(info, shared_state, session, url, headers)
    if not session or not output:
        debug("Cloudflare bypass failed.")
        return False

    soup = BeautifulSoup(output.text, 'html.parser')
    debug("Parsed initial Filecrypt HTML.")

    # Filecrypt varies the password input's name; try several heuristics.
    password_field = None
    try:
        debug("Attempting password field auto-detection.")
        input_elem = soup.find('input', attrs={'type': 'password'})
        if not input_elem:
            input_elem = soup.find('input', placeholder=lambda v: v and 'password' in v.lower())
        if not input_elem:
            input_elem = soup.find('input',
                                   attrs={'name': lambda v: v and ('pass' in v.lower() or 'password' in v.lower())})
        if input_elem and input_elem.has_attr('name'):
            password_field = input_elem['name']
            info("Password field name identified: " + password_field)
            debug(f"Password field detected: {password_field}")
    except Exception as e:
        info(f"Password-field detection error: {e}")
        debug("Password-field detection error raised.")

    if password and password_field:
        info("Using Password: " + password)
        debug("Submitting password via POST.")
        post_headers = {'User-Agent': shared_state.values["user_agent"],
                        'Content-Type': 'application/x-www-form-urlencoded'}
        data = {password_field: password}
        try:
            output = session.post(output.url, data=data, headers=post_headers, timeout=30)
            debug("Password POST request successful.")
        except requests.RequestException as e:
            info(f"POSTing password failed: {e}")
            debug("Password POST request failed.")
            return False

        # The password POST can re-trigger a Cloudflare challenge.
        if output.status_code == 403 or is_cloudflare_challenge(output.text):
            info("Encountered Cloudflare after password POST. Re-running FlareSolverr...")
            debug("Cloudflare reappeared after password submit, retrying bypass.")
            session, headers, output = ensure_session_cf_bypassed(info, shared_state, session, output.url, headers)
            if not session or not output:
                debug("Cloudflare bypass failed after password POST.")
                return False

    url = output.url
    soup = BeautifulSoup(output.text, 'html.parser')
    debug("Re-parsed HTML after password submit or initial load.")

    # "p4assw0rt" input only appears when the password was wrong/missing.
    if bool(soup.find_all("input", {"id": "p4assw0rt"})):
        info(f"Password was wrong or missing. Could not get links for {title}")
        debug("Incorrect password detected via p4assw0rt.")
        return False

    # A visible CNL form on first load means no CAPTCHA step is required.
    no_captcha_present = bool(soup.find("form", {"class": "cnlform"}))
    if no_captcha_present:
        info("No CAPTCHA present. Skipping token!")
        debug("Detected no CAPTCHA (CNL direct form).")
    else:
        # Circle CAPTCHA: submit random click coordinates (up to 3 attempts)
        # until the page moves on to the token-based CAPTCHA.
        circle_captcha = bool(soup.find_all("div", {"class": "circle_captcha"}))
        debug(f"Circle captcha present: {circle_captcha}")
        i = 0
        while circle_captcha and i < 3:
            debug(f"Submitting fake circle captcha click attempt {i+1}.")
            random_x = str(random.randint(100, 200))
            random_y = str(random.randint(100, 200))
            output = session.post(url, data="buttonx.x=" + random_x + "&buttonx.y=" + random_y,
                                  headers={'User-Agent': shared_state.values["user_agent"],
                                           'Content-Type': 'application/x-www-form-urlencoded'})
            url = output.url
            soup = BeautifulSoup(output.text, 'html.parser')
            circle_captcha = bool(soup.find_all("div", {"class": "circle_captcha"}))
            i += 1
            debug(f"Circle captcha still present: {circle_captcha}")

        debug("Submitting final CAPTCHA token.")
        output = session.post(url, data="cap_token=" + token,
                              headers={'User-Agent': shared_state.values["user_agent"],
                                       'Content-Type': 'application/x-www-form-urlencoded'})
        url = output.url

    if "/404.html" in url:
        info("Filecrypt returned 404 - current IP is likely banned or the link is offline.")
        debug("Detected Filecrypt 404 page.")

    soup = BeautifulSoup(output.text, 'html.parser')
    debug("Parsed post-captcha response HTML.")

    # The success page is wrapped in a "container" div; its absence means
    # the CAPTCHA token was rejected.
    solved = bool(soup.find_all("div", {"class": "container"}))
    if not solved:
        info("Token rejected by Filecrypt! Try another CAPTCHA to proceed...")
        debug("Token rejected; no 'container' div found.")
        return False
    else:
        debug("CAPTCHA token accepted by Filecrypt.")

    # Parse season/episode numbers out of the release title (e.g. ".S01E02.").
    season_number = ""
    episode_number = ""
    episode_in_title = re.findall(r'.*\.s(\d{1,3})e(\d{1,3})\..*', title, re.IGNORECASE)
    season_in_title = re.findall(r'.*\.s(\d{1,3})\..*', title, re.IGNORECASE)
    debug("Attempting episode/season number parsing from title.")

    if episode_in_title:
        try:
            season_number = str(int(episode_in_title[0][0]))
            episode_number = str(int(episode_in_title[0][1]))
            debug(f"Detected S{season_number}E{episode_number} from title.")
        except:
            debug("Failed parsing S/E numbers from title.")
            pass
    elif season_in_title:
        try:
            season_number = str(int(season_in_title[0]))
            debug(f"Detected season {season_number} from title.")
        except:
            debug("Failed parsing season number from title.")
            pass

    # Build "season="/"episode=" query parameters if the page offers
    # TV-show selection boxes.
    season = ""
    episode = ""
    tv_show_selector = soup.find("div", {"class": "dlpart"})
    debug(f"TV show selector found: {bool(tv_show_selector)}")

    if tv_show_selector:

        season = "season="
        episode = "episode="

        season_selection = soup.find("div", {"id": "selbox_season"})
        try:
            if season_selection:
                season += str(season_number)
                debug(f"Assigned season parameter: {season}")
        except:
            debug("Failed assigning season parameter.")
            pass

        episode_selection = soup.find("div", {"id": "selbox_episode"})
        try:
            if episode_selection:
                episode += str(episode_number)
                debug(f"Assigned episode parameter: {episode}")
        except:
            debug("Failed assigning episode parameter.")
            pass

    if episode_number and not episode:
        info(f"Missing select for episode number {episode_number}! Expect undesired links in the output.")
        debug("Episode number present but no episode selector container found.")

    links = []

    # Discover mirror variants of the container unless one was preselected.
    mirrors = []
    mirrors_available = soup.select("a[href*=mirror]")
    debug(f"Mirrors available: {len(mirrors_available)}")

    if not mirror and mirrors_available:
        for mirror in mirrors_available:
            try:
                mirror_query = mirror.get("href").split("?")[1]
                base_url = url.split("?")[0] if "mirror" in url else url
                mirrors.append(f"{base_url}?{mirror_query}")
                debug(f"Discovered mirror: {mirrors[-1]}")
            except IndexError:
                debug("Mirror parsing failed due to missing '?'.")
                continue
    else:
        mirrors = [url]
        debug("Using direct URL as only mirror.")

    for mirror in mirrors:
        # Only re-fetch when there are multiple mirrors; the single-mirror
        # page was already loaded above.
        if not len(mirrors) == 1:
            debug(f"Loading mirror: {mirror}")
            output = session.get(mirror, headers=headers)
            url = output.url
            soup = BeautifulSoup(output.text, 'html.parser')

        # Primary path: Click'n'Load payload embedded in the form's onsubmit.
        try:
            debug("Attempting Click'n'Load decrypt.")
            crypted_payload = soup.find("form", {"class": "cnlform"}).get('onsubmit')
            crypted_data = re.findall(r"'(.*?)'", crypted_payload)
            if not title:
                title = crypted_data[3]
            crypted_data = [
                crypted_data[0],
                crypted_data[1],
                crypted_data[2],
                title
            ]

            # Optionally narrow the CNL payload down to one season/episode
            # via Filecrypt's filtered _CNL endpoint.
            if episode and season:
                debug("Applying episode/season filtering to CNL.")
                domain = urlparse(url).netloc
                filtered_cnl_secret = soup.find("input", {"name": "hidden_cnl_id"}).attrs["value"]
                filtered_cnl_link = f"https://{domain}/_CNL/{filtered_cnl_secret}.html?{season}&{episode}"
                filtered_cnl_result = session.post(filtered_cnl_link,
                                                   headers=headers)
                if filtered_cnl_result.status_code == 200:
                    filtered_cnl_data = json.loads(filtered_cnl_result.text)
                    if filtered_cnl_data["success"]:
                        debug("Season/Episode filter applied successfully.")
                        crypted_data = [
                            crypted_data[0],
                            filtered_cnl_data["data"][0],
                            filtered_cnl_data["data"][1],
                            title
                        ]
            links.extend(CNL(crypted_data).decrypt())
        except:
            debug("CNL decrypt failed; trying DLC fallback.")
            if "The owner of this folder has deactivated all hosts in this container in their settings." in soup.text:
                info(f"Mirror deactivated by the owner: {mirror}")
                debug("Mirror deactivated detected in page text.")
                continue

            # Secondary path: download and decrypt the DLC container.
            info("Click'n'Load not found! Falling back to DLC...")
            try:
                debug("Attempting DLC fallback.")
                crypted_payload = soup.find("button", {"class": "dlcdownload"}).get("onclick")
                crypted_data = re.findall(r"'(.*?)'", crypted_payload)
                dlc_secret = crypted_data[0]
                domain = urlparse(url).netloc
                if episode and season:
                    dlc_link = f"https://{domain}/DLC/{dlc_secret}.dlc?{episode}&{season}"
                else:
                    dlc_link = f"https://{domain}/DLC/{dlc_secret}.dlc"
                dlc_file = session.get(dlc_link, headers=headers).content
                links.extend(DLC(shared_state, dlc_file).decrypt())
            except:
                # Last resort: scrape per-link download buttons; the caller
                # must solve a fresh CAPTCHA for the returned link.
                debug("DLC fallback failed, trying button fallback.")
                info("DLC not found! Falling back to first available download Button...")

                base_url = urlparse(url).netloc
                phpsessid = session.cookies.get('PHPSESSID')
                if not phpsessid:
                    info("PHPSESSID cookie not found! Cannot proceed with download links extraction.")
                    debug("Missing PHPSESSID cookie.")
                    return False

                results = []
                debug("Parsing fallback buttons for download links.")

                for button in soup.find_all('button'):
                    # Buttons carry the link ID in a data-* attribute
                    # (anything except the i18n marker).
                    data_attrs = [v for k, v in button.attrs.items() if k.startswith('data-') and k != 'data-i18n']
                    if not data_attrs:
                        continue

                    link_id = data_attrs[0]
                    row = button.find_parent('tr')
                    mirror_tag = row.find('a', class_='external_link') if row else None
                    mirror_name = mirror_tag.get_text(strip=True) if mirror_tag else 'unknown'
                    full_url = f"http://{base_url}/Link/{link_id}.html"
                    results.append((full_url, mirror_name))

                # Prefer rapidgator mirrors when available.
                sorted_results = sorted(results, key=lambda x: 0 if 'rapidgator' in x[1].lower() else 1)
                debug(f"Found {len(sorted_results)} fallback link candidates.")

                # Return the first candidate immediately - the session is short-lived.
                for result_url, mirror in sorted_results:
                    info("You must solve circlecaptcha separately!")
                    debug(f'Session "{phpsessid}" for {result_url} will not live long. Submit new CAPTCHA quickly!')
                    return {
                        "status": "replaced",
                        "replace_url": result_url,
                        "mirror": mirror,
                        "session": phpsessid
                    }

    if not links:
        info("No links found in Filecrypt response!")
        debug("Extraction completed but yielded no links.")
        return False

    debug(f"Returning success with {len(links)} extracted links.")
    return {
        "status": "success",
        "links": links
    }