quasarr 1.29.0__py3-none-any.whl → 1.31.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



@@ -25,6 +25,7 @@ from quasarr.downloads.sources.wx import get_wx_download_links
 from quasarr.providers.log import info
 from quasarr.providers.notifications import send_discord_message
 from quasarr.providers.statistics import StatsHelper
+from quasarr.providers.utils import filter_offline_links
 
 # =============================================================================
 # CRYPTER CONFIGURATION
@@ -184,6 +185,12 @@ def process_links(shared_state, source_result, title, password, package_id, imdb
                         reason=f'All links are offline or IP is banned for "{title}" on {label} - "{source_url}"')
         links = valid_links
 
+    # Filter out verifiably offline links
+    links = filter_offline_links(links, shared_state=shared_state, log_func=info)
+    if not links:
+        return fail(title, package_id, shared_state,
+                    reason=f'All verifiable links are offline for "{title}" on {label} - "{source_url}"')
+
     classified = classify_links(links, shared_state)
 
     # PRIORITY 1: Direct hoster links
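Note: the body of filter_offline_links is not part of this diff (it ships in quasarr.providers.utils); the call above only shows that it takes the link list plus shared_state and a log_func and returns the links worth keeping. A minimal sketch of such a helper, assuming links arrive as [url, hoster] pairs and that it reuses the generate_status_url and check_links_online_status helpers this release consolidates in quasarr.providers.utils:

# Hypothetical sketch only - not the shipped implementation.
from quasarr.providers.utils import check_links_online_status, generate_status_url


def filter_offline_links(links, shared_state=None, log_func=print):
    """Drop links whose crypter status image reports them as offline."""
    links_with_status = []
    for url, hoster in links:
        # Guess the crypter from the URL so a status URL can be derived (assumption)
        crypter_type = "hide" if "hide.cx" in url else "tolink" if "tolink.to" in url else None
        status_url = generate_status_url(url, crypter_type) if crypter_type else None
        links_with_status.append([url, hoster, status_url])

    online = check_links_online_status(links_with_status, shared_state)
    dropped = len(links) - len(online)
    if dropped:
        log_func(f"Dropped {dropped} verifiably offline link(s)")
    return online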
@@ -395,42 +395,8 @@ def get_filecrypt_links(shared_state, token, title, url, password=None, mirror=N
                 links.extend(DLC(shared_state, dlc_file).decrypt())
             except:
                 debug("DLC fallback failed, trying button fallback.")
-                info("DLC not found! Falling back to first available download Button...")
-
-                base_url = urlparse(url).netloc
-                phpsessid = session.cookies.get('PHPSESSID')
-                if not phpsessid:
-                    info("PHPSESSID cookie not found! Cannot proceed with download links extraction.")
-                    debug("Missing PHPSESSID cookie.")
-                    return False
-
-                results = []
-                debug("Parsing fallback buttons for download links.")
-
-                for button in soup.find_all('button'):
-                    data_attrs = [v for k, v in button.attrs.items() if k.startswith('data-') and k != 'data-i18n']
-                    if not data_attrs:
-                        continue
-
-                    link_id = data_attrs[0]
-                    row = button.find_parent('tr')
-                    mirror_tag = row.find('a', class_='external_link') if row else None
-                    mirror_name = mirror_tag.get_text(strip=True) if mirror_tag else 'unknown'
-                    full_url = f"http://{base_url}/Link/{link_id}.html"
-                    results.append((full_url, mirror_name))
-
-                sorted_results = sorted(results, key=lambda x: 0 if 'rapidgator' in x[1].lower() else 1)
-                debug(f"Found {len(sorted_results)} fallback link candidates.")
-
-                for result_url, mirror in sorted_results:
-                    info("You must solve circlecaptcha separately!")
-                    debug(f'Session "{phpsessid}" for {result_url} will not live long. Submit new CAPTCHA quickly!')
-                    return {
-                        "status": "replaced",
-                        "replace_url": result_url,
-                        "mirror": mirror,
-                        "session": phpsessid
-                    }
+                info("Click'n'Load and DLC not found. Please use the fallback userscript instead!")
+                return False
 
     if not links:
         info("No links found in Filecrypt response!")
@@ -6,6 +6,7 @@ import json
 from collections import defaultdict
 from urllib.parse import urlparse
 
+from quasarr.providers.jd_cache import JDPackageCache
 from quasarr.providers.log import info, debug
 from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException
 
@@ -102,9 +103,23 @@ def format_eta(seconds):
     return f"{hours:02}:{minutes:02}:{seconds:02}"
 
 
-def get_packages(shared_state):
+def get_packages(shared_state, _cache=None):
+    """
+    Get all packages from protected DB, failed DB, linkgrabber, and downloader.
+
+    Args:
+        shared_state: The shared state object
+        _cache: INTERNAL USE ONLY. Used by delete_package() to share cached data
+            within a single request. External callers should never pass this.
+    """
     packages = []
 
+    # Create cache for this request - only valid for duration of this call
+    if _cache is None:
+        _cache = JDPackageCache(shared_state.get_device())
+
+    cache = _cache  # Use shorter name internally
+
     protected_packages = shared_state.get_db("protected").retrieve_all_titles()
     if protected_packages:
         for package in protected_packages:
@@ -152,16 +167,15 @@ def get_packages(shared_state):
                 "comment": package_id,
                 "uuid": package_id
             })
-    try:
-        linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
-        linkgrabber_links = shared_state.get_device().linkgrabber.query_links()
-    except (TokenExpiredException, RequestTimeoutException, MYJDException):
-        linkgrabber_packages = []
-        linkgrabber_links = []
+
+    # Use cached queries instead of direct API calls
+    linkgrabber_packages = cache.linkgrabber_packages
+    linkgrabber_links = cache.linkgrabber_links
 
     if linkgrabber_packages:
         for package in linkgrabber_packages:
-            comment = get_links_comment(package, shared_state.get_device().linkgrabber.query_links())
+            # Use cached linkgrabber_links instead of re-querying
+            comment = get_links_comment(package, linkgrabber_links)
             link_details = get_links_status(package, linkgrabber_links, is_archive=False)
 
             error = link_details["error"]
@@ -184,25 +198,21 @@ def get_packages(shared_state):
                 "uuid": package.get("uuid"),
                 "error": error
             })
-    try:
-        downloader_packages = shared_state.get_device().downloads.query_packages()
-        downloader_links = shared_state.get_device().downloads.query_links()
-    except (TokenExpiredException, RequestTimeoutException, MYJDException):
-        downloader_packages = []
-        downloader_links = []
+
+    # Use cached queries instead of direct API calls
+    downloader_packages = cache.downloader_packages
+    downloader_links = cache.downloader_links
 
     if downloader_packages and downloader_links:
+        # Get all package UUIDs that contain archives (uses link data, fallback to single API call)
+        archive_package_uuids = cache.get_archive_package_uuids(downloader_packages, downloader_links)
+
         for package in downloader_packages:
            comment = get_links_comment(package, downloader_links)
 
-            # Check if package is actually archived/extracted using archive info
-            is_archive = False
-            try:
-                archive_info = shared_state.get_device().extraction.get_archive_info([], [package.get("uuid")])
-                is_archive = True if archive_info and archive_info[0] else False
-            except:
-                # On error, don't assume it's an archive - check bytes instead
-                pass
+            # Check if this package contains any archive files
+            package_uuid = package.get("uuid")
+            is_archive = package_uuid in archive_package_uuids if package_uuid else False
 
             link_details = get_links_status(package, downloader_links, is_archive)
 
@@ -250,7 +260,7 @@ def get_packages(shared_state):
         time_left = "23:59:59"
         if package["type"] == "linkgrabber":
             details = package["details"]
-            name = f"[Linkgrabber] {details["name"]}"
+            name = f"[Linkgrabber] {details['name']}"
             try:
                 mb = mb_left = int(details["bytesTotal"]) / (1024 * 1024)
             except KeyError:
@@ -302,7 +312,7 @@ def get_packages(shared_state):
             package_uuid = package["uuid"]
         else:
             details = package["details"]
-            name = f"[CAPTCHA not solved!] {details["title"]}"
+            name = f"[CAPTCHA not solved!] {details['title']}"
             mb = mb_left = details["size_mb"]
             try:
                 package_id = package["package_id"]
@@ -386,23 +396,23 @@ def get_packages(shared_state):
         else:
             info(f"Invalid package location {package['location']}")
 
-    if not shared_state.get_device().linkgrabber.is_collecting():
-        linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
-        linkgrabber_links = shared_state.get_device().linkgrabber.query_links()
-
+    # Use cached is_collecting check
+    if not cache.is_collecting:
+        # Reuse cached data instead of re-querying
         packages_to_start = []
         links_to_start = []
 
         for package in linkgrabber_packages:
-            comment = get_links_comment(package, shared_state.get_device().linkgrabber.query_links())
+            # Use cached linkgrabber_links instead of re-querying
+            comment = get_links_comment(package, linkgrabber_links)
             if comment and comment.startswith("Quasarr_"):
                 package_uuid = package.get("uuid")
                 if package_uuid:
-                    linkgrabber_links = [link.get("uuid") for link in linkgrabber_links if
-                                         link.get("packageUUID") == package_uuid]
-                    if linkgrabber_links:
+                    package_link_ids = [link.get("uuid") for link in linkgrabber_links if
+                                        link.get("packageUUID") == package_uuid]
+                    if package_link_ids:
                         packages_to_start.append(package_uuid)
-                        links_to_start.extend(linkgrabber_links)
+                        links_to_start.extend(package_link_ids)
                 else:
                     info(f"Package {package_uuid} has no links in linkgrabber - skipping start")
 
@@ -420,13 +430,17 @@ def delete_package(shared_state, package_id):
     try:
         deleted_title = ""
 
-        packages = get_packages(shared_state)
+        # Create cache for this single delete operation
+        # Safe to reuse within this request since we fetch->find->delete atomically
+        cache = JDPackageCache(shared_state.get_device())
+
+        packages = get_packages(shared_state, _cache=cache)
         for package_location in packages:
             for package in packages[package_location]:
                 if package["nzo_id"] == package_id:
                     if package["type"] == "linkgrabber":
-                        ids = get_links_matching_package_uuid(package,
-                                                              shared_state.get_device().linkgrabber.query_links())
+                        # Use cached linkgrabber_links instead of re-querying
+                        ids = get_links_matching_package_uuid(package, cache.linkgrabber_links)
                         if ids:
                             shared_state.get_device().linkgrabber.cleanup(
                                 "DELETE_ALL",
@@ -437,8 +451,8 @@ def delete_package(shared_state, package_id):
                             )
                             break
                     elif package["type"] == "downloader":
-                        ids = get_links_matching_package_uuid(package,
-                                                              shared_state.get_device().downloads.query_links())
+                        # Use cached downloader_links instead of re-querying
+                        ids = get_links_matching_package_uuid(package, cache.downloader_links)
                         if ids:
                             shared_state.get_device().downloads.cleanup(
                                 "DELETE_ALL",
@@ -3,14 +3,12 @@
 # Project by https://github.com/rix1337
 
 import re
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from io import BytesIO
 
-from PIL import Image
 from bs4 import BeautifulSoup, NavigableString
 
 from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dl import retrieve_and_validate_session, fetch_via_requests_session, invalidate_session
+from quasarr.providers.utils import generate_status_url, check_links_online_status
 
 hostname = "dl"
 
@@ -136,28 +134,6 @@ def extract_mirror_name_from_link(link_element):
     return None
 
 
-def generate_status_url(href, crypter_type):
-    """
-    Generate a status URL for crypters that support it.
-    Returns None if status URL cannot be generated.
-    """
-    if crypter_type == "hide":
-        # hide.cx links: https://hide.cx/folder/{UUID} → https://hide.cx/state/{UUID}
-        match = re.search(r'hide\.cx/(?:folder/)?([a-f0-9-]{36})', href, re.IGNORECASE)
-        if match:
-            uuid = match.group(1)
-            return f"https://hide.cx/state/{uuid}"
-
-    elif crypter_type == "tolink":
-        # tolink links: https://tolink.to/f/{ID} → https://tolink.to/f/{ID}/s/status.png
-        match = re.search(r'tolink\.to/f/([a-zA-Z0-9]+)', href, re.IGNORECASE)
-        if match:
-            link_id = match.group(1)
-            return f"https://tolink.to/f/{link_id}/s/status.png"
-
-    return None
-
-
 def extract_status_url_from_html(link_element, crypter_type):
     """
     Extract status image URL from HTML near the link element.
@@ -259,100 +235,6 @@ def build_filecrypt_status_map(soup):
     return status_map
 
 
-def image_has_green(image_data):
-    """
-    Analyze image data to check if it contains green pixels.
-    Returns True if any significant green is detected (indicating online status).
-    """
-    try:
-        img = Image.open(BytesIO(image_data))
-        img = img.convert('RGB')
-
-        pixels = list(img.getdata())
-
-        for r, g, b in pixels:
-            # Check if pixel is greenish: green channel is dominant
-            # and has a reasonable absolute value
-            if g > 100 and g > r * 1.3 and g > b * 1.3:
-                return True
-
-        return False
-    except Exception as e:
-        debug(f"Error analyzing status image: {e}")
-        # If we can't analyze, assume online to not skip valid links
-        return True
-
-
-def fetch_status_image(status_url):
-    """
-    Fetch a status image and return (status_url, image_data).
-    Returns (status_url, None) on failure.
-    """
-    try:
-        import requests
-        response = requests.get(status_url, timeout=10)
-        if response.status_code == 200:
-            return (status_url, response.content)
-    except Exception as e:
-        debug(f"Error fetching status image {status_url}: {e}")
-    return (status_url, None)
-
-
-def check_links_online_status(links_with_status):
-    """
-    Check online status for links that have status URLs.
-    Returns list of links that are online (or have no status URL to check).
-
-    links_with_status: list of [href, identifier, status_url] where status_url can be None
-    """
-
-    links_to_check = [(i, link) for i, link in enumerate(links_with_status) if link[2]]
-
-    if not links_to_check:
-        # No status URLs to check, return all links as potentially online
-        return [[link[0], link[1]] for link in links_with_status]
-
-    # Batch fetch status images
-    status_results = {}  # status_url -> has_green
-    status_urls = list(set(link[2] for _, link in links_to_check))
-
-    batch_size = 10
-    for i in range(0, len(status_urls), batch_size):
-        batch = status_urls[i:i + batch_size]
-        with ThreadPoolExecutor(max_workers=batch_size) as executor:
-            futures = [executor.submit(fetch_status_image, url) for url in batch]
-            for future in as_completed(futures):
-                try:
-                    status_url, image_data = future.result()
-                    if image_data:
-                        status_results[status_url] = image_has_green(image_data)
-                    else:
-                        # Could not fetch, assume online
-                        status_results[status_url] = True
-                except Exception as e:
-                    debug(f"Error checking status: {e}")
-
-    # Filter to online links
-    online_links = []
-
-    for link in links_with_status:
-        href, identifier, status_url = link
-        if not status_url:
-            # No status URL, include link (keeplinks case)
-            online_links.append([href, identifier])
-        elif status_url in status_results:
-            if status_results[status_url]:
-                online_links.append([href, identifier])
-                debug(f"Link online: {identifier} ({href})")
-            else:
-                debug(f"Link offline: {identifier} ({href})")
-        else:
-            # Status check failed, include link
-            online_links.append([href, identifier])
-
-    return online_links
-
-
 def extract_links_and_password_from_post(post_content, host):
     """
     Extract download links and password from a forum post.
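check_links_online_status (together with generate_status_url) now lives in quasarr.providers.utils and, judging by the new call sites below, additionally receives shared_state. Its input and output shapes are unchanged from the removed docstring above; a short usage sketch with purely hypothetical URLs and a shared_state assumed to be in scope:

from quasarr.providers.utils import check_links_online_status

# [href, identifier, status_url] - status_url may be None when nothing can be verified
links_with_status = [
    ["https://hide.cx/folder/123e4567-e89b-12d3-a456-426614174000", "rapidgator",
     "https://hide.cx/state/123e4567-e89b-12d3-a456-426614174000"],
    ["https://example.com/container/abc", "ddownload", None],  # hypothetical, unverifiable
]

online = check_links_online_status(links_with_status, shared_state)
# -> [[href, identifier], ...] for every link whose status image contains green,
#    plus any link that could not be checked (no status URL or fetch failure)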
@@ -448,7 +330,11 @@ def get_dl_download_links(shared_state, url, mirror, title, password):
         info(f"Could not find any posts in thread: {url}")
         return {"links": [], "password": ""}
 
-    # Iterate through posts to find one with online links
+    # Track first post with unverifiable links as fallback
+    fallback_links = None
+    fallback_password = ""
+
+    # Iterate through posts to find one with verified online links
     for post_index, post in enumerate(posts):
         post_content = post.select_one('div.bbWrapper')
         if not post_content:
@@ -459,16 +345,32 @@ def get_dl_download_links(shared_state, url, mirror, title, password):
         if not links_with_status:
             continue
 
+        # Check if any links have status URLs we can verify
+        has_verifiable_links = any(link[2] for link in links_with_status)
+
+        if not has_verifiable_links:
+            # No way to check online status - save as fallback and continue looking
+            if fallback_links is None:
+                fallback_links = [[link[0], link[1]] for link in links_with_status]
+                fallback_password = extracted_password
+                debug(f"Post #{post_index + 1} has links but no status URLs, saving as fallback...")
+            continue
+
         # Check which links are online
-        online_links = check_links_online_status(links_with_status)
+        online_links = check_links_online_status(links_with_status, shared_state)
 
         if online_links:
             post_info = "first post" if post_index == 0 else f"post #{post_index + 1}"
-            debug(f"Found {len(online_links)} online link(s) in {post_info} for: {title}")
+            debug(f"Found {len(online_links)} verified online link(s) in {post_info} for: {title}")
             return {"links": online_links, "password": extracted_password}
         else:
             debug(f"All links in post #{post_index + 1} are offline, checking next post...")
 
+    # No verified online links found - return fallback if available
+    if fallback_links:
+        debug(f"No verified online links found, returning unverified fallback links for: {title}")
+        return {"links": fallback_links, "password": fallback_password}
+
     info(f"No online download links found in any post: {url}")
     return {"links": [], "password": ""}
 
@@ -7,6 +7,7 @@ import re
 import requests
 
 from quasarr.providers.log import info, debug
+from quasarr.providers.utils import check_links_online_status
 
 hostname = "wx"
 
@@ -15,7 +16,10 @@ def get_wx_download_links(shared_state, url, mirror, title, password):
     """
     KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
 
-    WX source handler - Grabs download links from API based on title and mirror.
+    WX source handler - Grabs download links from API based on title.
+    Finds the best mirror (M1, M2, M3...) by checking online status.
+    Returns all online links from the first complete mirror, or the best partial mirror.
+    Prefers hide.cx links over other crypters (filecrypt, etc.) when online counts are equal.
     """
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
@@ -35,7 +39,7 @@ def get_wx_download_links(shared_state, url, mirror, title, password):
         return {"links": []}
 
     # Extract slug from URL
-    slug_match = re.search(r'/detail/([^/]+)', url)
+    slug_match = re.search(r'/detail/([^/?]+)', url)
     if not slug_match:
         info(f"{hostname.upper()}: Could not extract slug from URL: {url}")
         return {"links": []}
@@ -64,62 +68,100 @@ def get_wx_download_links(shared_state, url, mirror, title, password):
 
         releases = data['item']['releases']
 
-        # Find the release matching the title
-        matching_release = None
-        for release in releases:
-            if release.get('fulltitle') == title:
-                matching_release = release
-                break
+        # Find ALL releases matching the title (these are different mirrors: M1, M2, M3...)
+        matching_releases = [r for r in releases if r.get('fulltitle') == title]
 
-        if not matching_release:
+        if not matching_releases:
             info(f"{hostname.upper()}: No release found matching title: {title}")
             return {"links": []}
 
-        # Extract crypted_links based on mirror
-        crypted_links = matching_release.get('crypted_links', {})
-
-        if not crypted_links:
-            info(f"{hostname.upper()}: No crypted_links found for: {title}")
-            return {"links": []}
-
-        links = []
-
-        # If mirror is specified, find matching hoster (handle partial matches like 'ddownload' -> 'ddownload.com')
-        if mirror:
-            matched_hoster = None
-            for hoster in crypted_links.keys():
-                if mirror.lower() in hoster.lower() or hoster.lower() in mirror.lower():
-                    matched_hoster = hoster
-                    break
-
-            if matched_hoster:
-                link = crypted_links[matched_hoster]
-                # Prefer hide over filecrypt
-                if re.search(r'hide\.', link, re.IGNORECASE):
-                    links.append([link, matched_hoster])
-                    debug(f"{hostname.upper()}: Found hide link for mirror {matched_hoster}")
-                elif re.search(r'filecrypt\.', link, re.IGNORECASE):
-                    links.append([link, matched_hoster])
-                    debug(f"{hostname.upper()}: Found filecrypt link for mirror {matched_hoster}")
-            else:
-                info(
-                    f"{hostname.upper()}: Mirror '{mirror}' not found in available hosters: {list(crypted_links.keys())}")
-        else:
-            # If no mirror specified, get all available crypted links (prefer hide over filecrypt)
-            for hoster, link in crypted_links.items():
-                if re.search(r'hide\.', link, re.IGNORECASE):
-                    links.append([link, hoster])
-                    debug(f"{hostname.upper()}: Found hide link for hoster {hoster}")
-                elif re.search(r'filecrypt\.', link, re.IGNORECASE):
-                    links.append([link, hoster])
-                    debug(f"{hostname.upper()}: Found filecrypt link for hoster {hoster}")
-
-        if not links:
-            info(f"{hostname.upper()}: No supported crypted links found for: {title}")
-            return {"links": []}
-
-        debug(f"{hostname.upper()}: Found {len(links)} crypted link(s) for: {title}")
-        return {"links": links}
+        debug(f"{hostname.upper()}: Found {len(matching_releases)} mirror(s) for: {title}")
+
+        # Evaluate each mirror and find the best one
+        # Track: (online_count, is_hide, online_links)
+        best_mirror = None  # (online_count, is_hide, online_links)
+
+        for idx, release in enumerate(matching_releases):
+            crypted_links = release.get('crypted_links', {})
+            check_urls = release.get('options', {}).get('check', {})
+
+            if not crypted_links:
+                continue
+
+            # Separate hide.cx links from other crypters
+            hide_links = []
+            other_links = []
+
+            for hoster, container_url in crypted_links.items():
+                state_url = check_urls.get(hoster)
+                if re.search(r'hide\.', container_url, re.IGNORECASE):
+                    hide_links.append([container_url, hoster, state_url])
+                elif re.search(r'filecrypt\.', container_url, re.IGNORECASE):
+                    other_links.append([container_url, hoster, state_url])
+                # Skip other crypters we don't support
+
+            # Check hide.cx links first (preferred)
+            hide_online = 0
+            online_hide = []
+            if hide_links:
+                online_hide = check_links_online_status(hide_links, shared_state)
+                hide_total = len(hide_links)
+                hide_online = len(online_hide)
+
+                debug(f"{hostname.upper()}: M{idx + 1} hide.cx: {hide_online}/{hide_total} online")
+
+                # If all hide.cx links are online, use this mirror immediately
+                if hide_online == hide_total and hide_online > 0:
+                    debug(
+                        f"{hostname.upper()}: M{idx + 1} is complete (all {hide_online} hide.cx links online), using this mirror")
+                    return {"links": online_hide}
+
+            # Check other crypters (filecrypt, etc.) - no early return, always check all mirrors for hide.cx first
+            other_online = 0
+            online_other = []
+            if other_links:
+                online_other = check_links_online_status(other_links, shared_state)
+                other_total = len(other_links)
+                other_online = len(online_other)
+
+                debug(f"{hostname.upper()}: M{idx + 1} other crypters: {other_online}/{other_total} online")
+
+            # Determine best option for this mirror (prefer hide.cx on ties)
+            mirror_links = None
+            mirror_count = 0
+            mirror_is_hide = False
+
+            if hide_online > 0 and hide_online >= other_online:
+                # hide.cx wins (more links or tie)
+                mirror_links = online_hide
+                mirror_count = hide_online
+                mirror_is_hide = True
+            elif other_online > hide_online:
+                # other crypter has more online links
+                mirror_links = online_other
+                mirror_count = other_online
+                mirror_is_hide = False
+
+            # Update best_mirror if this mirror is better
+            # Priority: 1) more online links, 2) hide.cx preference on ties
+            if mirror_links:
+                if best_mirror is None:
+                    best_mirror = (mirror_count, mirror_is_hide, mirror_links)
+                elif mirror_count > best_mirror[0]:
+                    best_mirror = (mirror_count, mirror_is_hide, mirror_links)
+                elif mirror_count == best_mirror[0] and mirror_is_hide and not best_mirror[1]:
+                    # Same count but this is hide.cx and current best is not
+                    best_mirror = (mirror_count, mirror_is_hide, mirror_links)
+
+        # No complete mirror found, return best partial mirror
+        if best_mirror and best_mirror[2]:
+            crypter_type = "hide.cx" if best_mirror[1] else "other crypter"
+            debug(
+                f"{hostname.upper()}: No complete mirror, using best partial with {best_mirror[0]} online {crypter_type} link(s)")
+            return {"links": best_mirror[2]}
+
+        info(f"{hostname.upper()}: No online links found for: {title}")
+        return {"links": []}
 
     except Exception as e:
         info(f"{hostname.upper()}: Error extracting download links from {url}: {e}")