quasarr 1.28.2-py3-none-any.whl → 1.30.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of quasarr has been flagged as potentially problematic.

@@ -395,42 +395,8 @@ def get_filecrypt_links(shared_state, token, title, url, password=None, mirror=N
             links.extend(DLC(shared_state, dlc_file).decrypt())
         except:
             debug("DLC fallback failed, trying button fallback.")
-            info("DLC not found! Falling back to first available download Button...")
-
-            base_url = urlparse(url).netloc
-            phpsessid = session.cookies.get('PHPSESSID')
-            if not phpsessid:
-                info("PHPSESSID cookie not found! Cannot proceed with download links extraction.")
-                debug("Missing PHPSESSID cookie.")
-                return False
-
-            results = []
-            debug("Parsing fallback buttons for download links.")
-
-            for button in soup.find_all('button'):
-                data_attrs = [v for k, v in button.attrs.items() if k.startswith('data-') and k != 'data-i18n']
-                if not data_attrs:
-                    continue
-
-                link_id = data_attrs[0]
-                row = button.find_parent('tr')
-                mirror_tag = row.find('a', class_='external_link') if row else None
-                mirror_name = mirror_tag.get_text(strip=True) if mirror_tag else 'unknown'
-                full_url = f"http://{base_url}/Link/{link_id}.html"
-                results.append((full_url, mirror_name))
-
-            sorted_results = sorted(results, key=lambda x: 0 if 'rapidgator' in x[1].lower() else 1)
-            debug(f"Found {len(sorted_results)} fallback link candidates.")
-
-            for result_url, mirror in sorted_results:
-                info("You must solve circlecaptcha separately!")
-                debug(f'Session "{phpsessid}" for {result_url} will not live long. Submit new CAPTCHA quickly!')
-                return {
-                    "status": "replaced",
-                    "replace_url": result_url,
-                    "mirror": mirror,
-                    "session": phpsessid
-                }
+            info("Click'n'Load and DLC not found. Please use the fallback userscript instead!")
+            return False
 
     if not links:
         info("No links found in Filecrypt response!")
@@ -6,6 +6,7 @@ import json
 from collections import defaultdict
 from urllib.parse import urlparse
 
+from quasarr.providers.jd_cache import JDPackageCache
 from quasarr.providers.log import info, debug
 from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException
 
@@ -102,9 +103,23 @@ def format_eta(seconds):
     return f"{hours:02}:{minutes:02}:{seconds:02}"
 
 
-def get_packages(shared_state):
+def get_packages(shared_state, _cache=None):
+    """
+    Get all packages from protected DB, failed DB, linkgrabber, and downloader.
+
+    Args:
+        shared_state: The shared state object
+        _cache: INTERNAL USE ONLY. Used by delete_package() to share cached data
+            within a single request. External callers should never pass this.
+    """
     packages = []
 
+    # Create cache for this request - only valid for duration of this call
+    if _cache is None:
+        _cache = JDPackageCache(shared_state.get_device())
+
+    cache = _cache  # Use shorter name internally
+
     protected_packages = shared_state.get_db("protected").retrieve_all_titles()
     if protected_packages:
         for package in protected_packages:
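
The request-scoped pattern behind the new `_cache` parameter is easy to miss in diff form. Below is a minimal standalone sketch of the idea; `get_packages_sketch` and `FakeCache` are hypothetical stand-ins, not quasarr code:

# Hypothetical stand-ins showing the default-None, create-per-call pattern.
class FakeCache:
    pass

def get_packages_sketch(shared_state, _cache=None):
    # Request-scoped: build a cache for this call only if none was handed in.
    cache = _cache if _cache is not None else FakeCache()
    return cache

# External callers omit _cache and get a fresh cache per call.
assert get_packages_sketch("state") is not get_packages_sketch("state")

# An internal caller (like delete_package) shares one snapshot across its steps.
shared = FakeCache()
assert get_packages_sketch("state", _cache=shared) is shared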
@@ -152,16 +167,15 @@ def get_packages(shared_state):
             "comment": package_id,
             "uuid": package_id
         })
-    try:
-        linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
-        linkgrabber_links = shared_state.get_device().linkgrabber.query_links()
-    except (TokenExpiredException, RequestTimeoutException, MYJDException):
-        linkgrabber_packages = []
-        linkgrabber_links = []
+
+    # Use cached queries instead of direct API calls
+    linkgrabber_packages = cache.linkgrabber_packages
+    linkgrabber_links = cache.linkgrabber_links
 
     if linkgrabber_packages:
         for package in linkgrabber_packages:
-            comment = get_links_comment(package, shared_state.get_device().linkgrabber.query_links())
+            # Use cached linkgrabber_links instead of re-querying
+            comment = get_links_comment(package, linkgrabber_links)
             link_details = get_links_status(package, linkgrabber_links, is_archive=False)
 
             error = link_details["error"]
@@ -184,25 +198,21 @@ def get_packages(shared_state):
                 "uuid": package.get("uuid"),
                 "error": error
             })
-    try:
-        downloader_packages = shared_state.get_device().downloads.query_packages()
-        downloader_links = shared_state.get_device().downloads.query_links()
-    except (TokenExpiredException, RequestTimeoutException, MYJDException):
-        downloader_packages = []
-        downloader_links = []
+
+    # Use cached queries instead of direct API calls
+    downloader_packages = cache.downloader_packages
+    downloader_links = cache.downloader_links
 
     if downloader_packages and downloader_links:
+        # Get all package UUIDs that contain archives (uses link data, fallback to single API call)
+        archive_package_uuids = cache.get_archive_package_uuids(downloader_packages, downloader_links)
+
         for package in downloader_packages:
             comment = get_links_comment(package, downloader_links)
 
-            # Check if package is actually archived/extracted using archive info
-            is_archive = False
-            try:
-                archive_info = shared_state.get_device().extraction.get_archive_info([], [package.get("uuid")])
-                is_archive = True if archive_info and archive_info[0] else False
-            except:
-                # On error, don't assume it's an archive - check bytes instead
-                pass
+            # Check if this package contains any archive files
+            package_uuid = package.get("uuid")
+            is_archive = package_uuid in archive_package_uuids if package_uuid else False
 
             link_details = get_links_status(package, downloader_links, is_archive)
 
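To make the new archive check concrete, here is a minimal sketch using hypothetical link dicts shaped like the fields this hunk reads ("packageUUID", "extractionStatus"); it mimics only phase 1 of get_archive_package_uuids, not the batched extraction API call:

# Hypothetical data, shaped like the fields referenced above.
downloader_packages = [{"uuid": 1}, {"uuid": 2}]
downloader_links = [
    {"packageUUID": 1, "extractionStatus": "RUNNING"},  # archive detected in phase 1
    {"packageUUID": 2, "extractionStatus": None},       # phase 2 (one batched API call) would decide
]

# Phase 1 only: any non-empty extractionStatus marks the owning package as an archive.
archive_package_uuids = {
    link["packageUUID"] for link in downloader_links if link.get("extractionStatus")
}

for package in downloader_packages:
    package_uuid = package.get("uuid")
    is_archive = package_uuid in archive_package_uuids if package_uuid else False
    print(package_uuid, is_archive)  # 1 True, 2 False (pending phase 2)
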
@@ -250,7 +260,7 @@ def get_packages(shared_state):
         time_left = "23:59:59"
         if package["type"] == "linkgrabber":
             details = package["details"]
-            name = f"[Linkgrabber] {details["name"]}"
+            name = f"[Linkgrabber] {details['name']}"
             try:
                 mb = mb_left = int(details["bytesTotal"]) / (1024 * 1024)
             except KeyError:
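
For context: before Python 3.12, reusing the enclosing quote character inside an f-string replacement field is a SyntaxError, which is presumably why this hunk and the next one switch the inner lookups to single quotes. A tiny illustration:

details = {"name": "Example"}
# name = f"[Linkgrabber] {details["name"]}"   # SyntaxError on Python < 3.12
name = f"[Linkgrabber] {details['name']}"     # valid on all supported versions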
@@ -302,7 +312,7 @@ def get_packages(shared_state):
             package_uuid = package["uuid"]
         else:
             details = package["details"]
-            name = f"[CAPTCHA not solved!] {details["title"]}"
+            name = f"[CAPTCHA not solved!] {details['title']}"
             mb = mb_left = details["size_mb"]
             try:
                 package_id = package["package_id"]
@@ -386,23 +396,23 @@ def get_packages(shared_state):
         else:
             info(f"Invalid package location {package['location']}")
 
-    if not shared_state.get_device().linkgrabber.is_collecting():
-        linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
-        linkgrabber_links = shared_state.get_device().linkgrabber.query_links()
-
+    # Use cached is_collecting check
+    if not cache.is_collecting:
+        # Reuse cached data instead of re-querying
         packages_to_start = []
         links_to_start = []
 
         for package in linkgrabber_packages:
-            comment = get_links_comment(package, shared_state.get_device().linkgrabber.query_links())
+            # Use cached linkgrabber_links instead of re-querying
+            comment = get_links_comment(package, linkgrabber_links)
             if comment and comment.startswith("Quasarr_"):
                 package_uuid = package.get("uuid")
                 if package_uuid:
-                    linkgrabber_links = [link.get("uuid") for link in linkgrabber_links if
-                                         link.get("packageUUID") == package_uuid]
-                    if linkgrabber_links:
+                    package_link_ids = [link.get("uuid") for link in linkgrabber_links if
+                                        link.get("packageUUID") == package_uuid]
+                    if package_link_ids:
                         packages_to_start.append(package_uuid)
-                        links_to_start.extend(linkgrabber_links)
+                        links_to_start.extend(package_link_ids)
                     else:
                         info(f"Package {package_uuid} has no links in linkgrabber - skipping start")
 
@@ -420,13 +430,17 @@ def delete_package(shared_state, package_id):
     try:
         deleted_title = ""
 
-        packages = get_packages(shared_state)
+        # Create cache for this single delete operation
+        # Safe to reuse within this request since we fetch->find->delete atomically
+        cache = JDPackageCache(shared_state.get_device())
+
+        packages = get_packages(shared_state, _cache=cache)
         for package_location in packages:
             for package in packages[package_location]:
                 if package["nzo_id"] == package_id:
                     if package["type"] == "linkgrabber":
-                        ids = get_links_matching_package_uuid(package,
-                                                              shared_state.get_device().linkgrabber.query_links())
+                        # Use cached linkgrabber_links instead of re-querying
+                        ids = get_links_matching_package_uuid(package, cache.linkgrabber_links)
                         if ids:
                             shared_state.get_device().linkgrabber.cleanup(
                                 "DELETE_ALL",
@@ -437,8 +451,8 @@ def delete_package(shared_state, package_id):
                             )
                             break
                     elif package["type"] == "downloader":
-                        ids = get_links_matching_package_uuid(package,
-                                                              shared_state.get_device().downloads.query_links())
+                        # Use cached downloader_links instead of re-querying
+                        ids = get_links_matching_package_uuid(package, cache.downloader_links)
                         if ids:
                             shared_state.get_device().downloads.cleanup(
                                 "DELETE_ALL",
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException
+
+
+class JDPackageCache:
+    """
+    Caches JDownloader package/link queries within a single request.
+
+    IMPORTANT: This cache is ONLY valid for the duration of ONE get_packages()
+    or delete_package() call. JDownloader state can be modified at any time by
+    the user or third-party tools, so cached data must NEVER persist across
+    separate requests.
+
+    This reduces redundant API calls within a single operation where the same
+    data (e.g., linkgrabber_links) is needed multiple times.
+
+    Usage:
+        # Cache is created and discarded within a single function call
+        cache = JDPackageCache(device)
+        packages = cache.linkgrabber_packages  # Fetches from API
+        packages = cache.linkgrabber_packages  # Returns cached (same request)
+        # Cache goes out of scope and is garbage collected
+    """
+
+    def __init__(self, device):
+        self._device = device
+        self._linkgrabber_packages = None
+        self._linkgrabber_links = None
+        self._downloader_packages = None
+        self._downloader_links = None
+        self._archive_package_uuids = None  # Set of package UUIDs containing archives
+        self._is_collecting = None
+
+    @property
+    def linkgrabber_packages(self):
+        if self._linkgrabber_packages is None:
+            try:
+                self._linkgrabber_packages = self._device.linkgrabber.query_packages()
+            except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                self._linkgrabber_packages = []
+        return self._linkgrabber_packages
+
+    @property
+    def linkgrabber_links(self):
+        if self._linkgrabber_links is None:
+            try:
+                self._linkgrabber_links = self._device.linkgrabber.query_links()
+            except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                self._linkgrabber_links = []
+        return self._linkgrabber_links
+
+    @property
+    def downloader_packages(self):
+        if self._downloader_packages is None:
+            try:
+                self._downloader_packages = self._device.downloads.query_packages()
+            except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                self._downloader_packages = []
+        return self._downloader_packages
+
+    @property
+    def downloader_links(self):
+        if self._downloader_links is None:
+            try:
+                self._downloader_links = self._device.downloads.query_links()
+            except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                self._downloader_links = []
+        return self._downloader_links
+
+    @property
+    def is_collecting(self):
+        if self._is_collecting is None:
+            try:
+                self._is_collecting = self._device.linkgrabber.is_collecting()
+            except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                self._is_collecting = False
+        return self._is_collecting
+
+    def get_archive_package_uuids(self, downloader_packages, downloader_links):
+        """
+        Get set of package UUIDs that contain at least one archive file.
+
+        Two-phase detection:
+        1. Check extractionStatus in link data (free - catches in-progress/completed extractions)
+        2. Single API call for all remaining packages (catches pre-extraction archives)
+
+        This correctly handles:
+        - Mixed packages (archive + non-archive files)
+        - Archives before extraction starts
+        - Archives during/after extraction
+        """
+        if self._archive_package_uuids is not None:
+            return self._archive_package_uuids
+
+        self._archive_package_uuids = set()
+
+        if not downloader_packages:
+            return self._archive_package_uuids
+
+        all_package_uuids = {p.get("uuid") for p in downloader_packages if p.get("uuid")}
+
+        # Phase 1: Check extractionStatus in already-fetched link data (free - no API call)
+        # This catches packages where extraction is in progress or completed
+        for link in downloader_links:
+            extraction_status = link.get("extractionStatus")
+            if extraction_status:  # Any non-empty extraction status means it's an archive
+                pkg_uuid = link.get("packageUUID")
+                if pkg_uuid:
+                    self._archive_package_uuids.add(pkg_uuid)
+
+        # Phase 2: Single API call for all unchecked packages
+        unchecked_package_uuids = list(all_package_uuids - self._archive_package_uuids)
+
+        if unchecked_package_uuids:
+            try:
+                # One API call for ALL unchecked packages
+                archive_infos = self._device.extraction.get_archive_info([], unchecked_package_uuids)
+                if archive_infos:
+                    for archive_info in archive_infos:
+                        if archive_info:
+                            # Extract package UUID from response
+                            pkg_uuid = archive_info.get("packageUUID")
+                            if pkg_uuid:
+                                self._archive_package_uuids.add(pkg_uuid)
+            except:
+                pass
+
+        return self._archive_package_uuids
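
As a closing illustration of the lazy-property memoization used in the new module, the following standalone sketch re-implements one property against a hypothetical stub device (FakeLinkgrabber, FakeDevice, and MiniCache are illustrative stand-ins, not quasarr or My.JDownloader classes):

# Standalone sketch of the lazy-property caching pattern, with hypothetical stubs.
class FakeLinkgrabber:
    def __init__(self):
        self.calls = 0

    def query_links(self):
        self.calls += 1
        return [{"uuid": 1, "packageUUID": 42}]


class FakeDevice:
    def __init__(self):
        self.linkgrabber = FakeLinkgrabber()


class MiniCache:
    """Same shape as JDPackageCache.linkgrabber_links, minus error handling."""

    def __init__(self, device):
        self._device = device
        self._linkgrabber_links = None

    @property
    def linkgrabber_links(self):
        if self._linkgrabber_links is None:
            self._linkgrabber_links = self._device.linkgrabber.query_links()
        return self._linkgrabber_links


device = FakeDevice()
cache = MiniCache(device)
cache.linkgrabber_links  # first access hits the (fake) API
cache.linkgrabber_links  # second access is served from the cache
assert device.linkgrabber.calls == 1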