quasarr 2.6.1-py3-none-any.whl → 2.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- quasarr/__init__.py +71 -61
- quasarr/api/__init__.py +1 -2
- quasarr/api/arr/__init__.py +66 -57
- quasarr/api/captcha/__init__.py +203 -154
- quasarr/downloads/__init__.py +12 -8
- quasarr/downloads/linkcrypters/al.py +4 -4
- quasarr/downloads/linkcrypters/filecrypt.py +1 -2
- quasarr/downloads/packages/__init__.py +62 -88
- quasarr/downloads/sources/al.py +3 -3
- quasarr/downloads/sources/by.py +3 -3
- quasarr/downloads/sources/he.py +8 -9
- quasarr/downloads/sources/nk.py +3 -3
- quasarr/downloads/sources/sl.py +6 -1
- quasarr/downloads/sources/wd.py +93 -37
- quasarr/downloads/sources/wx.py +11 -17
- quasarr/providers/auth.py +9 -13
- quasarr/providers/cloudflare.py +5 -4
- quasarr/providers/imdb_metadata.py +1 -3
- quasarr/providers/jd_cache.py +64 -90
- quasarr/providers/log.py +226 -8
- quasarr/providers/myjd_api.py +116 -94
- quasarr/providers/sessions/al.py +20 -22
- quasarr/providers/sessions/dd.py +1 -1
- quasarr/providers/sessions/dl.py +8 -10
- quasarr/providers/sessions/nx.py +1 -1
- quasarr/providers/shared_state.py +26 -15
- quasarr/providers/utils.py +15 -6
- quasarr/providers/version.py +1 -1
- quasarr/search/__init__.py +113 -82
- quasarr/search/sources/al.py +19 -23
- quasarr/search/sources/by.py +6 -6
- quasarr/search/sources/dd.py +8 -10
- quasarr/search/sources/dj.py +15 -18
- quasarr/search/sources/dl.py +25 -37
- quasarr/search/sources/dt.py +13 -15
- quasarr/search/sources/dw.py +24 -16
- quasarr/search/sources/fx.py +25 -11
- quasarr/search/sources/he.py +16 -14
- quasarr/search/sources/hs.py +7 -7
- quasarr/search/sources/mb.py +7 -7
- quasarr/search/sources/nk.py +24 -25
- quasarr/search/sources/nx.py +22 -15
- quasarr/search/sources/sf.py +18 -9
- quasarr/search/sources/sj.py +7 -7
- quasarr/search/sources/sl.py +26 -14
- quasarr/search/sources/wd.py +61 -31
- quasarr/search/sources/wx.py +33 -47
- quasarr/storage/config.py +1 -3
- {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/METADATA +4 -1
- quasarr-2.7.1.dist-info/RECORD +84 -0
- quasarr-2.6.1.dist-info/RECORD +0 -84
- {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/WHEEL +0 -0
- {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/entry_points.txt +0 -0
- {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/licenses/LICENSE +0 -0
quasarr/downloads/packages/__init__.py
CHANGED

@@ -8,7 +8,7 @@ from collections import defaultdict
 from urllib.parse import urlparse
 
 from quasarr.providers.jd_cache import JDPackageCache
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace
 
 # =============================================================================
 # CONSTANTS
@@ -118,9 +118,7 @@ def get_links_comment(package, package_links):
         if link.get("packageUUID") == package_uuid:
             comment = link.get("comment")
             if comment:
-                debug(
-                    f"get_links_comment: Found comment '{comment}' for package {package_uuid}"
-                )
+                debug(f"Found comment '{comment}' for package {package_uuid}")
                 return comment
     return None
 
@@ -138,7 +136,7 @@ def get_links_status(package, all_links, is_archive=False):
     package_uuid = package.get("uuid")
     package_name = package.get("name", "unknown")
     debug(
-        f"
+        f"Checking package '{package_name}' ({package_uuid}), is_archive={is_archive}"
     )
 
     links_in_package = []
@@ -147,7 +145,7 @@ def get_links_status(package, all_links, is_archive=False):
         if link.get("packageUUID") == package_uuid:
             links_in_package.append(link)
 
-
+    trace(f"Found {len(links_in_package)} links in package")
 
     all_finished = True
     eta = None
@@ -171,9 +169,7 @@ def get_links_status(package, all_links, is_archive=False):
             link.get("availability", "").lower() == "online" for link in mirror_links
         ):
             has_mirror_all_online = True
-            debug(
-                f"get_links_status: Mirror '{domain}' has all {len(mirror_links)} links online"
-            )
+            debug(f"Mirror '{domain}' has all {len(mirror_links)} links online")
             break
 
     # Collect offline link IDs (only if there's an online mirror available)
@@ -187,7 +183,7 @@ def get_links_status(package, all_links, is_archive=False):
 
     if offline_links:
         debug(
-            f"
+            f"{len(offline_links)} offline links, has_mirror_all_online={has_mirror_all_online}"
         )
 
     # First pass: detect if ANY link has extraction activity (for safety override)
@@ -197,7 +193,7 @@ def get_links_status(package, all_links, is_archive=False):
             break
 
     if has_extraction_activity:
-        debug("
+        debug("Package has extraction activity detected")
 
     # Second pass: check each link's status
     for link in links_in_package:
@@ -216,8 +212,8 @@ def get_links_status(package, all_links, is_archive=False):
             link_status[:50] + "..." if len(link_status) > 50 else link_status
         )
 
-
-            f"
+        trace(
+            f"Link '{link_name}': finished={link_finished}, "
             f"is_archive_file={link_is_archive_file}, availability={link_availability}, "
             f"extractionStatus='{link_extraction_status}', status='{link_status_preview}'"
         )
@@ -225,49 +221,41 @@ def get_links_status(package, all_links, is_archive=False):
         # Check for offline links
         if link_availability == "offline" and not has_mirror_all_online:
             error = "Links offline for all mirrors"
-            debug(
-                f"get_links_status: ERROR - Link offline with no online mirror: {link_name}"
-            )
+            debug(f"ERROR - Link offline with no online mirror: {link_name}")
 
         # Check for file errors
         if link_status_icon == "false":
             error = "File error in package"
-            debug(f"
+            debug(f"ERROR - File error in link: {link_name}")
 
         # === MAIN LINK STATUS LOGIC ===
 
         if not link_finished:
             # Download not complete
             all_finished = False
-
-                f"get_links_status: Link not finished (download in progress): {link_name}"
-            )
+            trace(f"Link not finished (download in progress): {link_name}")
 
         elif link_extraction_status and link_extraction_status != "successful":
             # Extraction is running or errored (applies to archive files only)
             if link_extraction_status == "error":
                 error = link.get("status", "Extraction error")
-
+                trace(f"Extraction ERROR on {link_name}: {error}")
             elif link_extraction_status == "running":
-
-                    f"get_links_status: Extraction RUNNING on {link_name}, eta={link_eta}s"
-                )
+                trace(f"Extraction RUNNING on {link_name}, eta={link_eta}s")
                 if link_eta > 0:
                     if eta is None or link_eta > eta:
                         eta = link_eta
             else:
-
-                    f"get_links_status: Extraction status '{link_extraction_status}' on {link_name}"
-                )
+                trace(f"Extraction status '{link_extraction_status}' on {link_name}")
             all_finished = False
 
         elif link_is_archive_file:
            # This specific link IS an archive file - must have "extraction ok"
            if is_extraction_complete(link_status):
-
+                trace(f"Archive link COMPLETE: {link_name}")
            else:
-
-                    f"
+                trace(
+                    f"Archive link WAITING for extraction: {link_name}, status='{link_status}'"
                )
                all_finished = False
 
@@ -275,16 +263,14 @@ def get_links_status(package, all_links, is_archive=False):
             # Package is marked as archive but THIS link doesn't look like an archive file
             # (e.g., .mkv in a package with .rar files)
             # These non-archive files are finished when download is complete
-            debug(
-                f"get_links_status: Non-archive link in archive package COMPLETE: {link_name}"
-            )
+            debug(f"Non-archive link in archive package COMPLETE: {link_name}")
 
         else:
             # Non-archive file in non-archive package - finished when downloaded
-            debug(f"
+            debug(f"Non-archive link COMPLETE: {link_name}")
 
     debug(
-        f"
+        f"RESULT for '{package_name}': all_finished={all_finished}, "
         f"eta={eta}, error={error}, is_archive={is_archive}, has_extraction_activity={has_extraction_activity}"
     )
 
@@ -343,21 +329,21 @@ def get_packages(shared_state, _cache=None):
     _cache: INTERNAL USE ONLY. Used by delete_package() to share cached data
         within a single request. External callers should never pass this.
     """
-
+    trace("Starting package retrieval")
    packages = []
 
    # Create cache for this request - only valid for duration of this call
    if _cache is None:
        cache = JDPackageCache(shared_state.get_device())
-
+        trace("Created new JDPackageCache")
    else:
        cache = _cache
-
+        trace("Using provided cache instance")
 
    # === PROTECTED PACKAGES (CAPTCHA required) ===
    protected_packages = shared_state.get_db("protected").retrieve_all_titles()
    debug(
-        f"
+        f"Found <g>{len(protected_packages) if protected_packages else 0}</g> protected packages"
    )
 
    if protected_packages:
@@ -379,18 +365,14 @@ def get_packages(shared_state, _cache=None):
                        "package_id": package_id,
                    }
                )
-
-                    f"get_packages: Added protected package '{data['title']}' ({package_id})"
-                )
+                trace(f"Protected package: '{data['title']}' ({package_id})")
            except (json.JSONDecodeError, KeyError) as e:
-                debug(
-                    f"get_packages: Failed to parse protected package {package_id}: {e}"
-                )
+                debug(f"Failed to parse protected package {package_id}: {e}")
 
    # === FAILED PACKAGES ===
    failed_packages = shared_state.get_db("failed").retrieve_all_titles()
    debug(
-        f"
+        f"Found <g>{len(failed_packages) if failed_packages else 0}</g> failed packages"
    )
 
    if failed_packages:
@@ -419,17 +401,15 @@ def get_packages(shared_state, _cache=None):
                        "uuid": package_id,
                    }
                )
-
-                    f"get_packages: Added failed package '{details['name']}' ({package_id}): {error}"
-                )
+                trace(f"Failed package: '{details['name']}' ({package_id}): {error}")
            except (json.JSONDecodeError, KeyError, TypeError) as e:
-                debug(f"
+                debug(f"Failed to parse failed package {package_id}: {e}")
 
    # === LINKGRABBER PACKAGES ===
    linkgrabber_packages = cache.linkgrabber_packages
    linkgrabber_links = cache.linkgrabber_links
 
-    debug(f"
+    debug(f"Processing <g>{len(linkgrabber_packages)}</g> linkgrabber packages")
 
    if linkgrabber_packages:
        for package in linkgrabber_packages:
@@ -447,7 +427,7 @@ def get_packages(shared_state, _cache=None):
            # Clean up offline links if we have online mirrors
            if offline_mirror_linkids:
                debug(
-                    f"
+                    f"Cleaning up {len(offline_mirror_linkids)} offline links from '{package_name}'"
                )
                try:
                    shared_state.get_device().linkgrabber.cleanup(
@@ -458,7 +438,7 @@ def get_packages(shared_state, _cache=None):
                        [package_uuid],
                    )
                except Exception as e:
-                    debug(f"
+                    debug(f"Failed to cleanup offline links: {e}")
 
            location = "history" if error else "queue"
            packages.append(
@@ -471,16 +451,14 @@ def get_packages(shared_state, _cache=None):
                    "error": error,
                }
            )
-
-                f"get_packages: Added linkgrabber package '{package_name}' -> {location}"
-            )
+            trace(f"Linkgrabber package: '{package_name}' -> {location}")
 
    # === DOWNLOADER PACKAGES ===
    downloader_packages = cache.downloader_packages
    downloader_links = cache.downloader_links
 
    debug(
-        f"
+        f"Processing <g>{len(downloader_packages)}</g> downloader packages with <g>{len(downloader_links)}</g> links"
    )
 
    if downloader_packages and downloader_links:
@@ -489,7 +467,7 @@ def get_packages(shared_state, _cache=None):
            downloader_packages, downloader_links
        )
        debug(
-            f"
+            f"Archive detection complete - {len(archive_package_uuids)} packages are archives"
        )
 
        for package in downloader_packages:
@@ -502,7 +480,7 @@ def get_packages(shared_state, _cache=None):
            is_archive = (
                package_uuid in archive_package_uuids if package_uuid else False
            )
-            debug(f"
+            debug(f"Package '{package_name}' is_archive={is_archive}")
 
            link_details = get_links_status(package, downloader_links, is_archive)
 
@@ -521,12 +499,12 @@ def get_packages(shared_state, _cache=None):
                # Only mark as finished if it's not an archive
                if not is_archive:
                    debug(
-                        f"
+                        f"Package '{package_name}' bytes complete and not archive -> marking finished"
                    )
                    finished = True
                else:
                    debug(
-                        f"
+                        f"Package '{package_name}' bytes complete BUT is_archive=True -> NOT marking finished yet"
                    )
 
            if not finished and link_details["eta"]:
@@ -535,7 +513,7 @@ def get_packages(shared_state, _cache=None):
            location = "history" if error or finished else "queue"
 
            debug(
-                f"
+                f"Package '{package_name}' -> location={location}, "
                f"finished={finished}, error={error}, is_archive={is_archive}"
            )
 
@@ -648,9 +626,7 @@ def get_packages(shared_state, _cache=None):
                )
                queue_index += 1
            else:
-                debug(
-                    f"get_packages: Skipping queue package without package_id or uuid: {name}"
-                )
+                debug(f"Skipping queue package without package_id or uuid: {name}")
 
        elif package["location"] == "history":
            details = package["details"]
@@ -704,9 +680,7 @@ def get_packages(shared_state, _cache=None):
 
    # === AUTO-START QUASARR PACKAGES ===
    if not cache.is_collecting:
-        debug(
-            "get_packages: Linkgrabber not collecting, checking for packages to auto-start"
-        )
+        debug("Linkgrabber not collecting, checking for packages to auto-start")
 
        packages_to_start = []
        links_to_start = []
@@ -723,7 +697,7 @@ def get_packages(shared_state, _cache=None):
                ]
                if package_link_ids:
                    debug(
-                        f"
+                        f"Found Quasarr package to start: {package.get('name')} with {len(package_link_ids)} links"
                    )
                    packages_to_start.append(package_uuid)
                    links_to_start.extend(package_link_ids)
@@ -736,40 +710,40 @@ def get_packages(shared_state, _cache=None):
 
        if packages_to_start and links_to_start:
            debug(
-                f"
+                f"Moving <g>{len(packages_to_start)}</g> packages with <g>{len(links_to_start)}</g> links to download list"
            )
            try:
                shared_state.get_device().linkgrabber.move_to_downloadlist(
                    links_to_start, packages_to_start
                )
-
+                debug(
                    f"Started {len(packages_to_start)} package download{'s' if len(packages_to_start) > 1 else ''} from linkgrabber"
                )
            except Exception as e:
-                debug(f"
+                debug(f"Failed to move packages to download list: {e}")
        else:
-            debug("
+            debug("Linkgrabber is collecting, skipping auto-start")
 
-
-        f"
+    trace(
+        f"COMPLETE - queue={len(downloads['queue'])}, history={len(downloads['history'])}"
    )
 
    # Summary overview for quick debugging
    if downloads["queue"] or downloads["history"]:
-
-
-
-
-
+        trace("=" * 60)
+        trace("PACKAGE SUMMARY")
+        trace("=" * 60)
+        trace(f" CACHE: {cache.get_stats()}")
+        trace("-" * 60)
        for item in downloads["queue"]:
            is_archive = item.get("is_archive", False)
            archive_indicator = "[ARCHIVE]" if is_archive else ""
            mb = item.get("mb", 0)
            size_str = f"{mb:.0f} MB" if mb < 1024 else f"{mb / 1024:.1f} GB"
-
+            trace(
                f" QUEUE: {item['filename'][:50]}{'...' if len(item['filename']) > 50 else ''}"
            )
-
+            trace(
                f" -> {item['percentage']}% | {item['timeleft']} | {size_str} | {item['cat']} {archive_indicator}"
            )
        for item in downloads["history"]:
@@ -794,15 +768,15 @@ def get_packages(shared_state, _cache=None):
                )
            else:
                size_str = "? MB"
-
+            trace(
                f" HISTORY: {item['name'][:50]}{'...' if len(item['name']) > 50 else ''}"
            )
-
+            trace(
                f" -> {status_icon} {item['status']} | {size_str} | {item['category']} {archive_status}"
            )
            if item.get("fail_message"):
-
-
+                trace(f" Error: {item['fail_message']}")
+        trace("=" * 60)
 
    return downloads
 
@@ -917,9 +891,9 @@ def delete_package(shared_state, package_id):
            break  # Exit outer loop
 
    if deleted_title:
-        info(f
+        info(f"Deleted package <y>{deleted_title}</y> with ID <y>{package_id}</y>")
    else:
-        info(f
+        info(f"Deleted package <y>{package_id}</y>")
 
    debug(
        f"delete_package: Successfully completed deletion for package {package_id}, found={found}"
quasarr/downloads/sources/al.py
CHANGED

@@ -419,7 +419,7 @@ def parse_info_from_download_item(
         r"\.(German|Japanese|English)\.",
         f".{part_string}.\\1.",
         release_title,
-        1,
+        count=1,
     )
 
     # determine if optional episode exists on release page
@@ -735,10 +735,10 @@ def get_al_download_links(shared_state, url, mirror, title, password):
     )
     try:
         response_json = check_solution.get("json", {})
-    except ValueError:
+    except ValueError as e:
         raise RuntimeError(
             f"Unexpected /ajax/captcha response: {check_solution.get('text', '')}"
-        )
+        ) from e
 
     code = response_json.get("code", "")
     message = response_json.get("message", "")
quasarr/downloads/sources/by.py
CHANGED

@@ -64,7 +64,7 @@ def get_by_download_links(shared_state, url, mirror, title, password):
         async_results.append((content, source))
 
     url_hosters = []
-    for content,
+    for content, _source in async_results:
         host_soup = BeautifulSoup(content, "html.parser")
         link = host_soup.find(
             "a",
@@ -100,12 +100,12 @@ def get_by_download_links(shared_state, url, mirror, title, password):
             )
             rq.raise_for_status()
             if "/404.html" in rq.url:
-                info(f"Link leads to 404 page
+                info(f"Link leads to 404 page: {r.url}")
                 return None
             time.sleep(1)
         return rq.url
     except Exception as e:
-        info(f"Error resolving link
+        info(f"Error resolving link: {e}")
         mark_hostname_issue(
             hostname, "download", str(e) if "e" in dir() else "Download error"
         )
quasarr/downloads/sources/he.py
CHANGED

@@ -32,7 +32,7 @@ def get_he_download_links(shared_state, url, mirror, title, password):
         r.raise_for_status()
         soup = BeautifulSoup(r.text, "html.parser")
     except Exception as e:
-        info(f"
+        info(f"Could not fetch release for {title}: {e}")
         mark_hostname_issue(
             hostname, "download", str(e) if "e" in dir() else "Download error"
         )
@@ -49,14 +49,13 @@ def get_he_download_links(shared_state, url, mirror, title, password):
             if m:
                 imdb_id = m.group(1)
             else:
-                debug(f"
+                debug(f"imdb_id not found for title {title} in link href.")
         else:
-            debug(f"
+            debug(f"imdb_id link href not found for title {title}.")
     except Exception:
-        debug(f"
-
+        debug(f"failed to extract imdb_id for title {title}.")
     anchors = []
-    for
+    for _retries in range(10):
         form = soup.find("form", id=re.compile(r"content-protector-access-form"))
         if not form:
             return {"links": [], "imdb_id": None}
@@ -91,7 +90,7 @@ def get_he_download_links(shared_state, url, mirror, title, password):
             r.raise_for_status()
             soup = BeautifulSoup(r.text, "html.parser")
         except Exception as e:
-            info(f"
+            info(f"Could not submit protector form for {title}: {e}")
             mark_hostname_issue(
                 hostname, "download", str(e) if "e" in dir() else "Download error"
             )
@@ -118,11 +117,11 @@ def get_he_download_links(shared_state, url, mirror, title, password):
 
                 links.append([href, hoster])
             except Exception:
-                debug(f"
+                debug(f"Could not resolve download link hoster for {title}")
                 continue
 
     if not links:
-        info(f"No external download links found
+        info(f"No external download links found for {title}")
         return {"links": [], "imdb_id": None}
 
     return {
quasarr/downloads/sources/nk.py
CHANGED

@@ -31,7 +31,7 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
         r.raise_for_status()
         soup = BeautifulSoup(r.text, "html.parser")
     except Exception as e:
-        info(f"
+        info(f"Could not fetch release page for {title}: {e}")
         mark_hostname_issue(
             hostname, "download", str(e) if "e" in dir() else "Download error"
         )
@@ -56,7 +56,7 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
             r.raise_for_status()
             href = r.url
         except Exception as e:
-            info(f"
+            info(f"Could not resolve download link for {title}: {e}")
             mark_hostname_issue(
                 hostname, "download", str(e) if "e" in dir() else "Download error"
             )
@@ -65,6 +65,6 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
         candidates.append([href, mirror])
 
     if not candidates:
-        info(f"No external download links found
+        info(f"No external download links found for {title}")
 
     return {"links": candidates}
quasarr/downloads/sources/sl.py
CHANGED

@@ -8,6 +8,7 @@ from urllib.parse import urlparse
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.cloudflare import ensure_session_cf_bypassed
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.log import debug, info
 
@@ -33,7 +34,11 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-
+        session, headers, r = ensure_session_cf_bypassed(
+            info, shared_state, session, url, headers
+        )
+        if not r:
+            raise requests.RequestException("Cloudflare bypass failed")
         r.raise_for_status()
 
         soup = BeautifulSoup(r.text, "html.parser")