quasarr 1.3.5__py3-none-any.whl → 1.20.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (67)
  1. quasarr/__init__.py +157 -56
  2. quasarr/api/__init__.py +141 -36
  3. quasarr/api/arr/__init__.py +197 -78
  4. quasarr/api/captcha/__init__.py +897 -42
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +84 -22
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +237 -434
  9. quasarr/downloads/linkcrypters/al.py +237 -0
  10. quasarr/downloads/linkcrypters/filecrypt.py +178 -31
  11. quasarr/downloads/linkcrypters/hide.py +123 -0
  12. quasarr/downloads/packages/__init__.py +461 -0
  13. quasarr/downloads/sources/al.py +697 -0
  14. quasarr/downloads/sources/by.py +106 -0
  15. quasarr/downloads/sources/dd.py +6 -78
  16. quasarr/downloads/sources/dj.py +7 -0
  17. quasarr/downloads/sources/dt.py +1 -1
  18. quasarr/downloads/sources/dw.py +2 -2
  19. quasarr/downloads/sources/he.py +112 -0
  20. quasarr/downloads/sources/mb.py +47 -0
  21. quasarr/downloads/sources/nk.py +51 -0
  22. quasarr/downloads/sources/nx.py +36 -81
  23. quasarr/downloads/sources/sf.py +27 -4
  24. quasarr/downloads/sources/sj.py +7 -0
  25. quasarr/downloads/sources/sl.py +90 -0
  26. quasarr/downloads/sources/wd.py +110 -0
  27. quasarr/providers/cloudflare.py +204 -0
  28. quasarr/providers/html_images.py +20 -0
  29. quasarr/providers/html_templates.py +210 -108
  30. quasarr/providers/imdb_metadata.py +15 -2
  31. quasarr/providers/myjd_api.py +36 -5
  32. quasarr/providers/notifications.py +30 -5
  33. quasarr/providers/obfuscated.py +35 -0
  34. quasarr/providers/sessions/__init__.py +0 -0
  35. quasarr/providers/sessions/al.py +286 -0
  36. quasarr/providers/sessions/dd.py +78 -0
  37. quasarr/providers/sessions/nx.py +76 -0
  38. quasarr/providers/shared_state.py +368 -23
  39. quasarr/providers/statistics.py +154 -0
  40. quasarr/providers/version.py +60 -1
  41. quasarr/search/__init__.py +112 -36
  42. quasarr/search/sources/al.py +448 -0
  43. quasarr/search/sources/by.py +203 -0
  44. quasarr/search/sources/dd.py +17 -6
  45. quasarr/search/sources/dj.py +213 -0
  46. quasarr/search/sources/dt.py +37 -7
  47. quasarr/search/sources/dw.py +27 -47
  48. quasarr/search/sources/fx.py +27 -29
  49. quasarr/search/sources/he.py +196 -0
  50. quasarr/search/sources/mb.py +195 -0
  51. quasarr/search/sources/nk.py +188 -0
  52. quasarr/search/sources/nx.py +22 -6
  53. quasarr/search/sources/sf.py +143 -151
  54. quasarr/search/sources/sj.py +213 -0
  55. quasarr/search/sources/sl.py +246 -0
  56. quasarr/search/sources/wd.py +208 -0
  57. quasarr/storage/config.py +20 -4
  58. quasarr/storage/setup.py +224 -56
  59. quasarr-1.20.4.dist-info/METADATA +304 -0
  60. quasarr-1.20.4.dist-info/RECORD +72 -0
  61. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/WHEEL +1 -1
  62. quasarr/providers/tvmaze_metadata.py +0 -23
  63. quasarr-1.3.5.dist-info/METADATA +0 -174
  64. quasarr-1.3.5.dist-info/RECORD +0 -43
  65. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/entry_points.txt +0 -0
  66. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/licenses/LICENSE +0 -0
  67. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/top_level.txt +0 -0
quasarr/providers/shared_state.py

@@ -6,7 +6,8 @@ import json
  import os
  import re
  import time
- from datetime import datetime, timedelta
+ import traceback
+ from datetime import datetime, timedelta, date
  from urllib import parse

  import quasarr
@@ -18,6 +19,26 @@ from quasarr.storage.sqlite_database import DataBase
  values = {}
  lock = None

+ # regex to detect season/episode tags for series filtering during search
+ SEASON_EP_REGEX = re.compile(r"(?i)(?:S\d{1,3}(?:E\d{1,3}(?:-\d{1,3})?)?|S\d{1,3}-\d{1,3})")
+ # regex to filter out season/episode tags for movies
+ MOVIE_REGEX = re.compile(r"^(?!.*(?:S\d{1,3}(?:E\d{1,3}(?:-\d{1,3})?)?|S\d{1,3}-\d{1,3})).*$", re.IGNORECASE)
+ # List of known file hosters that should not be used as search/feed sites
+ SHARE_HOSTERS = {
+     "rapidgator",
+     "ddownload",
+     "keep2share",
+     "1fichier",
+     "katfile",
+     "filer",
+     "turbobit",
+     "nitroflare",
+     "filefactory",
+     "uptobox",
+     "mediafire",
+     "mega",
+ }
+

  def set_state(manager_dict, manager_lock):
      global values
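
A quick illustration (editorial aside, not part of the diff): these two module-level patterns are what later split release titles into series and movie candidates. A minimal sketch of their behaviour, re-declaring the same patterns as above:

    import re

    SEASON_EP_REGEX = re.compile(r"(?i)(?:S\d{1,3}(?:E\d{1,3}(?:-\d{1,3})?)?|S\d{1,3}-\d{1,3})")
    MOVIE_REGEX = re.compile(r"^(?!.*(?:S\d{1,3}(?:E\d{1,3}(?:-\d{1,3})?)?|S\d{1,3}-\d{1,3})).*$", re.IGNORECASE)

    # Titles carrying a season/episode or season-range tag are caught by SEASON_EP_REGEX ...
    bool(SEASON_EP_REGEX.search("Show.Name.S02E05.German.1080p.WEB"))  # True
    bool(SEASON_EP_REGEX.search("Show.Name.S01-03.German.1080p.WEB"))  # True
    # ... while MOVIE_REGEX only accepts titles without any such tag.
    bool(MOVIE_REGEX.match("Movie.Name.2023.German.1080p.BluRay"))     # True
    bool(MOVIE_REGEX.match("Show.Name.S02E05.German.1080p.WEB"))       # False

SHARE_HOSTERS is consulted by extract_valid_hostname below to reject file-hoster domains entered as hostnames.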
@@ -50,30 +71,44 @@ def set_files(config_path):


  def generate_api_key():
-     api_key = os.urandom(24).hex()
+     api_key = os.urandom(32).hex()
      Config('API').save("key", api_key)
      info(f'API key replaced with: "{api_key}!"')
      return api_key


  def extract_valid_hostname(url, shorthand):
-     # Check if both characters from the shorthand appear in the url
      try:
          if '://' not in url:
              url = 'http://' + url
          result = parse.urlparse(url)
          domain = result.netloc
+         parts = domain.split('.')
+
+         if domain.startswith(".") or domain.endswith(".") or "." not in domain[1:-1]:
+             message = f'Error: "{domain}" must contain a "." somewhere in the middle – you need to provide a full domain name!'
+             domain = None
+
+         elif any(hoster in parts for hoster in SHARE_HOSTERS):
+             offending = next(host for host in parts if host in SHARE_HOSTERS)
+             message = (
+                 f'Error: "{domain}" is a file‑hosting domain and cannot be used here directly! '
+                 f'Instead please provide a valid hostname that serves direct file links (including "{offending}").'
+             )
+             domain = None
+
+         elif all(char in domain for char in shorthand):
+             message = f'"{domain}" contains both characters from shorthand "{shorthand}". Continuing...'

-         # Check if both characters in the shorthand are in the domain
-         if all(char in domain for char in shorthand):
-             print(f'"{domain}" matches both characters from "{shorthand}". Continuing...')
-             return domain
          else:
-             print(f'Invalid domain "{domain}": Does not contain both characters from shorthand "{shorthand}"')
-             return None
+             message = f'Error: "{domain}" does not contain both characters from shorthand "{shorthand}".'
+             domain = None
      except Exception as e:
-         print(f"Error parsing URL {url}: {e}")
-         return None
+         message = f"Error: {e}. Please provide a valid URL."
+         domain = None
+
+     print(message)
+     return {"domain": domain, "message": message}


  def connect_to_jd(jd, user, password, device_name):
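
For reference (editorial aside, not part of the diff): extract_valid_hostname now returns a dict with both the parsed domain (or None) and a user-facing message, instead of a bare hostname or None. A rough sketch of the new contract, assuming the function is imported from quasarr.providers.shared_state and using example.com as a stand-in hostname:

    # shorthand "nx": both characters must occur somewhere in the hostname
    extract_valid_hostname("https://nx.example.com/feed", "nx")
    # -> {"domain": "nx.example.com",
    #     "message": '"nx.example.com" contains both characters from shorthand "nx". Continuing...'}

    # known share hosters are rejected outright
    extract_valid_hostname("rapidgator.net", "rg")
    # -> {"domain": None,
    #     "message": 'Error: "rapidgator.net" is a file-hosting domain and cannot be used here directly! ...'}

Callers therefore need to unpack result["domain"] rather than using the return value directly.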
@@ -240,12 +275,30 @@ def set_device_settings():
              "setting": "DeleteArchiveDownloadlinksAfterExtraction",
              "expected_value": False, # Links must be kept after extraction for Quasarr to work
          },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "OfflinePackageEnabled",
+             "expected_value": False, # Don't move offline links to extra package
+         },
          {
              "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
              "storage": None,
              "setting": "HandleOfflineOnConfirmLatestSelection",
-             "expected_value": "EXCLUDE_OFFLINE_AND_REMOVE", # Prevents popup starting download with offline mirrors
-         }
+             "expected_value": "INCLUDE_OFFLINE", # Offline links must always be kept for Quasarr to handle packages
+         },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "AutoConfirmManagerHandleOffline",
+             "expected_value": "INCLUDE_OFFLINE", # Offline links must always be kept for Quasarr to handle packages
+         },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "DefaultOnAddedOfflineLinksAction",
+             "expected_value": "INCLUDE_OFFLINE", # Offline links must always be kept for Quasarr to handle packages
+         },
      ]

      for setting in settings_to_enforce:
@@ -293,7 +346,7 @@ def set_device_settings():
          'filenameFilter': {
              'enabled': True,
              'matchType': 'CONTAINS',
-             'regex': '.*\\.(sfv|jpe?g|idx|sub|srt|nfo|bat|txt|exe)',
+             'regex': '.*\\.(sfv|jpe?g|idx|srt|nfo|bat|txt|exe)',
              'useRegex': True
          },
          'filesizeFilter': {'enabled': False, 'from': 0, 'matchType': 'BETWEEN', 'to': 0},
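
A side note on the changed filter (editorial aside, not part of the diff): dropping "sub" from the extension list means the linkgrabber filename filter no longer discards .sub files, while the remaining junk extensions are still caught. Roughly:

    import re

    pattern = r'.*\.(sfv|jpe?g|idx|srt|nfo|bat|txt|exe)'
    bool(re.match(pattern, "release.sample.nfo"))  # True  -> still filtered
    bool(re.match(pattern, "release.german.sub"))  # False -> .sub no longer matches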
@@ -401,12 +454,39 @@ def convert_to_mb(item):
      return int(size_mb)


+ def sanitize_title(title: str) -> str:
+     umlaut_map = {
+         "Ä": "Ae", "ä": "ae",
+         "Ö": "Oe", "ö": "oe",
+         "Ü": "Ue", "ü": "ue",
+         "ß": "ss"
+     }
+     for umlaut, replacement in umlaut_map.items():
+         title = title.replace(umlaut, replacement)
+
+     title = title.encode("ascii", errors="ignore").decode()
+
+     # Replace slashes and spaces with dots
+     title = title.replace("/", "").replace(" ", ".")
+     title = title.strip(".")  # no leading/trailing dots
+     title = title.replace(".-.", "-")  # .-. → -
+
+     # Finally, drop any chars except letters, digits, dots, hyphens, ampersands
+     title = re.sub(r"[^A-Za-z0-9.\-&]", "", title)
+
+     # remove any repeated dots
+     title = re.sub(r"\.{2,}", ".", title)
+     return title
+
+
  def sanitize_string(s):
      s = s.lower()

      # Remove dots / pluses
      s = s.replace('.', ' ')
      s = s.replace('+', ' ')
+     s = s.replace('_', ' ')
+     s = s.replace('-', ' ')

      # Umlauts
      s = re.sub(r'ä', 'ae', s)
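
As an aside (not part of the diff): sanitize_title normalises arbitrary display titles into scene-style dotted names. One worked example, following the implementation above:

    sanitize_title("Die Hölle / Inferno (2017)")
    # -> "Die.Hoelle.Inferno.2017"
    # Umlauts are transliterated, the slash and parentheses are dropped,
    # spaces become dots, and runs of dots collapse to a single dot.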
@@ -433,13 +513,6 @@ def sanitize_string(s):
      return s


- def is_imdb_id(search_string):
-     if bool(re.fullmatch(r"tt\d{7,}", search_string)):
-         return search_string
-     else:
-         return None
-
-
  def search_string_in_sanitized_title(search_string, title):
      sanitized_search_string = sanitize_string(search_string)
      sanitized_title = sanitize_string(title)
@@ -453,12 +526,284 @@ def search_string_in_sanitized_title(search_string, title):
      return False


+ def is_imdb_id(search_string):
+     if bool(re.fullmatch(r"tt\d{7,}", search_string)):
+         return search_string
+     else:
+         return None
+
+
+ def match_in_title(title: str, season: int = None, episode: int = None) -> bool:
+     # ensure season/episode are ints (or None)
+     if isinstance(season, str):
+         try:
+             season = int(season)
+         except ValueError:
+             season = None
+     if isinstance(episode, str):
+         try:
+             episode = int(episode)
+         except ValueError:
+             episode = None
+
+     pattern = re.compile(
+         r"(?i)(?:\.|^)[sS](\d+)(?:-(\d+))?" # season or season‑range
+         r"(?:[eE](\d+)(?:-(?:[eE]?)(\d+))?)?" # episode or episode‑range
+         r"(?=[\.-]|$)"
+     )
+
+     matches = pattern.findall(title)
+     if not matches:
+         return False
+
+     for s_start, s_end, e_start, e_end in matches:
+         se_start, se_end = int(s_start), int(s_end or s_start)
+
+         # if a season was requested, ensure it falls in the range
+         if season is not None and not (se_start <= season <= se_end):
+             continue
+
+         # if no episode requested, only accept if the title itself had no episode tag
+         if episode is None:
+             if not e_start:
+                 return True
+             else:
+                 # title did specify an episode — skip this match
+                 continue
+
+         # episode was requested, so title must supply one
+         if not e_start:
+             continue
+
+         ep_start, ep_end = int(e_start), int(e_end or e_start)
+         if ep_start <= episode <= ep_end:
+             return True
+
+     return False
+
+
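A few worked calls (editorial illustration, not part of the diff) showing how the season/episode matcher above behaves:

    match_in_title("Show.Name.S02E05.German.1080p.WEB", season=2, episode=5)  # True
    match_in_title("Show.Name.S01-03.German.1080p.WEB", season=2)             # True  (season 2 falls inside S01-03)
    match_in_title("Show.Name.S02E05.German.1080p.WEB", season=2)             # False (title has an episode tag, but none was requested)
    match_in_title("Show.Name.S02.German.1080p.WEB", season=2, episode=5)     # False (an episode was requested, but the title has none)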
+ def is_valid_release(title: str,
+                      request_from: str,
+                      search_string: str,
+                      season: int = None,
+                      episode: int = None) -> bool:
+     """
+     Return True if the given release title is valid for the given search parameters.
+     - title: the release title to test
+     - request_from: user agent, contains 'Radarr' for movie searches or 'Sonarr' for TV searches
+     - search_string: the original search phrase (could be an IMDb id or plain text)
+     - season: desired season number (or None)
+     - episode: desired episode number (or None)
+     """
+     try:
+         # Determine whether this is a movie or TV search
+         rf = request_from.lower()
+         is_movie_search = 'radarr' in rf
+         is_tv_search = 'sonarr' in rf
+         is_docs_search = 'lazylibrarian' in rf
+
+         # if search string is NOT an imdb id check search_string_in_sanitized_title - if not match, its not valid
+         if not is_docs_search and not is_imdb_id(search_string):
+             if not search_string_in_sanitized_title(search_string, title):
+                 debug(f"Skipping {title!r} as it doesn't match sanitized search string: {search_string!r}")
+                 return False
+
+
+         # if it's a movie search, don't allow any TV show titles (check for NO season or episode tags in the title)
+         if is_movie_search:
+             if not MOVIE_REGEX.match(title):
+                 debug(f"Skipping {title!r} as title doesn't match movie regex: {MOVIE_REGEX.pattern}")
+                 return False
+             return True
+
+         # if it's a TV show search, don't allow any movies (check for season or episode tags in the title)
+         if is_tv_search:
+             # must have some S/E tag present
+             if not SEASON_EP_REGEX.search(title):
+                 debug(f"Skipping {title!r} as title doesn't match TV show regex: {SEASON_EP_REGEX.pattern}")
+                 return False
+             # if caller specified a season or episode, double‑check the match
+             if season is not None or episode is not None:
+                 if not match_in_title(title, season, episode):
+                     debug(f"Skipping {title!r} as it doesn't match season {season} and episode {episode}")
+                     return False
+             return True
+
+         # if it's a document search, it should not contain Movie or TV show tags
+         if is_docs_search:
+             # must NOT have any S/E tag present
+             if SEASON_EP_REGEX.search(title):
+                 debug(f"Skipping {title!r} as title matches TV show regex: {SEASON_EP_REGEX.pattern}")
+                 return False
+             return True
+
+         # unknown search source — reject by default
+         debug(f"Skipping {title!r} as search source is unknown: {request_from!r}")
+         return False
+
+     except Exception as e:
+         # log exception message and short stack trace
+         tb = traceback.format_exc()
+         debug(f"Exception in is_valid_release: {e!r}\n{tb}"
+               f"is_valid_release called with "
+               f"title={title!r}, request_from={request_from!r}, "
+               f"search_string={search_string!r}, season={season!r}, episode={episode!r}")
+         return False
+
+
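Putting the pieces together (a sketch, not part of the diff): the helpers above act as the release filter for incoming searches. Expected behaviour, assuming the module is importable as quasarr.providers.shared_state and that search_string_in_sanitized_title (unchanged and not shown in this hunk) performs a substring check on the sanitized strings:

    from quasarr.providers.shared_state import is_valid_release

    # Radarr (movie) search: titles with season/episode tags are rejected
    is_valid_release("Some.Movie.2024.German.1080p.BluRay", "Radarr/5.9", "Some Movie")  # True
    is_valid_release("Some.Show.S01E01.German.1080p.WEB", "Radarr/5.9", "Some Show")     # False

    # Sonarr (TV) search: a season/episode tag is required and, when requested, must match
    is_valid_release("Some.Show.S01E01.German.1080p.WEB", "Sonarr/4.0", "Some Show",
                     season=1, episode=1)                                                # True
    is_valid_release("Some.Show.S02E01.German.1080p.WEB", "Sonarr/4.0", "Some Show",
                     season=1, episode=1)                                                # False (wrong season)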
+ def normalize_magazine_title(title: str) -> str:
+     """
+     Massage magazine titles so LazyLibrarian's parser can pick up dates reliably:
+     - Convert date-like patterns into space-delimited numeric tokens (YYYY MM DD or YYYY MM).
+     - Handle malformed "DD.YYYY.YYYY" cases (e.g., 04.2006.2025 → 2025 06 04).
+     - Convert two-part month-year like "3.25" into YYYY MM.
+     - Convert "No/Nr/Sonderheft X.YYYY" when X≤12 into YYYY MM.
+     - Preserve pure issue/volume prefixes and other digit runs untouched.
+     """
+     title = title.strip()
+
+     # 0) Bug: DD.YYYY.YYYY -> treat second YYYY's last two digits as month
+     def repl_bug(match):
+         d = int(match.group(1))
+         m_hint = match.group(2)
+         y = int(match.group(3))
+         m = int(m_hint[-2:])
+         try:
+             date(y, m, d)
+             return f"{y:04d} {m:02d} {d:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(\d{1,2})\.(20\d{2})\.(20\d{2})\b", repl_bug, title)
+
+     # 1) DD.MM.YYYY -> "YYYY MM DD"
+     def repl_dmy(match):
+         d, m, y = map(int, match.groups())
+         try:
+             date(y, m, d)
+             return f"{y:04d} {m:02d} {d:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(\d{1,2})\.(\d{1,2})\.(\d{4})\b", repl_dmy, title)
+
+     # 2) DD[.]? MonthName YYYY (optional 'vom') -> "YYYY MM DD"
+     def repl_dmony(match):
+         d = int(match.group(1))
+         name = match.group(2)
+         y = int(match.group(3))
+         mm = _month_num(name)
+         if mm:
+             try:
+                 date(y, mm, d)
+                 return f"{y:04d} {mm:02d} {d:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(
+         r"\b(?:vom\s*)?(\d{1,2})\.?\s+([A-Za-zÄÖÜäöüß]+)\s+(\d{4})\b",
+         repl_dmony,
+         title,
+         flags=re.IGNORECASE
+     )
+
+     # 3) MonthName YYYY -> "YYYY MM"
+     def repl_mony(match):
+         name = match.group(1)
+         y = int(match.group(2))
+         mm = _month_num(name)
+         if mm:
+             try:
+                 date(y, mm, 1)
+                 return f"{y:04d} {mm:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(r"\b([A-Za-zÄÖÜäöüß]+)\s+(\d{4})\b", repl_mony, title, flags=re.IGNORECASE)
+
+     # 4) YYYYMMDD -> "YYYY MM DD"
+     def repl_ymd(match):
+         y = int(match.group(1))
+         m = int(match.group(2))
+         d = int(match.group(3))
+         try:
+             date(y, m, d)
+             return f"{y:04d} {m:02d} {d:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(20\d{2})(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])\b", repl_ymd, title)
+
+     # 5) YYYYMM -> "YYYY MM"
+     def repl_ym(match):
+         y = int(match.group(1))
+         m = int(match.group(2))
+         try:
+             date(y, m, 1)
+             return f"{y:04d} {m:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(20\d{2})(0[1-9]|1[0-2])\b", repl_ym, title)
+
+     # 6) X.YY (month.two-digit-year) -> "YYYY MM" (e.g., 3.25 -> 2025 03)
+     def repl_my2(match):
+         mm = int(match.group(1))
+         yy = int(match.group(2))
+         y = 2000 + yy
+         if 1 <= mm <= 12:
+             try:
+                 date(y, mm, 1)
+                 return f"{y:04d} {mm:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(r"\b([1-9]|1[0-2])\.(\d{2})\b", repl_my2, title)
+
+     # 7) No/Nr/Sonderheft <1-12>.<YYYY> -> "YYYY MM"
+     def repl_nmy(match):
+         num = int(match.group(1))
+         y = int(match.group(2))
+         if 1 <= num <= 12:
+             try:
+                 date(y, num, 1)
+                 return f"{y:04d} {num:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(
+         r"\b(?:No|Nr|Sonderheft)\s*(\d{1,2})\.(\d{4})\b",
+         repl_nmy,
+         title,
+         flags=re.IGNORECASE
+     )
+
+     return title
+
+
+ # Helper for month name mapping
+ def _month_num(name: str) -> int:
+     name = name.lower()
+     mmap = {
+         'januar': 1, 'jan': 1, 'februar': 2, 'feb': 2, 'märz': 3, 'maerz': 3, 'mär': 3, 'mrz': 3, 'mae': 3,
+         'april': 4, 'apr': 4, 'mai': 5, 'juni': 6, 'jun': 6, 'juli': 7, 'jul': 7, 'august': 8, 'aug': 8,
+         'september': 9, 'sep': 9, 'oktober': 10, 'okt': 10, 'november': 11, 'nov': 11, 'dezember': 12, 'dez': 12,
+         'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6, 'july': 7, 'august': 8,
+         'september': 9, 'october': 10, 'november': 11, 'december': 12
+     }
+     return mmap.get(name)
+
+
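A handful of conversions that follow directly from the rules above (editorial illustration, not part of the diff; the example titles are hypothetical inputs):

    normalize_magazine_title("Der Spiegel 12.05.2025")
    # -> "Der Spiegel 2025 05 12"    (rule 1: DD.MM.YYYY)
    normalize_magazine_title("Stereoplay 04.2006.2025")
    # -> "Stereoplay 2025 06 04"     (rule 0: malformed DD.YYYY.YYYY)
    normalize_magazine_title("Auto Motor Sport 3.25")
    # -> "Auto Motor Sport 2025 03"  (rule 6: month.two-digit-year)
    normalize_magazine_title("Computer Bild Sonderheft 5.2024")
    # -> "Computer Bild 2024 05"     (rule 7; the whole "Sonderheft 5.2024" span is replaced)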
  def get_recently_searched(shared_state, context, timeout_seconds):
      recently_searched = shared_state.values.get(context, {})
      threshold = datetime.now() - timedelta(seconds=timeout_seconds)
      keys_to_remove = [key for key, value in recently_searched.items() if value["timestamp"] <= threshold]
      for key in keys_to_remove:
-         debug(f"Removing '/{key}' from recently searched memory ({context})...")
+         debug(f"Removing '{key}' from recently searched memory ({context})...")
          del recently_searched[key]
      return recently_searched

@@ -467,7 +812,7 @@ def download_package(links, title, password, package_id):
      device = get_device()
      downloaded = device.linkgrabber.add_links(params=[
          {
-             "autostart": True,
+             "autostart": False,
              "links": json.dumps(links),
              "packageName": title,
              "extractPassword": password,
@@ -0,0 +1,154 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ from typing import Dict, Any
6
+
7
+
8
+ class StatsHelper:
9
+ """
10
+ Multiprocessing-safe stats helper using separate rows.
11
+ Uses shared_state for database access across processes.
12
+ """
13
+
14
+ def __init__(self, shared_state):
15
+ self.shared_state = shared_state
16
+ self._ensure_stats_exist()
17
+
18
+ def _get_db(self):
19
+ """Get database interface through shared_state"""
20
+ return self.shared_state.values["database"]("statistics")
21
+
22
+ def _ensure_stats_exist(self):
23
+ """Initialize stats if they don't exist"""
24
+ default_stats = {
25
+ "packages_downloaded": 0,
26
+ "links_processed": 0,
27
+ "captcha_decryptions_automatic": 0,
28
+ "captcha_decryptions_manual": 0,
29
+ "failed_downloads": 0,
30
+ "failed_decryptions_automatic": 0,
31
+ "failed_decryptions_manual": 0
32
+ }
33
+
34
+ db = self._get_db()
35
+ for key, default_value in default_stats.items():
36
+ if db.retrieve(key) is None:
37
+ db.store(key, str(default_value))
38
+
39
+ def _get_stat(self, key: str, default: int = 0) -> int:
40
+ """Get a single stat value"""
41
+ try:
42
+ db = self._get_db()
43
+ value = db.retrieve(key)
44
+ return int(value) if value is not None else default
45
+ except (ValueError, TypeError):
46
+ return default
47
+
48
+ def _increment_stat(self, key: str, count: int = 1):
49
+ """Process-safe increment of a single stat"""
50
+ db = self._get_db()
51
+ current = self._get_stat(key, 0)
52
+ db.update_store(key, str(current + count))
53
+
54
+ def increment_package_with_links(self, links):
55
+ """Increment package downloaded and links processed for one package, or failed download if no links
56
+
57
+ Args:
58
+ links: Can be:
59
+ - list/array: counts the length
60
+ - int: uses the value directly
61
+ - None/False/empty: treats as failed download
62
+ """
63
+ # Handle different input types
64
+ if links is None or links is False:
65
+ link_count = 0
66
+ elif isinstance(links, (list, tuple)):
67
+ link_count = len(links)
68
+ elif isinstance(links, int):
69
+ link_count = links
70
+ else:
71
+ # Handle other falsy values or unexpected types
72
+ try:
73
+ link_count = int(links) if links else 0
74
+ except (ValueError, TypeError):
75
+ link_count = 0
76
+
77
+ # Now handle the actual increment logic
78
+ if link_count == 0:
79
+ self._increment_stat("failed_downloads", 1)
80
+ else:
81
+ self._increment_stat("packages_downloaded", 1)
82
+ self._increment_stat("links_processed", link_count)
83
+
84
+ def increment_captcha_decryptions_automatic(self):
85
+ """Increment automatic captcha decryptions counter"""
86
+ self._increment_stat("captcha_decryptions_automatic", 1)
87
+
88
+ def increment_captcha_decryptions_manual(self):
89
+ """Increment manual captcha decryptions counter"""
90
+ self._increment_stat("captcha_decryptions_manual", 1)
91
+
92
+ def increment_failed_downloads(self):
93
+ """Increment failed downloads counter"""
94
+ self._increment_stat("failed_downloads", 1)
95
+
96
+ def increment_failed_decryptions_automatic(self):
97
+ """Increment failed automatic decryptions counter"""
98
+ self._increment_stat("failed_decryptions_automatic", 1)
99
+
100
+ def increment_failed_decryptions_manual(self):
101
+ """Increment failed manual decryptions counter"""
102
+ self._increment_stat("failed_decryptions_manual", 1)
103
+
104
+ def get_stats(self) -> Dict[str, Any]:
105
+ """Get all current statistics"""
106
+ stats = {
107
+ "packages_downloaded": self._get_stat("packages_downloaded", 0),
108
+ "links_processed": self._get_stat("links_processed", 0),
109
+ "captcha_decryptions_automatic": self._get_stat("captcha_decryptions_automatic", 0),
110
+ "captcha_decryptions_manual": self._get_stat("captcha_decryptions_manual", 0),
111
+ "failed_downloads": self._get_stat("failed_downloads", 0),
112
+ "failed_decryptions_automatic": self._get_stat("failed_decryptions_automatic", 0),
113
+ "failed_decryptions_manual": self._get_stat("failed_decryptions_manual", 0)
114
+ }
115
+
116
+ # Calculate totals and rates
117
+ total_captcha_decryptions = stats["captcha_decryptions_automatic"] + stats["captcha_decryptions_manual"]
118
+ total_failed_decryptions = stats["failed_decryptions_automatic"] + stats["failed_decryptions_manual"]
119
+ total_download_attempts = stats["packages_downloaded"] + stats["failed_downloads"]
120
+ total_decryption_attempts = total_captcha_decryptions + total_failed_decryptions
121
+ total_automatic_attempts = stats["captcha_decryptions_automatic"] + stats["failed_decryptions_automatic"]
122
+ total_manual_attempts = stats["captcha_decryptions_manual"] + stats["failed_decryptions_manual"]
123
+
124
+ # Add calculated fields
125
+ stats.update({
126
+ "total_captcha_decryptions": total_captcha_decryptions,
127
+ "total_failed_decryptions": total_failed_decryptions,
128
+ "total_download_attempts": total_download_attempts,
129
+ "total_decryption_attempts": total_decryption_attempts,
130
+ "total_automatic_attempts": total_automatic_attempts,
131
+ "total_manual_attempts": total_manual_attempts,
132
+ "download_success_rate": (
133
+ (stats["packages_downloaded"] / total_download_attempts * 100)
134
+ if total_download_attempts > 0 else 0
135
+ ),
136
+ "decryption_success_rate": (
137
+ (total_captcha_decryptions / total_decryption_attempts * 100)
138
+ if total_decryption_attempts > 0 else 0
139
+ ),
140
+ "automatic_decryption_success_rate": (
141
+ (stats["captcha_decryptions_automatic"] / total_automatic_attempts * 100)
142
+ if total_automatic_attempts > 0 else 0
143
+ ),
144
+ "manual_decryption_success_rate": (
145
+ (stats["captcha_decryptions_manual"] / total_manual_attempts * 100)
146
+ if total_manual_attempts > 0 else 0
147
+ ),
148
+ "average_links_per_package": (
149
+ stats["links_processed"] / stats["packages_downloaded"]
150
+ if stats["packages_downloaded"] > 0 else 0
151
+ )
152
+ })
153
+
154
+ return stats
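
To round off (editorial sketch, not part of the diff): StatsHelper keeps each counter in its own key/value row of the "statistics" database and funnels all access through shared_state. A usage sketch, assuming a shared_state object whose values["database"] factory returns the store with the retrieve/store/update_store methods used above:

    from quasarr.providers.statistics import StatsHelper  # path per the file list above

    stats = StatsHelper(shared_state)  # seeds all counters with 0 on first run

    stats.increment_package_with_links(["link1", "link2", "link3"])  # +1 package, +3 links
    stats.increment_package_with_links([])                           # no links -> counted as a failed download
    stats.increment_captcha_decryptions_manual()

    totals = stats.get_stats()
    totals["download_success_rate"]      # 1 successful / 2 attempts * 100 = 50.0
    totals["average_links_per_package"]  # 3 links / 1 package = 3.0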