quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr might be problematic.

Files changed (77)
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
@@ -2,16 +2,43 @@
  # Quasarr
  # Project by https://github.com/rix1337

+ import json
  import os
+ import re
  import time
+ import traceback
+ from datetime import datetime, timedelta, date
+ from urllib import parse

+ import quasarr
+ from quasarr.providers.log import info, debug
+ from quasarr.providers.myjd_api import Myjdapi, TokenExpiredException, RequestTimeoutException, MYJDException, Jddevice
  from quasarr.storage.config import Config
  from quasarr.storage.sqlite_database import DataBase
- from quasarr.providers.myjd_api import Myjdapi, TokenExpiredException, RequestTimeoutException, MYJDException, Jddevice

  values = {}
  lock = None

+ # regex to detect season/episode tags for series filtering during search
+ SEASON_EP_REGEX = re.compile(r"(?i)(?:S\d{1,3}(?:E\d{1,3}(?:-\d{1,3})?)?|S\d{1,3}-\d{1,3})")
+ # regex to filter out season/episode tags for movies
+ MOVIE_REGEX = re.compile(r"^(?!.*(?:S\d{1,3}(?:E\d{1,3}(?:-\d{1,3})?)?|S\d{1,3}-\d{1,3})).*$", re.IGNORECASE)
+ # List of known file hosters that should not be used as search/feed sites
+ SHARE_HOSTERS = {
+     "rapidgator",
+     "ddownload",
+     "keep2share",
+     "1fichier",
+     "katfile",
+     "filer",
+     "turbobit",
+     "nitroflare",
+     "filefactory",
+     "uptobox",
+     "mediafire",
+     "mega",
+ }
+

  def set_state(manager_dict, manager_lock):
      global values
@@ -30,10 +57,6 @@ def update(key, value):
      lock.release()


- def set_sites():
-     update("sites", ["DW", "FX", "NX"])
-
-
  def set_connection_info(internal_address, external_address, port):
      if internal_address.count(":") < 2:
          internal_address = f"{internal_address}:{port}"
@@ -47,13 +70,54 @@ def set_files(config_path):
      update("dbfile", os.path.join(config_path, "Quasarr.db"))


+ def generate_api_key():
+     api_key = os.urandom(32).hex()
+     Config('API').save("key", api_key)
+     info(f'API key replaced with: "{api_key}!"')
+     return api_key
+
+
+ def extract_valid_hostname(url, shorthand):
+     try:
+         if '://' not in url:
+             url = 'http://' + url
+         result = parse.urlparse(url)
+         domain = result.netloc
+         parts = domain.split('.')
+
+         if domain.startswith(".") or domain.endswith(".") or "." not in domain[1:-1]:
+             message = f'Error: "{domain}" must contain a "." somewhere in the middle – you need to provide a full domain name!'
+             domain = None
+
+         elif any(hoster in parts for hoster in SHARE_HOSTERS):
+             offending = next(host for host in parts if host in SHARE_HOSTERS)
+             message = (
+                 f'Error: "{domain}" is a file‑hosting domain and cannot be used here directly! '
+                 f'Instead please provide a valid hostname that serves direct file links (including "{offending}").'
+             )
+             domain = None
+
+         elif all(char in domain for char in shorthand):
+             message = f'"{domain}" contains both characters from shorthand "{shorthand}". Continuing...'
+
+         else:
+             message = f'Error: "{domain}" does not contain both characters from shorthand "{shorthand}".'
+             domain = None
+     except Exception as e:
+         message = f"Error: {e}. Please provide a valid URL."
+         domain = None
+
+     print(message)
+     return {"domain": domain, "message": message}
+
+
  def connect_to_jd(jd, user, password, device_name):
      try:
          jd.connect(user, password)
          jd.update_devices()
          device = jd.get_device(device_name)
      except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
-         print("Error connecting to JDownloader: " + str(e))
+         info("Error connecting to JDownloader: " + str(e).strip())
          return False
      if not device or not isinstance(device, (type, Jddevice)):
          return False
@@ -61,9 +125,9 @@ def connect_to_jd(jd, user, password, device_name):
      device.downloadcontroller.get_current_state() # request forces direct_connection info update
      connection_info = device.check_direct_connection()
      if connection_info["status"]:
-         print(f'Direct connection to JDownloader established: "{connection_info['ip']}"')
+         info(f'Direct connection to JDownloader established: "{connection_info['ip']}"')
      else:
-         print("Could not establish direct connection to JDownloader.")
+         info("Could not establish direct connection to JDownloader.")
      update("device", device)
      return True

@@ -124,6 +188,7 @@ def connect_device():

  def get_device():
      attempts = 0
+     last_backoff_change = 0 # Track when we last changed backoff strategy

      while True:
          try:
@@ -135,14 +200,30 @@ def get_device():

          update("device", False)

-         if attempts % 10 == 0:
-             print(
-                 f"WARNUNG: {attempts} aufeinanderfolgende JDownloader Verbindungsfehler. Bitte prüfen und ggf. neu starten!")
-         time.sleep(3)
+         # Determine sleep time based on failure count
+         if attempts <= 10:
+             # First 10 failures: 3 seconds
+             sleep_time = 3
+             if attempts == 10:
+                 info(f"WARNING: {attempts} consecutive JDownloader connection errors. Switching to 1-minute intervals.")
+         elif attempts <= 15:
+             # Next 5 failures (11-15): 1 minute
+             sleep_time = 60
+             if attempts % 10 == 0:
+                 info(f"WARNING: {attempts} consecutive JDownloader connection errors. Please check your credentials!")
+             if attempts == 15:
+                 info(f"WARNING: Still failing after {attempts} attempts. Switching to 5-minute intervals.")
+         else:
+             # After 15 failures: 5 minutes
+             sleep_time = 300
+             if attempts % 10 == 0:
+                 info(f"WARNING: {attempts} consecutive JDownloader connection errors. Please check your credentials!")

          if connect_device():
              break

+         time.sleep(sleep_time)
+
      return values["device"]


@@ -155,10 +236,216 @@ def get_devices(user, password):
          devices = jd.list_devices()
          return devices
      except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
-         print("Error connecting to JDownloader: " + str(e))
+         info("Error connecting to JDownloader: " + str(e))
          return []


+ def set_device_settings():
+     device = get_device()
+
+     settings_to_enforce = [
+         {
+             "namespace": "org.jdownloader.settings.GeneralSettings",
+             "storage": None,
+             "setting": "AutoStartDownloadOption",
+             "expected_value": "ALWAYS", # Downloads must start automatically for Quasarr to work
+         },
+         {
+             "namespace": "org.jdownloader.settings.GeneralSettings",
+             "storage": None,
+             "setting": "IfFileExistsAction",
+             "expected_value": "SKIP_FILE", # Prevents popups during download
+         },
+         {
+             "namespace": "org.jdownloader.settings.GeneralSettings",
+             "storage": None,
+             "setting": "CleanupAfterDownloadAction",
+             "expected_value": "NEVER", # Links must be kept after download for Quasarr to work
+         },
+         {
+             "namespace": "org.jdownloader.settings.GraphicalUserInterfaceSettings",
+             "storage": None,
+             "setting": "BannerEnabled",
+             "expected_value": False, # Removes UI clutter in JDownloader
+         },
+         {
+             "namespace": "org.jdownloader.settings.GraphicalUserInterfaceSettings",
+             "storage": None,
+             "setting": "DonateButtonState",
+             "expected_value": "CUSTOM_HIDDEN", # Removes UI clutter in JDownloader
+         },
+         {
+             "namespace": "org.jdownloader.extensions.extraction.ExtractionConfig",
+             "storage": "cfg/org.jdownloader.extensions.extraction.ExtractionExtension",
+             "setting": "DeleteArchiveFilesAfterExtractionAction",
+             "expected_value": "NULL", # "NULL" is the ENUM for "Delete files from Harddisk"
+         },
+         {
+             "namespace": "org.jdownloader.extensions.extraction.ExtractionConfig",
+             "storage": "cfg/org.jdownloader.extensions.extraction.ExtractionExtension",
+             "setting": "IfFileExistsAction",
+             "expected_value": "OVERWRITE_FILE", # Prevents popups during extraction
+         },
+         {
+             "namespace": "org.jdownloader.extensions.extraction.ExtractionConfig",
+             "storage": "cfg/org.jdownloader.extensions.extraction.ExtractionExtension",
+             "setting": "DeleteArchiveDownloadlinksAfterExtraction",
+             "expected_value": False, # Links must be kept after extraction for Quasarr to work
+         },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "OfflinePackageEnabled",
+             "expected_value": False, # Don't move offline links to extra package
+         },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "HandleOfflineOnConfirmLatestSelection",
+             "expected_value": "INCLUDE_OFFLINE", # Offline links must always be kept for Quasarr to handle packages
+         },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "AutoConfirmManagerHandleOffline",
+             "expected_value": "INCLUDE_OFFLINE", # Offline links must always be kept for Quasarr to handle packages
+         },
+         {
+             "namespace": "org.jdownloader.gui.views.linkgrabber.addlinksdialog.LinkgrabberSettings",
+             "storage": None,
+             "setting": "DefaultOnAddedOfflineLinksAction",
+             "expected_value": "INCLUDE_OFFLINE", # Offline links must always be kept for Quasarr to handle packages
+         },
+     ]
+
+     for setting in settings_to_enforce:
+         namespace = setting["namespace"]
+         storage = setting["storage"] or "null"
+         name = setting["setting"]
+         expected_value = setting["expected_value"]
+
+         settings = device.config.get(namespace, storage, name)
+
+         if settings != expected_value:
+             success = device.config.set(namespace, storage, name, expected_value)
+
+             location = f"{namespace}/{storage}" if storage != "null" else namespace
+             status = "Updated" if success else "Failed to update"
+             info(f'{status} "{name}" in "{location}" to "{expected_value}".')
+
+     settings_to_add = [
+         {
+             "namespace": "org.jdownloader.extensions.extraction.ExtractionConfig",
+             "storage": "cfg/org.jdownloader.extensions.extraction.ExtractionExtension",
+             "setting": "BlacklistPatterns",
+             "expected_values": [
+                 '.*sample/.*',
+                 '.*Sample/.*',
+                 '.*\\.jpe?g',
+                 '.*\\.idx',
+                 '.*\\.sub',
+                 '.*\\.srt',
+                 '.*\\.nfo',
+                 '.*\\.bat',
+                 '.*\\.txt',
+                 '.*\\.exe',
+                 '.*\\.sfv'
+             ]
+         },
+         {
+             "namespace": "org.jdownloader.controlling.filter.LinkFilterSettings",
+             "storage": "null",
+             "setting": "FilterList",
+             "expected_values": [
+                 {'conditionFilter':
+                      {'conditions': [], 'enabled': False, 'matchType': 'IS_TRUE'}, 'created': 0,
+                  'enabled': True,
+                  'filenameFilter': {
+                      'enabled': True,
+                      'matchType': 'CONTAINS',
+                      'regex': '.*\\.(sfv|jpe?g|idx|srt|nfo|bat|txt|exe)',
+                      'useRegex': True
+                  },
+                  'filesizeFilter': {'enabled': False, 'from': 0, 'matchType': 'BETWEEN', 'to': 0},
+                  'filetypeFilter': {'archivesEnabled': False, 'audioFilesEnabled': False, 'customs': None,
+                                     'docFilesEnabled': False, 'enabled': False, 'exeFilesEnabled': False,
+                                     'hashEnabled': False, 'imagesEnabled': False, 'matchType': 'IS',
+                                     'subFilesEnabled': False, 'useRegex': False, 'videoFilesEnabled': False},
+                  'hosterURLFilter': {'enabled': False, 'matchType': 'CONTAINS', 'regex': '', 'useRegex': False},
+                  'matchAlwaysFilter': {'enabled': False}, 'name': 'Quasarr_Block_Files',
+                  'onlineStatusFilter': {'enabled': False, 'matchType': 'IS', 'onlineStatus': 'OFFLINE'},
+                  'originFilter': {'enabled': False, 'matchType': 'IS', 'origins': []},
+                  'packagenameFilter': {'enabled': False, 'matchType': 'CONTAINS', 'regex': '', 'useRegex': False},
+                  'pluginStatusFilter': {'enabled': False, 'matchType': 'IS', 'pluginStatus': 'PREMIUM'},
+                  'sourceURLFilter': {'enabled': False, 'matchType': 'CONTAINS', 'regex': '', 'useRegex': False},
+                  'testUrl': ''}]
+         },
+     ]
+
+     for setting in settings_to_add:
+         namespace = setting["namespace"]
+         storage = setting["storage"] or "null"
+         name = setting["setting"]
+         expected_values = setting["expected_values"]
+
+         added_items = 0
+         settings = device.config.get(namespace, storage, name)
+         for item in expected_values:
+             if item not in settings:
+                 settings.append(item)
+                 added_items += 1
+
+         if added_items:
+             success = device.config.set(namespace, storage, name, json.dumps(settings))
+
+             location = f"{namespace}/{storage}" if storage != "null" else namespace
+             status = "Added" if success else "Failed to add"
+             info(f'{status} {added_items} items to "{name}" in "{location}".')
+
+
+ def update_jdownloader():
+     try:
+         if not get_device():
+             set_device_from_config()
+         device = get_device()
+
+         if device:
+             try:
+                 current_state = device.downloadcontroller.get_current_state()
+                 is_collecting = device.linkgrabber.is_collecting()
+                 update_available = device.update.update_available()
+
+                 if (current_state.lower() == "idle") and (not is_collecting and update_available):
+                     info("JDownloader update ready. Starting update...")
+                     device.update.restart_and_update()
+             except quasarr.providers.myjd_api.TokenExpiredException:
+                 return False
+             return True
+         else:
+             return False
+     except quasarr.providers.myjd_api.MYJDException as e:
+         info(f"Error updating JDownloader: {e}")
+         return False
+
+
+ def start_downloads():
+     try:
+         if not get_device():
+             set_device_from_config()
+         device = get_device()
+
+         if device:
+             try:
+                 return device.downloadcontroller.start_downloads()
+             except quasarr.providers.myjd_api.TokenExpiredException:
+                 return False
+         else:
+             return False
+     except quasarr.providers.myjd_api.MYJDException as e:
+         info(f"Error starting Downloads: {e}")
+         return False
+
+
  def get_db(table):
      return DataBase(table)

@@ -184,12 +471,365 @@ def convert_to_mb(item):
      return int(size_mb)


+ def sanitize_title(title: str) -> str:
+     umlaut_map = {
+         "Ä": "Ae", "ä": "ae",
+         "Ö": "Oe", "ö": "oe",
+         "Ü": "Ue", "ü": "ue",
+         "ß": "ss"
+     }
+     for umlaut, replacement in umlaut_map.items():
+         title = title.replace(umlaut, replacement)
+
+     title = title.encode("ascii", errors="ignore").decode()
+
+     # Replace slashes and spaces with dots
+     title = title.replace("/", "").replace(" ", ".")
+     title = title.strip(".") # no leading/trailing dots
+     title = title.replace(".-.", "-") # .-. → -
+
+     # Finally, drop any chars except letters, digits, dots, hyphens, ampersands
+     title = re.sub(r"[^A-Za-z0-9.\-&]", "", title)
+
+     # remove any repeated dots
+     title = re.sub(r"\.{2,}", ".", title)
+     return title
+
+
+ def sanitize_string(s):
+     s = s.lower()
+
+     # Remove dots / pluses
+     s = s.replace('.', ' ')
+     s = s.replace('+', ' ')
+     s = s.replace('_', ' ')
+     s = s.replace('-', ' ')
+
+     # Umlauts
+     s = re.sub(r'ä', 'ae', s)
+     s = re.sub(r'ö', 'oe', s)
+     s = re.sub(r'ü', 'ue', s)
+     s = re.sub(r'ß', 'ss', s)
+
+     # Remove special characters
+     s = re.sub(r'[^a-zA-Z0-9\s]', '', s)
+
+     # Remove season and episode patterns
+     s = re.sub(r'\bs\d{1,3}(e\d{1,3})?\b', '', s)
+
+     # Remove German and English articles
+     articles = r'\b(?:der|die|das|ein|eine|einer|eines|einem|einen|the|a|an|and)\b'
+     s = re.sub(articles, '', s, re.IGNORECASE)
+
+     # Replace obsolete titles
+     s = s.replace('navy cis', 'ncis')
+
+     # Remove extra whitespace
+     s = ' '.join(s.split())
+
+     return s
+
+
+ def search_string_in_sanitized_title(search_string, title):
+     sanitized_search_string = sanitize_string(search_string)
+     sanitized_title = sanitize_string(title)
+
+     # Use word boundaries to ensure full word/phrase match
+     if re.search(rf'\b{re.escape(sanitized_search_string)}\b', sanitized_title):
+         debug(f"Matched search string: {sanitized_search_string} with title: {sanitized_title}")
+         return True
+     else:
+         debug(f"Skipping {title} as it doesn't match search string: {sanitized_search_string}")
+         return False
+
+
+ def is_imdb_id(search_string):
+     if bool(re.fullmatch(r"tt\d{7,}", search_string)):
+         return search_string
+     else:
+         return None
+
+
+ def match_in_title(title: str, season: int = None, episode: int = None) -> bool:
+     # ensure season/episode are ints (or None)
+     if isinstance(season, str):
+         try:
+             season = int(season)
+         except ValueError:
+             season = None
+     if isinstance(episode, str):
+         try:
+             episode = int(episode)
+         except ValueError:
+             episode = None
+
+     pattern = re.compile(
+         r"(?i)(?:\.|^)[sS](\d+)(?:-(\d+))?" # season or season‑range
+         r"(?:[eE](\d+)(?:-(?:[eE]?)(\d+))?)?" # episode or episode‑range
+         r"(?=[\.-]|$)"
+     )
+
+     matches = pattern.findall(title)
+     if not matches:
+         return False
+
+     for s_start, s_end, e_start, e_end in matches:
+         se_start, se_end = int(s_start), int(s_end or s_start)
+
+         # if a season was requested, ensure it falls in the range
+         if season is not None and not (se_start <= season <= se_end):
+             continue
+
+         # if no episode requested, only accept if the title itself had no episode tag
+         if episode is None:
+             if not e_start:
+                 return True
+             else:
+                 # title did specify an episode — skip this match
+                 continue
+
+         # episode was requested, so title must supply one
+         if not e_start:
+             continue
+
+         ep_start, ep_end = int(e_start), int(e_end or e_start)
+         if ep_start <= episode <= ep_end:
+             return True
+
+     return False
+
+
+ def is_valid_release(title: str,
+                      request_from: str,
+                      search_string: str,
+                      season: int = None,
+                      episode: int = None) -> bool:
+     """
+     Return True if the given release title is valid for the given search parameters.
+     - title: the release title to test
+     - request_from: user agent, contains 'Radarr' for movie searches or 'Sonarr' for TV searches
+     - search_string: the original search phrase (could be an IMDb id or plain text)
+     - season: desired season number (or None)
+     - episode: desired episode number (or None)
+     """
+     try:
+         # Determine whether this is a movie or TV search
+         rf = request_from.lower()
+         is_movie_search = 'radarr' in rf
+         is_tv_search = 'sonarr' in rf
+         is_docs_search = 'lazylibrarian' in rf
+
+         # if search string is NOT an imdb id check search_string_in_sanitized_title - if not match, its not valid
+         if not is_docs_search and not is_imdb_id(search_string):
+             if not search_string_in_sanitized_title(search_string, title):
+                 debug(f"Skipping {title!r} as it doesn't match sanitized search string: {search_string!r}")
+                 return False
+
+         # if it's a movie search, don't allow any TV show titles (check for NO season or episode tags in the title)
+         if is_movie_search:
+             if not MOVIE_REGEX.match(title):
+                 debug(f"Skipping {title!r} as title doesn't match movie regex: {MOVIE_REGEX.pattern}")
+                 return False
+             return True
+
+         # if it's a TV show search, don't allow any movies (check for season or episode tags in the title)
+         if is_tv_search:
+             # must have some S/E tag present
+             if not SEASON_EP_REGEX.search(title):
+                 debug(f"Skipping {title!r} as title doesn't match TV show regex: {SEASON_EP_REGEX.pattern}")
+                 return False
+             # if caller specified a season or episode, double‑check the match
+             if season is not None or episode is not None:
+                 if not match_in_title(title, season, episode):
+                     debug(f"Skipping {title!r} as it doesn't match season {season} and episode {episode}")
+                     return False
+             return True
+
+         # if it's a document search, it should not contain Movie or TV show tags
+         if is_docs_search:
+             # must NOT have any S/E tag present
+             if SEASON_EP_REGEX.search(title):
+                 debug(f"Skipping {title!r} as title matches TV show regex: {SEASON_EP_REGEX.pattern}")
+                 return False
+             return True
+
+         # unknown search source — reject by default
+         debug(f"Skipping {title!r} as search source is unknown: {request_from!r}")
+         return False
+
+     except Exception as e:
+         # log exception message and short stack trace
+         tb = traceback.format_exc()
+         debug(f"Exception in is_valid_release: {e!r}\n{tb}"
+               f"is_valid_release called with "
+               f"title={title!r}, request_from={request_from!r}, "
+               f"search_string={search_string!r}, season={season!r}, episode={episode!r}")
+         return False
+
+
+ def normalize_magazine_title(title: str) -> str:
+     """
+     Massage magazine titles so LazyLibrarian's parser can pick up dates reliably:
+     - Convert date-like patterns into space-delimited numeric tokens (YYYY MM DD or YYYY MM).
+     - Handle malformed "DD.YYYY.YYYY" cases (e.g., 04.2006.2025 → 2025 06 04).
+     - Convert two-part month-year like "3.25" into YYYY MM.
+     - Convert "No/Nr/Sonderheft X.YYYY" when X≤12 into YYYY MM.
+     - Preserve pure issue/volume prefixes and other digit runs untouched.
+     """
+     title = title.strip()
+
+     # 0) Bug: DD.YYYY.YYYY -> treat second YYYY's last two digits as month
+     def repl_bug(match):
+         d = int(match.group(1))
+         m_hint = match.group(2)
+         y = int(match.group(3))
+         m = int(m_hint[-2:])
+         try:
+             date(y, m, d)
+             return f"{y:04d} {m:02d} {d:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(\d{1,2})\.(20\d{2})\.(20\d{2})\b", repl_bug, title)
+
+     # 1) DD.MM.YYYY -> "YYYY MM DD"
+     def repl_dmy(match):
+         d, m, y = map(int, match.groups())
+         try:
+             date(y, m, d)
+             return f"{y:04d} {m:02d} {d:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(\d{1,2})\.(\d{1,2})\.(\d{4})\b", repl_dmy, title)
+
+     # 2) DD[.]? MonthName YYYY (optional 'vom') -> "YYYY MM DD"
+     def repl_dmony(match):
+         d = int(match.group(1))
+         name = match.group(2)
+         y = int(match.group(3))
+         mm = _month_num(name)
+         if mm:
+             try:
+                 date(y, mm, d)
+                 return f"{y:04d} {mm:02d} {d:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(
+         r"\b(?:vom\s*)?(\d{1,2})\.?\s+([A-Za-zÄÖÜäöüß]+)\s+(\d{4})\b",
+         repl_dmony,
+         title,
+         flags=re.IGNORECASE
+     )
+
+     # 3) MonthName YYYY -> "YYYY MM"
+     def repl_mony(match):
+         name = match.group(1)
+         y = int(match.group(2))
+         mm = _month_num(name)
+         if mm:
+             try:
+                 date(y, mm, 1)
+                 return f"{y:04d} {mm:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(r"\b([A-Za-zÄÖÜäöüß]+)\s+(\d{4})\b", repl_mony, title, flags=re.IGNORECASE)
+
+     # 4) YYYYMMDD -> "YYYY MM DD"
+     def repl_ymd(match):
+         y = int(match.group(1))
+         m = int(match.group(2))
+         d = int(match.group(3))
+         try:
+             date(y, m, d)
+             return f"{y:04d} {m:02d} {d:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(20\d{2})(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])\b", repl_ymd, title)
+
+     # 5) YYYYMM -> "YYYY MM"
+     def repl_ym(match):
+         y = int(match.group(1))
+         m = int(match.group(2))
+         try:
+             date(y, m, 1)
+             return f"{y:04d} {m:02d}"
+         except ValueError:
+             return match.group(0)
+
+     title = re.sub(r"\b(20\d{2})(0[1-9]|1[0-2])\b", repl_ym, title)
+
+     # 6) X.YY (month.two-digit-year) -> "YYYY MM" (e.g., 3.25 -> 2025 03)
+     def repl_my2(match):
+         mm = int(match.group(1))
+         yy = int(match.group(2))
+         y = 2000 + yy
+         if 1 <= mm <= 12:
+             try:
+                 date(y, mm, 1)
+                 return f"{y:04d} {mm:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(r"\b([1-9]|1[0-2])\.(\d{2})\b", repl_my2, title)
+
+     # 7) No/Nr/Sonderheft <1-12>.<YYYY> -> "YYYY MM"
+     def repl_nmy(match):
+         num = int(match.group(1))
+         y = int(match.group(2))
+         if 1 <= num <= 12:
+             try:
+                 date(y, num, 1)
+                 return f"{y:04d} {num:02d}"
+             except ValueError:
+                 pass
+         return match.group(0)
+
+     title = re.sub(
+         r"\b(?:No|Nr|Sonderheft)\s*(\d{1,2})\.(\d{4})\b",
+         repl_nmy,
+         title,
+         flags=re.IGNORECASE
+     )
+
+     return title
+
+
+ # Helper for month name mapping
+ def _month_num(name: str) -> int:
+     name = name.lower()
+     mmap = {
+         'januar': 1, 'jan': 1, 'februar': 2, 'feb': 2, 'märz': 3, 'maerz': 3, 'mär': 3, 'mrz': 3, 'mae': 3,
+         'april': 4, 'apr': 4, 'mai': 5, 'juni': 6, 'jun': 6, 'juli': 7, 'jul': 7, 'august': 8, 'aug': 8,
+         'september': 9, 'sep': 9, 'oktober': 10, 'okt': 10, 'november': 11, 'nov': 11, 'dezember': 12, 'dez': 12,
+         'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6, 'july': 7, 'august': 8,
+         'september': 9, 'october': 10, 'november': 11, 'december': 12
+     }
+     return mmap.get(name)
+
+
+ def get_recently_searched(shared_state, context, timeout_seconds):
+     recently_searched = shared_state.values.get(context, {})
+     threshold = datetime.now() - timedelta(seconds=timeout_seconds)
+     keys_to_remove = [key for key, value in recently_searched.items() if value["timestamp"] <= threshold]
+     for key in keys_to_remove:
+         debug(f"Removing '{key}' from recently searched memory ({context})...")
+         del recently_searched[key]
+     return recently_searched
+
+
  def download_package(links, title, password, package_id):
      device = get_device()
-     added = device.linkgrabber.add_links(params=[
+     downloaded = device.linkgrabber.add_links(params=[
          {
              "autostart": False,
-             "links": str(links).replace(" ", ""),
+             "links": json.dumps(links),
              "packageName": title,
              "extractPassword": password,
              "priority": "DEFAULT",
@@ -199,67 +839,4 @@ def download_package(links, title, password, package_id):
              "overwritePackagizerRules": True
          }
      ])
-
-     package_uuids = []
-     link_ids = []
-     archive_id = None
-
-     for _ in range(30):
-         try:
-             collecting = device.linkgrabber.is_collecting()
-             if not collecting:
-                 links = device.linkgrabber.query_links()
-                 for link in links:
-                     if link["comment"] == package_id:
-                         link_id = link["uuid"]
-                         if link_id not in link_ids:
-                             link_ids.append(link_id)
-                         package_uuid = link["packageUUID"]
-                         if package_uuid not in package_uuids:
-                             package_uuids.append(package_uuid)
-
-                 if link_ids and package_uuids:
-                     archive = device.extraction.get_archive_info(link_ids=link_ids, package_ids=package_uuids)
-                     if archive:
-                         archive_id = archive[0].get("archiveId", None)
-                         if archive_id:
-                             break # Exit the loop as archive_id is found
-
-         except Exception as e:
-             print(f"An error occurred: {e}")
-
-         time.sleep(1)
-
-     if not link_ids and not package_uuids:
-         print(f"No links or packages found within 30 seconds! Adding {title} package failed.")
-         return False
-
-     if not archive_id:
-         print(f"Archive ID for {title} not found! Release may not be compressed.")
-     else:
-         settings = {
-             "autoExtract": True,
-             "removeDownloadLinksAfterExtraction": False,
-             "removeFilesAfterExtraction": True
-         }
-         settings_set = device.extraction.set_archive_settings(archive_id, archive_settings=settings)
-         if not settings_set:
-             print(f"Failed to set archive settings for {title}!")
-
-     time.sleep(3)
-     links = device.linkgrabber.query_links()
-     for link in links:
-         if link["comment"] == package_id:
-             link_id = link["uuid"]
-             if link_id not in link_ids:
-                 link_ids.append(link_id)
-             package_uuid = link["packageUUID"]
-             if package_uuid not in package_uuids:
-                 package_uuids.append(package_uuid)
-
-     try:
-         device.linkgrabber.move_to_downloadlist(link_ids, package_uuids)
-     except Exception as e:
-         print(f"Failed to start download for {title}: {e}")
-         return False
-     return True
+     return downloaded