quasarr-2.5.0-py3-none-any.whl → quasarr-2.6.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr might be problematic. Click here for more details.

@@ -143,6 +143,7 @@ def render_centered_html(inner_content, footer_content=""):
143
143
  justify-content: center;
144
144
  margin-bottom: 0.5rem;
145
145
  font-size: 2rem;
146
+ cursor: pointer;
146
147
  }
147
148
  .logo {
148
149
  width: 48px;
@@ -333,6 +334,16 @@ def render_centered_html(inner_content, footer_content=""):
333
334
  justify-content: flex-end;
334
335
  }
335
336
  </style>
337
+ <script>
338
+ document.addEventListener('DOMContentLoaded', function() {
339
+ const h1 = document.querySelector('h1');
340
+ if (h1) {
341
+ h1.onclick = function() {
342
+ window.location.href = '/';
343
+ };
344
+ }
345
+ });
346
+ </script>
336
347
  </head>"""
337
348
  )
338
349
 
@@ -417,7 +428,7 @@ def render_button(text, button_type="primary", attributes=None):
417
428
 
418
429
  def render_form(header, form="", script="", footer_content=""):
419
430
  content = f'''
420
- <h1><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
431
+ <h1 onclick="window.location.href='/'"><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
421
432
  <h2>{header}</h2>
422
433
  {form}
423
434
  {script}
@@ -446,7 +457,7 @@ def render_success(message, timeout=10, optional_text=""):
446
457
  }}, 1000);
447
458
  </script>
448
459
  """
449
- content = f'''<h1><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
460
+ content = f'''<h1 onclick="window.location.href='/'"><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
450
461
  <h2>{message}</h2>
451
462
  {optional_text}
452
463
  {button_html}
@@ -459,7 +470,7 @@ def render_fail(message):
459
470
  button_html = render_button(
460
471
  "Back", "secondary", {"onclick": "window.location.href='/'"}
461
472
  )
462
- return render_centered_html(f"""<h1><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
473
+ return render_centered_html(f"""<h1 onclick="window.location.href='/'"><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
463
474
  <h2>{message}</h2>
464
475
  {button_html}
465
476
  """)
@@ -371,6 +371,7 @@ def fetch_via_requests_session(
371
371
  target_url: str,
372
372
  post_data: dict = None,
373
373
  timeout: int = 30,
374
+ year: int = None,
374
375
  ):
375
376
  """
376
377
  - method: "GET" or "POST"
@@ -383,6 +384,9 @@ def fetch_via_requests_session(
383
384
  f"{hostname}: site not usable (login skipped or no credentials)"
384
385
  )
385
386
 
387
+ if year:
388
+ sess.cookies["filter"] = f'{{"year":{{"from":{year},"to":{year}}}}}'
389
+
386
390
  # Execute request
387
391
  if method.upper() == "GET":
388
392
  r = sess.get(target_url, timeout=timeout)
@@ -171,19 +171,18 @@ def set_device_from_config():
171
171
 
172
172
  def check_device(device):
173
173
  try:
174
- valid = (
175
- isinstance(device, (type, Jddevice))
176
- and device.downloadcontroller.get_current_state()
177
- )
178
- except (
179
- AttributeError,
180
- KeyError,
181
- TokenExpiredException,
182
- RequestTimeoutException,
183
- MYJDException,
184
- ):
185
- valid = False
186
- return valid
174
+ if not isinstance(device, (type, Jddevice)):
175
+ return False
176
+
177
+ # Trigger a network request to verify connectivity
178
+ # get_current_state() performs an API call to JDownloader
179
+ state = device.downloadcontroller.get_current_state()
180
+
181
+ if state:
182
+ return True
183
+ return False
184
+ except Exception:
185
+ return False
187
186
 
188
187
 
189
188
  def connect_device():
@@ -627,11 +626,12 @@ def search_string_in_sanitized_title(search_string, title):
627
626
  sanitized_search_string = sanitize_string(search_string)
628
627
  sanitized_title = sanitize_string(title)
629
628
 
629
+ search_regex = r"\b.+\b".join(
630
+ [re.escape(s) for s in sanitized_search_string.split(" ")]
631
+ )
630
632
  # Use word boundaries to ensure full word/phrase match
631
- if re.search(rf"\b{re.escape(sanitized_search_string)}\b", sanitized_title):
632
- debug(
633
- f"Matched search string: {sanitized_search_string} with title: {sanitized_title}"
634
- )
633
+ if re.search(rf"\b{search_regex}\b", sanitized_title):
634
+ debug(f"Matched search string: {search_regex} with title: {sanitized_title}")
635
635
  return True
636
636
  else:
637
637
  debug(
@@ -5,7 +5,7 @@
5
5
  import re
6
6
  import sys
7
7
 
8
- __version__ = "2.5.0"
8
+ __version__ = "2.6.0"
9
9
 
10
10
 
11
11
  def get_version():
@@ -16,6 +16,7 @@ from quasarr.search.sources.dt import dt_feed, dt_search
16
16
  from quasarr.search.sources.dw import dw_feed, dw_search
17
17
  from quasarr.search.sources.fx import fx_feed, fx_search
18
18
  from quasarr.search.sources.he import he_feed, he_search
19
+ from quasarr.search.sources.hs import hs_feed, hs_search
19
20
  from quasarr.search.sources.mb import mb_feed, mb_search
20
21
  from quasarr.search.sources.nk import nk_feed, nk_search
21
22
  from quasarr.search.sources.nx import nx_feed, nx_search
@@ -53,6 +54,7 @@ def get_search_results(
53
54
  dw = shared_state.values["config"]("Hostnames").get("dw")
54
55
  fx = shared_state.values["config"]("Hostnames").get("fx")
55
56
  he = shared_state.values["config"]("Hostnames").get("he")
57
+ hs = shared_state.values["config"]("Hostnames").get("hs")
56
58
  mb = shared_state.values["config"]("Hostnames").get("mb")
57
59
  nk = shared_state.values["config"]("Hostnames").get("nk")
58
60
  nx = shared_state.values["config"]("Hostnames").get("nx")
@@ -77,6 +79,7 @@ def get_search_results(
77
79
  (dw, dw_search),
78
80
  (fx, fx_search),
79
81
  (he, he_search),
82
+ (hs, hs_search),
80
83
  (mb, mb_search),
81
84
  (nk, nk_search),
82
85
  (nx, nx_search),
@@ -108,6 +111,7 @@ def get_search_results(
108
111
  (dw, dw_feed),
109
112
  (fx, fx_feed),
110
113
  (he, he_feed),
114
+ (hs, hs_feed),
111
115
  (mb, mb_feed),
112
116
  (nk, nk_feed),
113
117
  (nx, nx_feed),
@@ -15,7 +15,7 @@ from quasarr.downloads.sources.al import (
15
15
  parse_info_from_feed_entry,
16
16
  )
17
17
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
18
- from quasarr.providers.imdb_metadata import get_localized_title
18
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
19
19
  from quasarr.providers.log import debug, info
20
20
  from quasarr.providers.sessions.al import fetch_via_requests_session, invalidate_session
21
21
 
@@ -122,9 +122,7 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
122
122
  host = shared_state.values["config"]("Hostnames").get(hostname)
123
123
 
124
124
  if not "arr" in request_from.lower():
125
- debug(
126
- f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
127
- )
125
+ debug(f"{hostname}: Skipping {request_from} search (unsupported media type)!")
128
126
  return releases
129
127
 
130
128
  if "Radarr" in request_from:
@@ -274,9 +272,7 @@ def al_search(
274
272
  host = shared_state.values["config"]("Hostnames").get(hostname)
275
273
 
276
274
  if not "arr" in request_from.lower():
277
- debug(
278
- f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
279
- )
275
+ debug(f"{hostname}: Skipping {request_from} search (unsupported media type)!")
280
276
  return releases
281
277
 
282
278
  if "Radarr" in request_from:
@@ -285,7 +281,7 @@ def al_search(
285
281
  valid_type = "series"
286
282
 
287
283
  if mirror and mirror not in supported_mirrors:
288
- debug(f'Mirror "{mirror}" not supported by {hostname}.')
284
+ debug(f'{hostname}: Mirror "{mirror}" not supported.')
289
285
  return releases
290
286
 
291
287
  imdb_id = shared_state.is_imdb_id(search_string)
@@ -303,7 +299,11 @@ def al_search(
303
299
  try:
304
300
  url = f"https://www.{host}/search?q={encoded_search_string}"
305
301
  r = fetch_via_requests_session(
306
- shared_state, method="GET", target_url=url, timeout=10
302
+ shared_state,
303
+ method="GET",
304
+ target_url=url,
305
+ timeout=10,
306
+ year=get_year(imdb_id) if imdb_id else None,
307
307
  )
308
308
  r.raise_for_status()
309
309
  except Exception as e:
@@ -322,7 +322,7 @@ def al_search(
322
322
  last_redirect.url, redirect_location
323
323
  ) # in case of relative URL
324
324
  debug(
325
- f"{search_string} redirected to {absolute_redirect_url} instead of search results page"
325
+ f"{hostname}: {search_string} redirected to {absolute_redirect_url} instead of search results page"
326
326
  )
327
327
 
328
328
  try:
@@ -350,9 +350,13 @@ def al_search(
350
350
  sanitized_search_string = shared_state.sanitize_string(search_string)
351
351
  sanitized_title = shared_state.sanitize_string(name)
352
352
  if not sanitized_search_string in sanitized_title:
353
- debug(f"Search string '{search_string}' doesn't match '{name}'")
353
+ debug(
354
+ f"{hostname}: Search string '{search_string}' doesn't match '{name}'"
355
+ )
354
356
  continue
355
- debug(f"Matched search string '{search_string}' with result '{name}'")
357
+ debug(
358
+ f"{hostname}: Matched search string '{search_string}' with result '{name}'"
359
+ )
356
360
 
357
361
  type_label = None
358
362
  for lbl in body.select("div.label-group a[href]"):
@@ -384,7 +388,7 @@ def al_search(
384
388
  use_cache = ts and ts > datetime.now() - timedelta(seconds=threshold)
385
389
 
386
390
  if use_cache and entry.get("html"):
387
- debug(f"Using cached content for '{url}'")
391
+ debug(f"{hostname}: Using cached content for '{url}'")
388
392
  data_html = entry["html"]
389
393
  else:
390
394
  entry = {"timestamp": datetime.now()}
@@ -13,7 +13,7 @@ import requests
13
13
  from bs4 import BeautifulSoup
14
14
 
15
15
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
16
- from quasarr.providers.imdb_metadata import get_localized_title
16
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
17
17
  from quasarr.providers.log import debug, info
18
18
 
19
19
  hostname = "by"
@@ -232,6 +232,9 @@ def by_search(
232
232
  info(f"Could not extract title from IMDb-ID {imdb_id}")
233
233
  return []
234
234
  search_string = html.unescape(title)
235
+ if not season:
236
+ if year := get_year(imdb_id):
237
+ search_string += f" {year}"
235
238
 
236
239
  base_url = f"https://{by}"
237
240
  q = quote_plus(search_string)
@@ -8,7 +8,7 @@ from base64 import urlsafe_b64encode
8
8
  from datetime import datetime, timezone
9
9
 
10
10
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
11
- from quasarr.providers.imdb_metadata import get_localized_title
11
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
12
12
  from quasarr.providers.log import debug, info
13
13
  from quasarr.providers.sessions.dd import (
14
14
  create_and_persist_session,
@@ -77,6 +77,13 @@ def dd_search(
77
77
  info(f"Could not extract title from IMDb-ID {imdb_id}")
78
78
  return releases
79
79
  search_string = html.unescape(search_string)
80
+ if season:
81
+ search_string += f" S{int(season):02d}"
82
+ if episode:
83
+ search_string += f"E{int(episode):02d}"
84
+ else:
85
+ if year := get_year(imdb_id):
86
+ search_string += f" {year}"
80
87
 
81
88
  if not search_string:
82
89
  search_type = "feed"
@@ -116,7 +123,7 @@ def dd_search(
116
123
  try:
117
124
  if release.get("fake"):
118
125
  debug(
119
- f"Release {release.get('release')} marked as fake. Invalidating {hostname.upper()} session..."
126
+ f"{hostname}: Release {release.get('release')} marked as fake. Invalidating {hostname.upper()} session..."
120
127
  )
121
128
  create_and_persist_session(shared_state)
122
129
  return []
@@ -128,14 +135,19 @@ def dd_search(
128
135
  ):
129
136
  continue
130
137
 
131
- imdb_id = release.get("imdbid", None)
138
+ release_imdb = release.get("imdbid", None)
139
+ if release_imdb and imdb_id and imdb_id != release_imdb:
140
+ debug(
141
+ f"{hostname}: Release {title} IMDb-ID mismatch ({imdb_id} != {release.get('imdbid', None)})"
142
+ )
143
+ continue
132
144
 
133
145
  source = f"https://{dd}/"
134
146
  size_item = extract_size(release.get("size"))
135
147
  mb = shared_state.convert_to_mb(size_item) * 1024 * 1024
136
148
  published = convert_to_rss_date(release.get("when"))
137
149
  payload = urlsafe_b64encode(
138
- f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
150
+ f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb}|{hostname}".encode(
139
151
  "utf-8"
140
152
  )
141
153
  ).decode("utf-8")
@@ -11,7 +11,7 @@ from html import unescape
11
11
  from bs4 import BeautifulSoup
12
12
 
13
13
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
14
- from quasarr.providers.imdb_metadata import get_localized_title
14
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
15
15
  from quasarr.providers.log import debug, info
16
16
  from quasarr.providers.sessions.dl import (
17
17
  fetch_via_requests_session,
@@ -354,6 +354,9 @@ def dl_search(
354
354
  info(f"{hostname}: no title for IMDb {imdb_id}")
355
355
  return releases
356
356
  search_string = title
357
+ if not season:
358
+ if year := get_year(imdb_id):
359
+ search_string += f" {year}"
357
360
 
358
361
  search_string = unescape(search_string)
359
362
  max_search_duration = 7
@@ -371,6 +374,7 @@ def dl_search(
371
374
  search_id = None
372
375
  page_num = 0
373
376
  search_start_time = time.time()
377
+ release_titles_per_page = set()
374
378
 
375
379
  # Sequential search through pages until timeout or no results
376
380
  while (time.time() - search_start_time) < max_search_duration:
@@ -389,6 +393,14 @@ def dl_search(
389
393
  episode,
390
394
  )
391
395
 
396
+ page_release_titles = tuple(pr["details"]["title"] for pr in page_releases)
397
+ if page_release_titles in release_titles_per_page:
398
+ debug(
399
+ f"{hostname}: [Page {page_num}] duplicate page detected, stopping"
400
+ )
401
+ break
402
+ release_titles_per_page.add(page_release_titles)
403
+
392
404
  # Update search_id from first page
393
405
  if page_num == 1:
394
406
  search_id = extracted_search_id