quasarr 1.21.0__tar.gz → 1.21.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of quasarr might be problematic.

Files changed (83)
  1. {quasarr-1.21.0 → quasarr-1.21.2}/PKG-INFO +1 -1
  2. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/api/arr/__init__.py +11 -0
  3. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/version.py +1 -1
  4. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/by.py +2 -1
  5. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/dl.py +113 -75
  6. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/storage/setup.py +1 -1
  7. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/PKG-INFO +1 -1
  8. {quasarr-1.21.0 → quasarr-1.21.2}/LICENSE +0 -0
  9. {quasarr-1.21.0 → quasarr-1.21.2}/README.md +0 -0
  10. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/__init__.py +0 -0
  11. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/api/__init__.py +0 -0
  12. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/api/captcha/__init__.py +0 -0
  13. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/api/config/__init__.py +0 -0
  14. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/api/sponsors_helper/__init__.py +0 -0
  15. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/api/statistics/__init__.py +0 -0
  16. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/__init__.py +0 -0
  17. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/linkcrypters/__init__.py +0 -0
  18. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/linkcrypters/al.py +0 -0
  19. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/linkcrypters/filecrypt.py +0 -0
  20. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/linkcrypters/hide.py +0 -0
  21. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/packages/__init__.py +0 -0
  22. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/__init__.py +0 -0
  23. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/al.py +0 -0
  24. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/by.py +0 -0
  25. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/dd.py +0 -0
  26. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/dj.py +0 -0
  27. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/dl.py +0 -0
  28. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/dt.py +0 -0
  29. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/dw.py +0 -0
  30. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/he.py +0 -0
  31. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/mb.py +0 -0
  32. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/nk.py +0 -0
  33. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/nx.py +0 -0
  34. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/sf.py +0 -0
  35. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/sj.py +0 -0
  36. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/sl.py +0 -0
  37. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/wd.py +0 -0
  38. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/downloads/sources/wx.py +0 -0
  39. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/__init__.py +0 -0
  40. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/cloudflare.py +0 -0
  41. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/html_images.py +0 -0
  42. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/html_templates.py +0 -0
  43. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/imdb_metadata.py +0 -0
  44. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/log.py +0 -0
  45. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/myjd_api.py +0 -0
  46. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/notifications.py +0 -0
  47. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/obfuscated.py +0 -0
  48. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/sessions/__init__.py +0 -0
  49. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/sessions/al.py +0 -0
  50. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/sessions/dd.py +0 -0
  51. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/sessions/dl.py +0 -0
  52. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/sessions/nx.py +0 -0
  53. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/shared_state.py +0 -0
  54. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/statistics.py +0 -0
  55. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/providers/web_server.py +0 -0
  56. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/__init__.py +0 -0
  57. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/__init__.py +0 -0
  58. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/al.py +0 -0
  59. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/dd.py +0 -0
  60. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/dj.py +0 -0
  61. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/dt.py +0 -0
  62. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/dw.py +0 -0
  63. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/fx.py +0 -0
  64. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/he.py +0 -0
  65. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/mb.py +0 -0
  66. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/nk.py +0 -0
  67. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/nx.py +0 -0
  68. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/sf.py +0 -0
  69. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/sj.py +0 -0
  70. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/sl.py +0 -0
  71. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/wd.py +0 -0
  72. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/search/sources/wx.py +0 -0
  73. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/storage/__init__.py +0 -0
  74. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/storage/config.py +0 -0
  75. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr/storage/sqlite_database.py +0 -0
  76. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/SOURCES.txt +0 -0
  77. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/dependency_links.txt +0 -0
  78. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/entry_points.txt +0 -0
  79. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/not-zip-safe +0 -0
  80. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/requires.txt +0 -0
  81. {quasarr-1.21.0 → quasarr-1.21.2}/quasarr.egg-info/top_level.txt +0 -0
  82. {quasarr-1.21.0 → quasarr-1.21.2}/setup.cfg +0 -0
  83. {quasarr-1.21.0 → quasarr-1.21.2}/setup.py +0 -0
--- quasarr-1.21.0/PKG-INFO
+++ quasarr-1.21.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 1.21.0
+Version: 1.21.2
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Home-page: https://github.com/rix1337/Quasarr
 Author: rix1337
--- quasarr-1.21.0/quasarr/api/arr/__init__.py
+++ quasarr-1.21.2/quasarr/api/arr/__init__.py
@@ -353,6 +353,17 @@ def setup_arr_routes(app):
             <enclosure url="{release.get("link", "")}" length="{release.get("size", 0)}" type="application/x-nzb" />
             </item>'''
 
+        if not items:
+            items = f'''
+            <item>
+            <title>No results found</title>
+            <guid isPermaLink="False">0</guid>
+            <link>https://github.com/rix1337/Quasarr</link>
+            <comments>No results matched your search criteria.</comments>
+            <pubDate>{datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")}</pubDate>
+            <enclosure url="https://github.com/rix1337/Quasarr" length="0" type="application/x-nzb" />
+            </item>'''
+
         return f'''<?xml version="1.0" encoding="UTF-8"?>
         <rss>
         <channel>
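A side note on the fallback item above: its pubDate appends a literal +0000 offset to datetime.now(), which returns local wall-clock time, so the advertised offset is only accurate on hosts running in UTC. A minimal sketch of the difference (illustrative, not part of the release; timestamps shown are examples):

    >>> from datetime import datetime, timezone
    >>> # As in the diff: local time stamped with a hard-coded +0000
    >>> datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")  # doctest: +SKIP
    'Mon, 15 Dec 2025 21:43:06 +0000'
    >>> # A genuinely UTC variant, for comparison
    >>> datetime.now(timezone.utc).strftime("%a, %d %b %Y %H:%M:%S +0000")  # doctest: +SKIP
    'Mon, 15 Dec 2025 20:43:06 +0000'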
--- quasarr-1.21.0/quasarr/providers/version.py
+++ quasarr-1.21.2/quasarr/providers/version.py
@@ -8,7 +8,7 @@ import requests
 
 
 def get_version():
-    return "1.21.0"
+    return "1.21.2"
 
 
 def get_latest_version():
--- quasarr-1.21.0/quasarr/search/sources/by.py
+++ quasarr-1.21.2/quasarr/search/sources/by.py
@@ -17,9 +17,10 @@ from quasarr.providers.log import info, debug
 
 hostname = "by"
 supported_mirrors = ["rapidgator", "ddownload", "nitroflare"]
-XXX_REGEX = re.compile(r"\.xxx\.", re.I)
+
 RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
 CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
+XXX_REGEX = re.compile(r"\.xxx\.", re.I)
 IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)")
 
 
--- quasarr-1.21.0/quasarr/search/sources/dl.py
+++ quasarr-1.21.2/quasarr/search/sources/dl.py
@@ -4,23 +4,38 @@
 
 import re
 import time
-import warnings
 from base64 import urlsafe_b64encode
-from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime
 from html import unescape
 
 from bs4 import BeautifulSoup
-from bs4 import XMLParsedAsHTMLWarning
 
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dl import retrieve_and_validate_session, invalidate_session, fetch_via_requests_session
 
-warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)  # we dont want to use lxml
-
 hostname = "dl"
-supported_mirrors = []
+
+RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
+CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
+XXX_REGEX = re.compile(r"\.xxx\.", re.I)
+
+
+def convert_to_rss_date(iso_date_str):
+    """
+    Convert ISO format datetime to RSS date format.
+    DL date format: '2025-12-15T20:43:06+0100'
+    Returns: 'Sun, 15 Dec 2025 20:43:06 +0100'
+    Falls back to current time if conversion fails.
+    """
+    if not iso_date_str:
+        return datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+    try:
+        dt_obj = datetime.fromisoformat(iso_date_str)
+        return dt_obj.strftime("%a, %d %b %Y %H:%M:%S %z")
+    except Exception:
+        return datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
 
 
 def normalize_title_for_sonarr(title):
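For reference, the new helper's input and output formats, checked standalone (illustrative). One caveat worth knowing: datetime.fromisoformat only accepts a colon-less offset such as '+0100' on Python 3.11 or newer, so on older interpreters the except branch fires and the helper silently falls back to the current time:

    >>> from datetime import datetime
    >>> # Parses on Python 3.11+; earlier versions raise ValueError on '+0100'
    >>> dt = datetime.fromisoformat("2025-12-15T20:43:06+0100")
    >>> dt.strftime("%a, %d %b %Y %H:%M:%S %z")
    'Mon, 15 Dec 2025 20:43:06 +0100'

(15 Dec 2025 falls on a Monday, despite the 'Sun' in the docstring example.)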
@@ -37,11 +52,18 @@ def normalize_title_for_sonarr(title):
 
 def dl_feed(shared_state, start_time, request_from, mirror=None):
     """
-    Parse the RSS feed and return releases.
+    Parse the correct forum and return releases.
     """
     releases = []
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
+    if "lazylibrarian" in request_from.lower():
+        forum = "magazine-zeitschriften.72"
+    elif "radarr" in request_from.lower():
+        forum = "hd.8"
+    else:
+        forum = "hd.14"
+
     if not host:
         debug(f"{hostname}: hostname not configured")
         return releases
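The chosen slug is interpolated into the forum URL built in the next hunk; for a Radarr request the fetched page would look like this (the hostname below is a placeholder, the real value comes from the Hostnames config):

    >>> host = "example.tld"  # placeholder
    >>> forum = "hd.8"        # branch chosen above for Radarr
    >>> f"https://www.{host}/forums/{forum}/?order=post_date&direction=desc"
    'https://www.example.tld/forums/hd.8/?order=post_date&direction=desc'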
@@ -52,49 +74,49 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
             info(f"Could not retrieve valid session for {host}")
             return releases
 
-        # Instead we should parse the HTML for the correct *arr client
-        rss_url = f'https://www.{host}/forums/-/index.rss'
-        response = sess.get(rss_url, timeout=30)
+        forum_url = f'https://www.{host}/forums/{forum}/?order=post_date&direction=desc'
+        response = sess.get(forum_url, timeout=30)
 
         if response.status_code != 200:
-            info(f"{hostname}: RSS feed returned status {response.status_code}")
+            info(f"{hostname}: Forum request failed with {response.status_code}")
             return releases
 
         soup = BeautifulSoup(response.content, 'html.parser')
-        items = soup.find_all('item')
+
+        # Find all thread items in the forum
+        items = soup.select('div.structItem.structItem--thread')
 
         if not items:
-            debug(f"{hostname}: No entries found in RSS feed")
+            debug(f"{hostname}: No entries found in Forum")
             return releases
 
         for item in items:
             try:
-                title_tag = item.find('title')
-                if not title_tag:
+                # Extract title from the thread
+                title_elem = item.select_one('div.structItem-title a')
+                if not title_elem:
                     continue
 
-                title = title_tag.get_text(strip=True)
+                title = title_elem.get_text(strip=True)
                 if not title:
                     continue
 
                 title = unescape(title)
-                title = title.replace(']]>', '').replace('<![CDATA[', '')
                 title = normalize_title_for_sonarr(title)
 
-                item_text = item.get_text()
-                thread_url = None
-                match = re.search(r'https://[^\s]+/threads/[^\s]+', item_text)
-                if match:
-                    thread_url = match.group(0)
+                # Extract thread URL
+                thread_url = title_elem.get('href')
                 if not thread_url:
                     continue
 
-                pub_date = item.find('pubdate')
-                if pub_date:
-                    date_str = pub_date.get_text(strip=True)
-                else:
-                    # Fallback: use current time if no pubDate found
-                    date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+                # Make sure URL is absolute
+                if thread_url.startswith('/'):
+                    thread_url = f"https://www.{host}{thread_url}"
+
+                # Extract date and convert to RFC 2822 format
+                date_elem = item.select_one('time.u-dt')
+                iso_date = date_elem.get('datetime', '') if date_elem else ''
+                published = convert_to_rss_date(iso_date)
 
                 mb = 0
                 imdb_id = None
@@ -113,18 +135,18 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
                         "link": link,
                         "mirror": mirror,
                         "size": mb * 1024 * 1024,
-                        "date": date_str,
+                        "date": published,
                         "source": thread_url
                     },
                     "type": "protected"
                 })
 
             except Exception as e:
-                debug(f"{hostname}: error parsing RSS entry: {e}")
+                debug(f"{hostname}: error parsing Forum item: {e}")
                 continue
 
     except Exception as e:
-        info(f"{hostname}: RSS feed error: {e}")
+        info(f"{hostname}: Forum feed error: {e}")
         invalidate_session(shared_state)
 
     elapsed = time.time() - start_time
@@ -132,6 +154,23 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
+def _replace_umlauts(text):
+    replacements = {
+        'ä': 'ae',
+        'ö': 'oe',
+        'ü': 'ue',
+        'Ä': 'Ae',
+        'Ö': 'Oe',
+        'Ü': 'Ue',
+        'ß': 'ss'
+    }
+
+    for umlaut, replacement in replacements.items():
+        text = text.replace(umlaut, replacement)
+
+    return text
+
+
 def _search_single_page(shared_state, host, search_string, search_id, page_num, imdb_id, mirror, request_from, season,
                         episode):
     """
@@ -139,6 +178,8 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
     """
     page_releases = []
 
+    search_string = _replace_umlauts(search_string)
+
     try:
         if page_num == 1:
             search_params = {
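A quick illustration of what the transliteration applied above does to a hypothetical German search term (assuming the _replace_umlauts helper from the previous hunk):

    >>> _replace_umlauts("Größe über alles")
    'Groesse ueber alles'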
@@ -194,6 +235,15 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
             title = unescape(title)
             title_normalized = normalize_title_for_sonarr(title)
 
+            # Filter: Skip if no resolution or codec info (unless LazyLibrarian)
+            if 'lazylibrarian' not in request_from.lower():
+                if not (RESOLUTION_REGEX.search(title_normalized) or CODEC_REGEX.search(title_normalized)):
+                    continue
+
+            # Filter: Skip XXX content unless explicitly searched for
+            if XXX_REGEX.search(title_normalized) and 'xxx' not in search_string.lower():
+                continue
+
             thread_url = title_elem.get('href')
             if thread_url.startswith('/'):
                 thread_url = f"https://www.{host}{thread_url}"
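To make the two new filters concrete, here is how the module-level patterns behave on some hypothetical release names (standalone sketch; the regexes are copied from the header hunk above):

    >>> import re
    >>> RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
    >>> CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
    >>> XXX_REGEX = re.compile(r"\.xxx\.", re.I)
    >>> bool(RESOLUTION_REGEX.search("Show.S01E01.German.1080p.WEB-GRP"))  # kept: resolution tag
    True
    >>> bool(CODEC_REGEX.search("Movie.2024.German.BluRay.x265-GRP"))      # kept: codec tag
    True
    >>> t = "Magazin.Ausgabe.12.GERMAN"
    >>> bool(RESOLUTION_REGEX.search(t) or CODEC_REGEX.search(t))          # dropped unless LazyLibrarian
    False
    >>> bool(XXX_REGEX.search("Some.Title.XXX.1080p.WEB-GRP"))             # dropped unless 'xxx' was searched
    True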
@@ -201,16 +251,10 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
             if not shared_state.is_valid_release(title_normalized, request_from, search_string, season, episode):
                 continue
 
-            minor_info = item.select_one('div.contentRow-minor')
-            date_str = ""
-            if minor_info:
-                date_elem = minor_info.select_one('time.u-dt')
-                if date_elem:
-                    date_str = date_elem.get('datetime', '')
-
-            # Fallback: use current time if no date found
-            if not date_str:
-                date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+            # Extract date and convert to RFC 2822 format
+            date_elem = item.select_one('time.u-dt')
+            iso_date = date_elem.get('datetime', '') if date_elem else ''
+            published = convert_to_rss_date(iso_date)
 
             mb = 0
             password = ""
@@ -228,7 +272,7 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
                     "link": link,
                     "mirror": mirror,
                     "size": mb * 1024 * 1024,
-                    "date": date_str,
+                    "date": published,
                     "source": thread_url
                 },
                 "type": "protected"
@@ -247,8 +291,8 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
 def dl_search(shared_state, start_time, request_from, search_string,
               mirror=None, season=None, episode=None):
     """
-    Search with parallel pagination (max 5 pages) to find best quality releases.
-    Requests are fired in parallel to minimize search time.
+    Search with sequential pagination (max 5 pages) to find best quality releases.
+    Stops searching if a page returns 0 results.
     """
     releases = []
     host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -264,8 +308,8 @@ def dl_search(shared_state, start_time, request_from, search_string,
     search_string = unescape(search_string)
     max_pages = 5
 
-    info(
-        f"{hostname}: Starting parallel paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
+    debug(
+        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
 
     try:
         sess = retrieve_and_validate_session(shared_state)
@@ -273,42 +317,36 @@ def dl_search(shared_state, start_time, request_from, search_string,
             info(f"Could not retrieve valid session for {host}")
             return releases
 
-        # First, do page 1 to get the search ID
-        page_1_releases, search_id = _search_single_page(
-            shared_state, host, search_string, None, 1,
-            imdb_id, mirror, request_from, season, episode
-        )
-        releases.extend(page_1_releases)
+        search_id = None
 
-        if not search_id:
-            info(f"{hostname}: Could not extract search ID, stopping pagination")
-            return releases
+        # Sequential search through pages
+        for page_num in range(1, max_pages + 1):
+            page_releases, extracted_search_id = _search_single_page(
+                shared_state, host, search_string, search_id, page_num,
+                imdb_id, mirror, request_from, season, episode
+            )
+
+            # Update search_id from first page
+            if page_num == 1:
+                search_id = extracted_search_id
+                if not search_id:
+                    info(f"{hostname}: Could not extract search ID, stopping pagination")
+                    break
+
+            # Add releases from this page
+            releases.extend(page_releases)
+            debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
 
-        # Now fire remaining pages in parallel
-        with ThreadPoolExecutor(max_workers=4) as executor:
-            futures = {}
-            for page_num in range(2, max_pages + 1):
-                future = executor.submit(
-                    _search_single_page,
-                    shared_state, host, search_string, search_id, page_num,
-                    imdb_id, mirror, request_from, season, episode
-                )
-                futures[future] = page_num
-
-            for future in as_completed(futures):
-                page_num = futures[future]
-                try:
-                    page_releases, _ = future.result()
-                    releases.extend(page_releases)
-                    debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
-                except Exception as e:
-                    info(f"{hostname}: [Page {page_num}] failed: {e}")
+            # Stop if this page returned 0 results
+            if len(page_releases) == 0:
+                debug(f"{hostname}: [Page {page_num}] returned 0 results, stopping pagination")
+                break
 
     except Exception as e:
         info(f"{hostname}: search error: {e}")
         invalidate_session(shared_state)
 
-    info(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
+    debug(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
 
     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
--- quasarr-1.21.0/quasarr/storage/setup.py
+++ quasarr-1.21.2/quasarr/storage/setup.py
@@ -197,7 +197,7 @@ def save_hostnames(shared_state, timeout=5, first_run=True):
     if not first_run:
         # Append restart notice for specific sites that actually changed
         for site in changed_sites:
-            if site.lower() in {'al', 'dd', 'nx'}:
+            if site.lower() in {'al', 'dd', 'dl', 'nx'}:
                 optional_text += f"{site.upper()}: You must restart Quasarr and follow additional steps to start using this site.<br>"
 
     return render_success(success_msg, timeout, optional_text=optional_text)
--- quasarr-1.21.0/quasarr.egg-info/PKG-INFO
+++ quasarr-1.21.2/quasarr.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 1.21.0
+Version: 1.21.2
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Home-page: https://github.com/rix1337/Quasarr
 Author: rix1337