quasarr 1.21.0__tar.gz → 1.21.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (83)
  1. {quasarr-1.21.0 → quasarr-1.21.1}/PKG-INFO +1 -1
  2. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/version.py +1 -1
  3. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/dl.py +89 -61
  4. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/storage/setup.py +1 -1
  5. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/PKG-INFO +1 -1
  6. {quasarr-1.21.0 → quasarr-1.21.1}/LICENSE +0 -0
  7. {quasarr-1.21.0 → quasarr-1.21.1}/README.md +0 -0
  8. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/__init__.py +0 -0
  9. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/api/__init__.py +0 -0
  10. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/api/arr/__init__.py +0 -0
  11. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/api/captcha/__init__.py +0 -0
  12. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/api/config/__init__.py +0 -0
  13. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/api/sponsors_helper/__init__.py +0 -0
  14. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/api/statistics/__init__.py +0 -0
  15. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/__init__.py +0 -0
  16. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/linkcrypters/__init__.py +0 -0
  17. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/linkcrypters/al.py +0 -0
  18. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/linkcrypters/filecrypt.py +0 -0
  19. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/linkcrypters/hide.py +0 -0
  20. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/packages/__init__.py +0 -0
  21. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/__init__.py +0 -0
  22. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/al.py +0 -0
  23. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/by.py +0 -0
  24. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/dd.py +0 -0
  25. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/dj.py +0 -0
  26. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/dl.py +0 -0
  27. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/dt.py +0 -0
  28. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/dw.py +0 -0
  29. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/he.py +0 -0
  30. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/mb.py +0 -0
  31. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/nk.py +0 -0
  32. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/nx.py +0 -0
  33. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/sf.py +0 -0
  34. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/sj.py +0 -0
  35. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/sl.py +0 -0
  36. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/wd.py +0 -0
  37. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/downloads/sources/wx.py +0 -0
  38. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/__init__.py +0 -0
  39. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/cloudflare.py +0 -0
  40. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/html_images.py +0 -0
  41. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/html_templates.py +0 -0
  42. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/imdb_metadata.py +0 -0
  43. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/log.py +0 -0
  44. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/myjd_api.py +0 -0
  45. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/notifications.py +0 -0
  46. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/obfuscated.py +0 -0
  47. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/sessions/__init__.py +0 -0
  48. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/sessions/al.py +0 -0
  49. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/sessions/dd.py +0 -0
  50. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/sessions/dl.py +0 -0
  51. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/sessions/nx.py +0 -0
  52. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/shared_state.py +0 -0
  53. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/statistics.py +0 -0
  54. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/providers/web_server.py +0 -0
  55. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/__init__.py +0 -0
  56. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/__init__.py +0 -0
  57. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/al.py +0 -0
  58. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/by.py +0 -0
  59. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/dd.py +0 -0
  60. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/dj.py +0 -0
  61. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/dt.py +0 -0
  62. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/dw.py +0 -0
  63. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/fx.py +0 -0
  64. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/he.py +0 -0
  65. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/mb.py +0 -0
  66. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/nk.py +0 -0
  67. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/nx.py +0 -0
  68. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/sf.py +0 -0
  69. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/sj.py +0 -0
  70. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/sl.py +0 -0
  71. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/wd.py +0 -0
  72. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/search/sources/wx.py +0 -0
  73. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/storage/__init__.py +0 -0
  74. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/storage/config.py +0 -0
  75. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr/storage/sqlite_database.py +0 -0
  76. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/SOURCES.txt +0 -0
  77. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/dependency_links.txt +0 -0
  78. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/entry_points.txt +0 -0
  79. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/not-zip-safe +0 -0
  80. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/requires.txt +0 -0
  81. {quasarr-1.21.0 → quasarr-1.21.1}/quasarr.egg-info/top_level.txt +0 -0
  82. {quasarr-1.21.0 → quasarr-1.21.1}/setup.cfg +0 -0
  83. {quasarr-1.21.0 → quasarr-1.21.1}/setup.py +0 -0
--- quasarr-1.21.0/PKG-INFO
+++ quasarr-1.21.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 1.21.0
+Version: 1.21.1
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Home-page: https://github.com/rix1337/Quasarr
 Author: rix1337
--- quasarr-1.21.0/quasarr/providers/version.py
+++ quasarr-1.21.1/quasarr/providers/version.py
@@ -8,7 +8,7 @@ import requests
 
 
 def get_version():
-    return "1.21.0"
+    return "1.21.1"
 
 
 def get_latest_version():
--- quasarr-1.21.0/quasarr/search/sources/dl.py
+++ quasarr-1.21.1/quasarr/search/sources/dl.py
@@ -4,21 +4,17 @@
 
 import re
 import time
-import warnings
 from base64 import urlsafe_b64encode
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime
 from html import unescape
 
 from bs4 import BeautifulSoup
-from bs4 import XMLParsedAsHTMLWarning
 
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dl import retrieve_and_validate_session, invalidate_session, fetch_via_requests_session
 
-warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)  # we dont want to use lxml
-
 hostname = "dl"
 supported_mirrors = []
 
@@ -37,11 +33,18 @@ def normalize_title_for_sonarr(title):
 
 def dl_feed(shared_state, start_time, request_from, mirror=None):
     """
-    Parse the RSS feed and return releases.
+    Parse the correct forum and return releases.
     """
     releases = []
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
+    if "lazylibrarian" in request_from.lower():
+        forum = "magazine-zeitschriften.72"
+    elif "radarr" in request_from.lower():
+        forum = "hd.8"
+    else:
+        forum = "hd.14"
+
     if not host:
         debug(f"{hostname}: hostname not configured")
         return releases
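
Note: dl_feed no longer pulls the global RSS index; it picks a forum slug based on which *arr client made the request and builds the forum URL from that slug. A minimal sketch of the selection and the resulting URL, assuming a hypothetical caller string and a placeholder hostname:

    request_from = "Radarr/5.14.0.9383"   # hypothetical caller string
    host = "example.tld"                  # placeholder hostname

    if "lazylibrarian" in request_from.lower():
        forum = "magazine-zeitschriften.72"
    elif "radarr" in request_from.lower():
        forum = "hd.8"
    else:
        forum = "hd.14"

    forum_url = f"https://www.{host}/forums/{forum}/?order=post_date&direction=desc"
    # -> https://www.example.tld/forums/hd.8/?order=post_date&direction=desc
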
@@ -52,49 +55,61 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
             info(f"Could not retrieve valid session for {host}")
             return releases
 
-        # Instead we should parse the HTML for the correct *arr client
-        rss_url = f'https://www.{host}/forums/-/index.rss'
-        response = sess.get(rss_url, timeout=30)
+        forum_url = f'https://www.{host}/forums/{forum}/?order=post_date&direction=desc'
+        response = sess.get(forum_url, timeout=30)
 
         if response.status_code != 200:
-            info(f"{hostname}: RSS feed returned status {response.status_code}")
+            info(f"{hostname}: Forum request failed with {response.status_code}")
             return releases
 
         soup = BeautifulSoup(response.content, 'html.parser')
-        items = soup.find_all('item')
+
+        # Find all thread items in the forum
+        items = soup.select('div.structItem.structItem--thread')
 
         if not items:
-            debug(f"{hostname}: No entries found in RSS feed")
+            debug(f"{hostname}: No entries found in Forum")
             return releases
 
         for item in items:
             try:
-                title_tag = item.find('title')
-                if not title_tag:
+                # Extract title from the thread
+                title_elem = item.select_one('div.structItem-title a')
+                if not title_elem:
                     continue
 
-                title = title_tag.get_text(strip=True)
+                title = title_elem.get_text(strip=True)
                 if not title:
                     continue
 
                 title = unescape(title)
-                title = title.replace(']]>', '').replace('<![CDATA[', '')
                 title = normalize_title_for_sonarr(title)
 
-                item_text = item.get_text()
-                thread_url = None
-                match = re.search(r'https://[^\s]+/threads/[^\s]+', item_text)
-                if match:
-                    thread_url = match.group(0)
+                # Extract thread URL
+                thread_url = title_elem.get('href')
                 if not thread_url:
                     continue
 
-                pub_date = item.find('pubdate')
-                if pub_date:
-                    date_str = pub_date.get_text(strip=True)
-                else:
-                    # Fallback: use current time if no pubDate found
-                    date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+                # Make sure URL is absolute
+                if thread_url.startswith('/'):
+                    thread_url = f"https://www.{host}{thread_url}"
+
+                # Extract date and convert to RFC 2822 format
+                date_str = None
+                date_elem = item.select_one('time.u-dt')
+                if date_elem:
+                    iso_date = date_elem.get('datetime', '')
+                    if iso_date:
+                        try:
+                            # Parse ISO format and convert to RFC 2822
+                            dt = datetime.fromisoformat(iso_date.replace('Z', '+00:00'))
+                            date_str = dt.strftime("%a, %d %b %Y %H:%M:%S %z")
+                        except Exception:
+                            date_str = None
+
+                # Fallback: use current time if no date found
+                if not date_str:
+                    date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %z")
 
                 mb = 0
                 imdb_id = None
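
Note: release dates are now read from each thread's time element (class u-dt) instead of an RSS pubDate, parsed as ISO 8601 and reformatted as RFC 2822. A standalone sketch of that conversion, using a hypothetical datetime attribute value:

    from datetime import datetime

    iso_date = "2024-01-15T18:32:00+01:00"  # hypothetical <time class="u-dt" datetime="..."> value
    dt = datetime.fromisoformat(iso_date.replace('Z', '+00:00'))
    print(dt.strftime("%a, %d %b %Y %H:%M:%S %z"))  # Mon, 15 Jan 2024 18:32:00 +0100

The replace('Z', '+00:00') step keeps the parse working on Python versions where fromisoformat does not accept a trailing 'Z'.
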
@@ -120,11 +135,11 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
                 })
 
             except Exception as e:
-                debug(f"{hostname}: error parsing RSS entry: {e}")
+                debug(f"{hostname}: error parsing Forum item: {e}")
                 continue
 
     except Exception as e:
-        info(f"{hostname}: RSS feed error: {e}")
+        info(f"{hostname}: Forum feed error: {e}")
         invalidate_session(shared_state)
 
     elapsed = time.time() - start_time
@@ -132,6 +147,23 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
+def _replace_umlauts(text):
+    replacements = {
+        'ä': 'ae',
+        'ö': 'oe',
+        'ü': 'ue',
+        'Ä': 'Ae',
+        'Ö': 'Oe',
+        'Ü': 'Ue',
+        'ß': 'ss'
+    }
+
+    for umlaut, replacement in replacements.items():
+        text = text.replace(umlaut, replacement)
+
+    return text
+
+
 def _search_single_page(shared_state, host, search_string, search_id, page_num, imdb_id, mirror, request_from, season,
                         episode):
     """
@@ -139,6 +171,8 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
     """
     page_releases = []
 
+    search_string = _replace_umlauts(search_string)
+
     try:
         if page_num == 1:
             search_params = {
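
Note: search strings are now transliterated with the new _replace_umlauts helper before the query is built, so German umlauts and ß never reach the board search. A quick usage example with made-up titles:

    print(_replace_umlauts("Die Höhle der Löwen"))  # Die Hoehle der Loewen
    print(_replace_umlauts("Fußballträume"))        # Fussballtraeume
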
@@ -247,8 +281,8 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
 def dl_search(shared_state, start_time, request_from, search_string,
               mirror=None, season=None, episode=None):
     """
-    Search with parallel pagination (max 5 pages) to find best quality releases.
-    Requests are fired in parallel to minimize search time.
+    Search with sequential pagination (max 5 pages) to find best quality releases.
+    Stops searching if a page returns 0 results.
     """
     releases = []
     host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -264,8 +298,8 @@ def dl_search(shared_state, start_time, request_from, search_string,
     search_string = unescape(search_string)
     max_pages = 5
 
-    info(
-        f"{hostname}: Starting parallel paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
+    debug(
+        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
 
     try:
         sess = retrieve_and_validate_session(shared_state)
@@ -273,42 +307,36 @@ def dl_search(shared_state, start_time, request_from, search_string,
             info(f"Could not retrieve valid session for {host}")
             return releases
 
-        # First, do page 1 to get the search ID
-        page_1_releases, search_id = _search_single_page(
-            shared_state, host, search_string, None, 1,
-            imdb_id, mirror, request_from, season, episode
-        )
-        releases.extend(page_1_releases)
+        search_id = None
 
-        if not search_id:
-            info(f"{hostname}: Could not extract search ID, stopping pagination")
-            return releases
+        # Sequential search through pages
+        for page_num in range(1, max_pages + 1):
+            page_releases, extracted_search_id = _search_single_page(
+                shared_state, host, search_string, search_id, page_num,
+                imdb_id, mirror, request_from, season, episode
+            )
+
+            # Update search_id from first page
+            if page_num == 1:
+                search_id = extracted_search_id
+                if not search_id:
+                    info(f"{hostname}: Could not extract search ID, stopping pagination")
+                    break
+
+            # Add releases from this page
+            releases.extend(page_releases)
+            debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
 
-        # Now fire remaining pages in parallel
-        with ThreadPoolExecutor(max_workers=4) as executor:
-            futures = {}
-            for page_num in range(2, max_pages + 1):
-                future = executor.submit(
-                    _search_single_page,
-                    shared_state, host, search_string, search_id, page_num,
-                    imdb_id, mirror, request_from, season, episode
-                )
-                futures[future] = page_num
-
-            for future in as_completed(futures):
-                page_num = futures[future]
-                try:
-                    page_releases, _ = future.result()
-                    releases.extend(page_releases)
-                    debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
-                except Exception as e:
-                    info(f"{hostname}: [Page {page_num}] failed: {e}")
+            # Stop if this page returned 0 results
+            if len(page_releases) == 0:
+                debug(f"{hostname}: [Page {page_num}] returned 0 results, stopping pagination")
+                break
 
     except Exception as e:
         info(f"{hostname}: search error: {e}")
         invalidate_session(shared_state)
 
-    info(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
+    debug(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
 
     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
--- quasarr-1.21.0/quasarr/storage/setup.py
+++ quasarr-1.21.1/quasarr/storage/setup.py
@@ -197,7 +197,7 @@ def save_hostnames(shared_state, timeout=5, first_run=True):
     if not first_run:
         # Append restart notice for specific sites that actually changed
         for site in changed_sites:
-            if site.lower() in {'al', 'dd', 'nx'}:
+            if site.lower() in {'al', 'dd', 'dl', 'nx'}:
                 optional_text += f"{site.upper()}: You must restart Quasarr and follow additional steps to start using this site.<br>"
 
     return render_success(success_msg, timeout, optional_text=optional_text)
--- quasarr-1.21.0/quasarr.egg-info/PKG-INFO
+++ quasarr-1.21.1/quasarr.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 1.21.0
+Version: 1.21.1
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Home-page: https://github.com/rix1337/Quasarr
 Author: rix1337