quasarr 2.4.7__py3-none-any.whl → 2.4.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. quasarr/__init__.py +134 -70
  2. quasarr/api/__init__.py +40 -31
  3. quasarr/api/arr/__init__.py +116 -108
  4. quasarr/api/captcha/__init__.py +262 -137
  5. quasarr/api/config/__init__.py +76 -46
  6. quasarr/api/packages/__init__.py +138 -102
  7. quasarr/api/sponsors_helper/__init__.py +29 -16
  8. quasarr/api/statistics/__init__.py +19 -19
  9. quasarr/downloads/__init__.py +165 -72
  10. quasarr/downloads/linkcrypters/al.py +35 -18
  11. quasarr/downloads/linkcrypters/filecrypt.py +107 -52
  12. quasarr/downloads/linkcrypters/hide.py +5 -6
  13. quasarr/downloads/packages/__init__.py +342 -177
  14. quasarr/downloads/sources/al.py +191 -100
  15. quasarr/downloads/sources/by.py +31 -13
  16. quasarr/downloads/sources/dd.py +27 -14
  17. quasarr/downloads/sources/dj.py +1 -3
  18. quasarr/downloads/sources/dl.py +126 -71
  19. quasarr/downloads/sources/dt.py +11 -5
  20. quasarr/downloads/sources/dw.py +28 -14
  21. quasarr/downloads/sources/he.py +32 -24
  22. quasarr/downloads/sources/mb.py +19 -9
  23. quasarr/downloads/sources/nk.py +14 -10
  24. quasarr/downloads/sources/nx.py +8 -18
  25. quasarr/downloads/sources/sf.py +45 -20
  26. quasarr/downloads/sources/sj.py +1 -3
  27. quasarr/downloads/sources/sl.py +9 -5
  28. quasarr/downloads/sources/wd.py +32 -12
  29. quasarr/downloads/sources/wx.py +35 -21
  30. quasarr/providers/auth.py +42 -37
  31. quasarr/providers/cloudflare.py +28 -30
  32. quasarr/providers/hostname_issues.py +2 -1
  33. quasarr/providers/html_images.py +2 -2
  34. quasarr/providers/html_templates.py +22 -14
  35. quasarr/providers/imdb_metadata.py +149 -80
  36. quasarr/providers/jd_cache.py +131 -39
  37. quasarr/providers/log.py +1 -1
  38. quasarr/providers/myjd_api.py +260 -196
  39. quasarr/providers/notifications.py +53 -41
  40. quasarr/providers/obfuscated.py +9 -4
  41. quasarr/providers/sessions/al.py +71 -55
  42. quasarr/providers/sessions/dd.py +21 -14
  43. quasarr/providers/sessions/dl.py +30 -19
  44. quasarr/providers/sessions/nx.py +23 -14
  45. quasarr/providers/shared_state.py +292 -141
  46. quasarr/providers/statistics.py +75 -43
  47. quasarr/providers/utils.py +33 -27
  48. quasarr/providers/version.py +45 -14
  49. quasarr/providers/web_server.py +10 -5
  50. quasarr/search/__init__.py +30 -18
  51. quasarr/search/sources/al.py +124 -73
  52. quasarr/search/sources/by.py +110 -59
  53. quasarr/search/sources/dd.py +57 -35
  54. quasarr/search/sources/dj.py +69 -48
  55. quasarr/search/sources/dl.py +159 -100
  56. quasarr/search/sources/dt.py +110 -74
  57. quasarr/search/sources/dw.py +121 -61
  58. quasarr/search/sources/fx.py +108 -62
  59. quasarr/search/sources/he.py +78 -49
  60. quasarr/search/sources/mb.py +96 -48
  61. quasarr/search/sources/nk.py +80 -50
  62. quasarr/search/sources/nx.py +91 -62
  63. quasarr/search/sources/sf.py +171 -106
  64. quasarr/search/sources/sj.py +69 -48
  65. quasarr/search/sources/sl.py +115 -71
  66. quasarr/search/sources/wd.py +67 -44
  67. quasarr/search/sources/wx.py +188 -123
  68. quasarr/storage/config.py +65 -52
  69. quasarr/storage/setup.py +238 -140
  70. quasarr/storage/sqlite_database.py +10 -4
  71. {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
  72. quasarr-2.4.9.dist-info/RECORD +81 -0
  73. quasarr-2.4.7.dist-info/RECORD +0 -81
  74. {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
  75. {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
  76. {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/wx.py

@@ -10,14 +10,15 @@ from base64 import urlsafe_b64encode
 from datetime import datetime
 
 import requests
-from bs4 import BeautifulSoup
-from bs4 import XMLParsedAsHTMLWarning
+from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning
 
-from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import info, debug
+from quasarr.providers.log import debug, info
 
-warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)  # we dont want to use lxml
+warnings.filterwarnings(
+    "ignore", category=XMLParsedAsHTMLWarning
+)  # we dont want to use lxml
 
 hostname = "wx"
 supported_mirrors = []
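The import consolidation above is cosmetic, but the reflowed `warnings.filterwarnings` call is load-bearing: BeautifulSoup emits `XMLParsedAsHTMLWarning` whenever XML-looking markup (such as this module's RSS/Atom feeds) is parsed with an HTML parser. A standalone sketch of the behavior being silenced, with a made-up feed string:

```python
# Standalone illustration, not quasarr code: parsing XML-looking markup with
# html.parser triggers XMLParsedAsHTMLWarning, which the module filters out
# globally because it deliberately avoids pulling in lxml.
import warnings

from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning

warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)

feed = "<?xml version='1.0'?><feed><entry><title>Demo</title></entry></feed>"
soup = BeautifulSoup(feed, "html.parser")  # would warn without the filter
print(soup.find("entry").find("title").get_text())  # -> Demo
```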
@@ -31,23 +32,25 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if "lazylibrarian" in request_from.lower():
-        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
-    rss_url = f'https://{host}/rss'
+    rss_url = f"https://{host}/rss"
     headers = {
-        'User-Agent': shared_state.values["user_agent"],
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
         r = requests.get(rss_url, headers=headers, timeout=10)
         r.raise_for_status()
 
-        soup = BeautifulSoup(r.content, 'html.parser')
-        items = soup.find_all('entry')
+        soup = BeautifulSoup(r.content, "html.parser")
+        items = soup.find_all("entry")
 
         if not items:
-            items = soup.find_all('item')
+            items = soup.find_all("item")
 
         if not items:
             info(f"{hostname.upper()}: No entries found in RSS feed")
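The hunk above also shows the feed parser's tolerance for both feed dialects: it looks for Atom `<entry>` elements first and falls back to RSS `<item>`. A self-contained sketch with invented sample feeds:

```python
# Sketch of the entry/item fallback (sample documents are made up).
# html.parser lowercases tag names, so lowercase find_all matches both dialects.
from bs4 import BeautifulSoup

atom = "<feed><entry><title>Release.A</title></entry></feed>"
rss = "<rss><channel><item><title>Release.B</title></item></channel></rss>"

for doc in (atom, rss):
    soup = BeautifulSoup(doc, "html.parser")
    items = soup.find_all("entry")
    if not items:
        items = soup.find_all("item")
    print(items[0].find("title").get_text(strip=True))  # Release.A, then Release.B
```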
@@ -57,7 +60,7 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
 
         for item in items:
             try:
-                title_tag = item.find('title')
+                title_tag = item.find("title")
                 if not title_tag:
                     continue
 
@@ -66,14 +69,14 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
                     continue
 
                 title = html.unescape(title)
-                title = title.replace(']]>', '').replace('<![CDATA[', '')
-                title = title.replace(' ', '.')
+                title = title.replace("]]>", "").replace("<![CDATA[", "")
+                title = title.replace(" ", ".")
 
-                link_tag = item.find('link', rel='alternate')
-                if link_tag and link_tag.has_attr('href'):
-                    source = link_tag['href']
+                link_tag = item.find("link", rel="alternate")
+                if link_tag and link_tag.has_attr("href"):
+                    source = link_tag["href"]
                 else:
-                    link_tag = item.find('link')
+                    link_tag = item.find("link")
                     if not link_tag:
                         continue
                     source = link_tag.get_text(strip=True)
@@ -81,7 +84,7 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
                 if not source:
                     continue
 
-                pub_date = item.find('updated') or item.find('pubDate')
+                pub_date = item.find("updated") or item.find("pubDate")
                 if pub_date:
                     published = pub_date.get_text(strip=True)
                 else:
@@ -94,23 +97,27 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
                 password = host.upper()
 
                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode("utf-8")
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                releases.append({
-                    "details": {
-                        "title": title,
-                        "hostname": hostname,
-                        "imdb_id": imdb_id,
-                        "link": link,
-                        "mirror": mirror,
-                        "size": size,
-                        "date": published,
-                        "source": source
-                    },
-                    "type": "protected"
-                })
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": source,
+                        },
+                        "type": "protected",
+                    }
+                )
 
             except Exception as e:
                 debug(f"{hostname.upper()}: error parsing RSS entry: {e}")
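Both functions in this file build the same `/download/?payload=` link: seven pipe-separated fields, urlsafe-base64 encoded. A hedged round-trip sketch; `decode_payload` is a hypothetical counterpart written for illustration, not a function quasarr exposes:

```python
# Round-trip for the payload format visible in the diff:
# title|source|mirror|size_mb|password|imdb_id|hostname, urlsafe-base64 encoded.
# Assumes no field contains '|' (titles are dot-separated upstream).
from base64 import urlsafe_b64decode, urlsafe_b64encode


def encode_payload(title, source, mirror, mb, password, imdb_id, hostname):
    raw = f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}"
    return urlsafe_b64encode(raw.encode("utf-8")).decode("utf-8")


def decode_payload(payload):  # hypothetical helper, not quasarr API
    fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
    keys = ("title", "source", "mirror", "size_mb", "password", "imdb_id", "hostname")
    return dict(zip(keys, fields))


payload = encode_payload("Some.Show.S01E01", "https://example.org/detail/x",
                         None, 1500, "EXAMPLE", "tt0000001", "wx")
print(decode_payload(payload)["title"])  # -> Some.Show.S01E01
```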
@@ -118,7 +125,9 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     elapsed_time = time.time() - start_time
@@ -129,7 +138,15 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def wx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+def wx_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     """
     Search using internal API.
     Deduplicates results by fulltitle - each unique release appears only once.
@@ -138,48 +155,52 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if "lazylibrarian" in request_from.lower():
-        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if imdb_id:
         debug(f"{hostname.upper()}: Received IMDb ID: {imdb_id}")
-        title = get_localized_title(shared_state, imdb_id, 'de')
+        title = get_localized_title(shared_state, imdb_id, "de")
         if not title:
             debug(f"{hostname.upper()}: no title for IMDb {imdb_id}")
             return releases
-        debug(f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'")
+        debug(
+            f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'"
+        )
         search_string = html.unescape(title)
     else:
         debug(f"{hostname.upper()}: Using search string directly: '{search_string}'")
 
-    api_url = f'https://api.{host}/start/search'
+    api_url = f"https://api.{host}/start/search"
 
     headers = {
-        'User-Agent': shared_state.values["user_agent"],
-        'Accept': 'application/json, text/plain, */*',
-        'Referer': f'https://{host}/search'
+        "User-Agent": shared_state.values["user_agent"],
+        "Accept": "application/json, text/plain, */*",
+        "Referer": f"https://{host}/search",
     }
 
     params = {
-        '__LOAD_P': '',
-        'per_page': 50,
-        'q': search_string,
-        'selectedTypes': '',
-        'selectedGenres': '',
-        'types': 'movie,series,anime',
-        'genres': '',
-        'years': '',
-        'ratings': '',
-        'page': 1,
-        'sortBy': 'latest',
-        'sortOrder': 'desc'
+        "__LOAD_P": "",
+        "per_page": 50,
+        "q": search_string,
+        "selectedTypes": "",
+        "selectedGenres": "",
+        "types": "movie,series,anime",
+        "genres": "",
+        "years": "",
+        "ratings": "",
+        "page": 1,
+        "sortBy": "latest",
+        "sortOrder": "desc",
    }
 
     if "sonarr" in request_from.lower():
-        params['types'] = 'series,anime'
+        params["types"] = "series,anime"
     elif "radarr" in request_from.lower():
-        params['types'] = 'movie'
+        params["types"] = "movie"
 
     debug(f"{hostname.upper()}: Searching: '{search_string}'")
 
@@ -189,12 +210,12 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
 
 
         data = r.json()
-        if 'items' in data and 'data' in data['items']:
-            items = data['items']['data']
-        elif 'data' in data:
-            items = data['data']
-        elif 'results' in data:
-            items = data['results']
+        if "items" in data and "data" in data["items"]:
+            items = data["items"]["data"]
+        elif "data" in data:
+            items = data["data"]
+        elif "results" in data:
+            items = data["results"]
         else:
             items = data if isinstance(data, list) else []
 
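The branching above makes the search tolerant of several response layouts. The helper below is a hypothetical refactor of that inline logic, shown only to make the accepted shapes concrete:

```python
# Hypothetical extraction of the shape-probing branches (not a quasarr helper).
def extract_items(data):
    if "items" in data and "data" in data["items"]:
        return data["items"]["data"]   # paginated: {'items': {'data': [...]}}
    if "data" in data:
        return data["data"]            # flat: {'data': [...]}
    if "results" in data:
        return data["results"]         # alternate: {'results': [...]}
    return data if isinstance(data, list) else []  # bare list, or give up


# Made-up response bodies exercising each branch:
for body in (
    {"items": {"data": [{"uid": "a1"}]}},
    {"data": [{"uid": "b2"}]},
    {"results": [{"uid": "c3"}]},
    [{"uid": "d4"}],
):
    print(extract_items(body))
```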
@@ -205,124 +226,164 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
 
         for item in items:
             try:
-                uid = item.get('uid')
+                uid = item.get("uid")
                 if not uid:
                     debug(f"{hostname.upper()}: Item has no UID, skipping")
                     continue
 
                 debug(f"{hostname.upper()}: Fetching details for UID: {uid}")
 
-                detail_url = f'https://api.{host}/start/d/{uid}'
+                detail_url = f"https://api.{host}/start/d/{uid}"
                 detail_r = requests.get(detail_url, headers=headers, timeout=10)
                 detail_r.raise_for_status()
 
                 detail_data = detail_r.json()
 
-                if 'item' in detail_data:
-                    detail_item = detail_data['item']
+                if "item" in detail_data:
+                    detail_item = detail_data["item"]
                 else:
                     detail_item = detail_data
 
                 item_imdb_id = imdb_id
                 if not item_imdb_id:
-                    item_imdb_id = detail_item.get('imdb_id') or detail_item.get('imdbid')
-                    if not item_imdb_id and 'options' in detail_item:
-                        item_imdb_id = detail_item['options'].get('imdb_id')
+                    item_imdb_id = detail_item.get("imdb_id") or detail_item.get(
+                        "imdbid"
+                    )
+                    if not item_imdb_id and "options" in detail_item:
+                        item_imdb_id = detail_item["options"].get("imdb_id")
 
                 source = f"https://{host}/detail/{uid}"
 
-                main_title = detail_item.get('fulltitle') or detail_item.get('title') or detail_item.get('name')
+                main_title = (
+                    detail_item.get("fulltitle")
+                    or detail_item.get("title")
+                    or detail_item.get("name")
+                )
                 if main_title:
                     title = html.unescape(main_title)
-                    title = title.replace(' ', '.')
+                    title = title.replace(" ", ".")
 
-                    if shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                    if shared_state.is_valid_release(
+                        title, request_from, search_string, season, episode
+                    ):
                         # Skip if we've already seen this exact title
                         if title in seen_titles:
-                            debug(f"{hostname.upper()}: Skipping duplicate main title: {title}")
+                            debug(
+                                f"{hostname.upper()}: Skipping duplicate main title: {title}"
+                            )
                         else:
                             seen_titles.add(title)
-                            published = detail_item.get('updated_at') or detail_item.get('created_at')
+                            published = detail_item.get(
+                                "updated_at"
+                            ) or detail_item.get("created_at")
                             if not published:
-                                published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+                                published = datetime.now().strftime(
+                                    "%a, %d %b %Y %H:%M:%S +0000"
+                                )
                             password = f"www.{host}"
 
                             payload = urlsafe_b64encode(
                                 f"{title}|{source}|{mirror}|0|{password}|{item_imdb_id or ''}|{hostname}".encode(
-                                    "utf-8")
+                                    "utf-8"
+                                )
                             ).decode("utf-8")
                             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                            releases.append({
-                                "details": {
-                                    "title": title,
-                                    "hostname": hostname,
-                                    "imdb_id": item_imdb_id,
-                                    "link": link,
-                                    "mirror": mirror,
-                                    "size": 0,
-                                    "date": published,
-                                    "source": source
-                                },
-                                "type": "protected"
-                            })
-
-                if 'releases' in detail_item and isinstance(detail_item['releases'], list):
-                    debug(f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}")
-
-                    for release in detail_item['releases']:
+                            releases.append(
+                                {
+                                    "details": {
+                                        "title": title,
+                                        "hostname": hostname,
+                                        "imdb_id": item_imdb_id,
+                                        "link": link,
+                                        "mirror": mirror,
+                                        "size": 0,
+                                        "date": published,
+                                        "source": source,
+                                    },
+                                    "type": "protected",
+                                }
+                            )
+
+                if "releases" in detail_item and isinstance(
+                    detail_item["releases"], list
+                ):
+                    debug(
+                        f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}"
+                    )
+
+                    for release in detail_item["releases"]:
                         try:
-                            release_title = release.get('fulltitle')
+                            release_title = release.get("fulltitle")
                             if not release_title:
                                 continue
 
                             release_title = html.unescape(release_title)
-                            release_title = release_title.replace(' ', '.')
-
-                            if not shared_state.is_valid_release(release_title, request_from, search_string, season,
-                                                                 episode):
-                                debug(f"{hostname.upper()}: ✗ Release filtered out: {release_title}")
+                            release_title = release_title.replace(" ", ".")
+
+                            if not shared_state.is_valid_release(
+                                release_title,
+                                request_from,
+                                search_string,
+                                season,
+                                episode,
+                            ):
+                                debug(
+                                    f"{hostname.upper()}: ✗ Release filtered out: {release_title}"
+                                )
                                 continue
 
                             # Skip if we've already seen this exact title (deduplication)
                             if release_title in seen_titles:
-                                debug(f"{hostname.upper()}: Skipping duplicate release: {release_title}")
+                                debug(
+                                    f"{hostname.upper()}: Skipping duplicate release: {release_title}"
+                                )
                                 continue
 
                             seen_titles.add(release_title)
 
-                            release_uid = release.get('uid')
+                            release_uid = release.get("uid")
                             if release_uid:
-                                release_source = f"https://{host}/detail/{uid}?release={release_uid}"
+                                release_source = (
+                                    f"https://{host}/detail/{uid}?release={release_uid}"
+                                )
                             else:
                                 release_source = source
 
-                            release_published = release.get('updated_at') or release.get(
-                                'created_at') or detail_item.get('updated_at')
+                            release_published = (
+                                release.get("updated_at")
+                                or release.get("created_at")
+                                or detail_item.get("updated_at")
+                            )
                             if not release_published:
-                                release_published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
-                            release_size = release.get('size', 0)
+                                release_published = datetime.now().strftime(
+                                    "%a, %d %b %Y %H:%M:%S +0000"
+                                )
+                            release_size = release.get("size", 0)
                             password = f"www.{host}"
 
                             payload = urlsafe_b64encode(
                                 f"{release_title}|{release_source}|{mirror}|{release_size}|{password}|{item_imdb_id or ''}|{hostname}".encode(
-                                    "utf-8")
+                                    "utf-8"
+                                )
                             ).decode("utf-8")
                             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                            releases.append({
-                                "details": {
-                                    "title": release_title,
-                                    "hostname": hostname,
-                                    "imdb_id": item_imdb_id,
-                                    "link": link,
-                                    "mirror": mirror,
-                                    "size": release_size,
-                                    "date": release_published,
-                                    "source": release_source
-                                },
-                                "type": "protected"
-                            })
+                            releases.append(
+                                {
+                                    "details": {
+                                        "title": release_title,
+                                        "hostname": hostname,
+                                        "imdb_id": item_imdb_id,
+                                        "link": link,
+                                        "mirror": mirror,
+                                        "size": release_size,
+                                        "date": release_published,
+                                        "source": release_source,
+                                    },
+                                    "type": "protected",
+                                }
+                            )
 
                         except Exception as e:
                             debug(f"{hostname.upper()}: Error parsing release: {e}")
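The docstring's deduplication promise is implemented with a single `seen_titles` set shared by the main-title branch and the per-release loop above, so a fulltitle appearing in both places is emitted once. A toy reduction of that strategy, with invented titles:

```python
# Toy reduction of the shared-set dedup across both loops (titles invented).
seen_titles = set()
emitted = []
for title in ("Show.S01.German.1080p", "Show.S01.German.1080p", "Show.S01.German.720p"):
    if title in seen_titles:
        continue  # duplicate, regardless of which loop produced it
    seen_titles.add(title)
    emitted.append(title)
print(emitted)  # -> ['Show.S01.German.1080p', 'Show.S01.German.720p']
```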
@@ -335,11 +396,15 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                 debug(f"{hostname.upper()}: {traceback.format_exc()}")
                 continue
 
-        debug(f"{hostname.upper()}: Returning {len(releases)} total releases (deduplicated)")
+        debug(
+            f"{hostname.upper()}: Returning {len(releases)} total releases (deduplicated)"
+        )
 
     except Exception as e:
         info(f"Error in {hostname.upper()} search: {e}")
-        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
 
     debug(f"{hostname.upper()}: {traceback.format_exc()}")
     return releases