quasarr 2.7.0__py3-none-any.whl → 2.7.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -2,7 +2,6 @@
 # Quasarr
 # Project by https://github.com/rix1337
 
-import time
 import traceback
 import xml.sax.saxutils as sax_utils
 from base64 import urlsafe_b64decode
@@ -16,7 +15,7 @@ from quasarr.downloads import download
 from quasarr.downloads.packages import delete_package, get_packages
 from quasarr.providers import shared_state
 from quasarr.providers.auth import require_api_key
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, error, info, warn
 from quasarr.providers.version import get_version
 from quasarr.search import get_search_results
 
@@ -304,38 +303,29 @@ def setup_arr_routes(app):
                 </caps>"""
             elif mode in ["movie", "tvsearch", "book", "search"]:
                 releases = []
-                cache_key = None
 
                 try:
-                    offset = int(getattr(request.query, "offset", 0))
+                    offset = int(getattr(request.query, "offset", 0) or 0)
                 except (AttributeError, ValueError) as e:
                     debug(f"Error parsing offset parameter: {e}")
                     offset = 0
 
                 try:
-                    limit = int(getattr(request.query, "limit", 100))
+                    limit = int(getattr(request.query, "limit", 9999) or 9999)
                 except (AttributeError, ValueError) as e:
                     debug(f"Error parsing limit parameter: {e}")
-                    limit = 100
+                    limit = 1000
 
                 if mode == "movie":
                     # supported params: imdbid
                     imdb_id = getattr(request.query, "imdbid", "")
-
-                    if imdb_id != "":
-                        cache_key = f"{request_from}::{imdb_id}::${mirror}"
-
-                        if result := results_cache.get(cache_key, offset, limit):
-                            debug(
-                                f"Returning offset {offset}, limit {limit} for {cache_key}"
-                            )
-                            return result
-
                     releases = get_search_results(
                         shared_state,
                         request_from,
                         imdb_id=imdb_id,
                         mirror=mirror,
+                        offset=offset,
+                        limit=limit,
                     )
 
                 elif mode == "tvsearch":
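A note on the `or 0` / `or 9999` guards above: a query parameter that is present but blank (e.g. `offset=`) arrives as an empty string, and `int("")` raises ValueError, so 2.7.0 only recovered via the except branch; the `or <default>` short-circuit handles the empty case directly. A minimal standalone sketch (plain Python, no web framework):

    # "" and None are falsy, so (raw or 0) substitutes the default before int()
    for raw in ("25", "", None):
        print(repr(raw), "->", int(raw or 0))
    # '25' -> 25
    # ''   -> 0
    # None -> 0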
@@ -343,16 +333,6 @@ def setup_arr_routes(app):
                     imdb_id = getattr(request.query, "imdbid", "")
                     season = getattr(request.query, "season", None)
                     episode = getattr(request.query, "ep", None)
-
-                    if imdb_id != "":
-                        cache_key = f"{request_from}::{imdb_id}::${mirror}::{season}::{episode}"
-
-                        if result := results_cache.get(cache_key, offset, limit):
-                            debug(
-                                f"Returning offset {offset}, limit {limit} for {cache_key}"
-                            )
-                            return result
-
                     releases = get_search_results(
                         shared_state,
                         request_from,
@@ -360,7 +340,10 @@ def setup_arr_routes(app):
                         mirror=mirror,
                         season=season,
                         episode=episode,
+                        offset=offset,
+                        limit=limit,
                     )
+
                 elif mode == "book":
                     author = getattr(request.query, "author", "")
                     title = getattr(request.query, "title", "")
@@ -370,6 +353,8 @@ def setup_arr_routes(app):
                         request_from,
                         search_phrase=search_phrase,
                         mirror=mirror,
+                        offset=offset,
+                        limit=limit,
                     )
 
                 elif mode == "search":
@@ -380,6 +365,8 @@ def setup_arr_routes(app):
                         request_from,
                         search_phrase=search_phrase,
                         mirror=mirror,
+                        offset=offset,
+                        limit=limit,
                     )
                 else:
                     # sonarr expects this but we will not support non-imdbid searches
@@ -387,9 +374,8 @@ def setup_arr_routes(app):
                         f"Ignoring search request from {request_from} - only imdbid searches are supported"
                     )
 
+                # XML Generation (releases are already sliced)
                 items = ""
-                items_amount = 0
-                items_processed = 0
                 for release in releases:
                     release = release.get("details", {})
 
@@ -412,43 +398,6 @@ def setup_arr_routes(app):
                     <pubDate>{pub_date}</pubDate>
                     <enclosure url="{release.get("link", "")}" length="{release.get("size", 0)}" type="application/x-nzb" />
                     </item>'''
-                    items_amount += 1
-
-                    if cache_key and items_amount == limit:
-                        items_processed += items_amount
-                        debug(
-                            f"Processed {items_processed}/{len(releases)} releases"
-                        )
-                        results_cache.set(
-                            cache_key,
-                            f"""<?xml version="1.0" encoding="UTF-8"?>
-                            <rss>
-                            <channel>
-                            {items}
-                            </channel>
-                            </rss>""",
-                            items_processed - items_amount,
-                            limit,
-                        )
-                        items = ""
-                        items_amount = 0
-
-                if cache_key and items_amount > 0:
-                    items_processed += items_amount
-                    debug(f"Processed {items_processed}/{len(releases)} releases")
-                    results_cache.set(
-                        cache_key,
-                        f"""<?xml version="1.0" encoding="UTF-8"?>
-                        <rss>
-                        <channel>
-                        {items}
-                        </channel>
-                        </rss>""",
-                        items_processed - items_amount,
-                        limit,
-                    )
-                    items = ""
-                    items_amount = 0
 
                 requires_placeholder_item = not getattr(
                     request.query, "imdbid", ""
@@ -464,13 +413,6 @@ def setup_arr_routes(app):
                     <enclosure url="https://github.com/rix1337/Quasarr" length="0" type="application/x-nzb" />
                     </item>"""
 
-                if cache_key:
-                    if result := results_cache.get(cache_key, offset, limit):
-                        debug(
-                            f"Returning offset {offset}, limit {limit} for {cache_key}"
-                        )
-                        return result
-
                 return f"""<?xml version="1.0" encoding="UTF-8"?>
                 <rss>
                 <channel>
@@ -478,9 +420,8 @@ def setup_arr_routes(app):
                 </channel>
                 </rss>"""
         except Exception as e:
-            info(f"Error loading search results: {e}")
-            info(traceback.format_exc())
-            info(f"[ERROR] Unknown indexer request: {dict(request.query)}")
+            error(f"Error loading search results: {e} " + traceback.format_exc())
+            warn(f"Unknown indexer request: {dict(request.query)}")
             return """<?xml version="1.0" encoding="UTF-8"?>
             <rss>
             <channel>
@@ -490,41 +431,5 @@ def setup_arr_routes(app):
             </channel>
             </rss>"""
 
-        info(f"[ERROR] Unknown general request: {dict(request.query)}")
+        warn(f"[ERROR] Unknown general request: {dict(request.query)}")
         return {"error": True}
-
-
-class ResultsCache:
-    def __init__(self):
-        self.last_cleaned = time.time()
-        self.cache = {}
-
-    def clean(self, now):
-        if now - self.last_cleaned < 60:
-            return
-
-        keys_to_delete = [
-            key for key, (_, expiry) in self.cache.items() if now >= expiry
-        ]
-
-        for key in keys_to_delete:
-            del self.cache[key]
-
-        self.last_cleaned = now
-
-    def get(self, key, offset, limit):
-        key = key + f"::{offset}::{limit}"
-        value, expiry = self.cache.get(key, (None, 0))
-        if time.time() < expiry:
-            return value
-
-        return None
-
-    def set(self, key, value, offset, limit, ttl=300):
-        now = time.time()
-        key = key + f"::{offset}::{limit}"
-        self.cache[key] = (value, now + ttl)
-        self.clean(now)
-
-
-results_cache = ResultsCache()
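Taken together, these hunks replace 2.7.0's per-page XML cache (ResultsCache, keyed by request, offset and limit) with plain list slicing inside get_search_results. A sketch of the slicing contract, with illustrative names rather than the package's API:

    def paginate(results, offset=0, limit=1000):
        # One sorted list, sliced per request; no pre-rendered pages to cache
        offset = max(0, offset)  # defensive clamp; a negative offset would slice from the end
        return results[offset:offset + limit]

    assert paginate(list(range(10)), offset=3, limit=4) == [3, 4, 5, 6]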
@@ -254,7 +254,7 @@ def solve_captcha(
         method="POST",
         target_url=captcha_base,
         post_data={"cID": 0, "pC": identified_captcha_image, "rT": 2},
-        timeout=60,
+        timeout=30,
     )
 
     return {"response": result["text"], "captcha_id": identified_captcha_image}
@@ -169,7 +169,7 @@ class FlareSolverrResponse:
         raise requests.HTTPError(f"{self.status_code} Error at {self.url}")
 
 
-def flaresolverr_get(shared_state, url, timeout=60, session_id=None):
+def flaresolverr_get(shared_state, url, timeout=30, session_id=None):
     """
     Core function for performing a GET request via FlareSolverr only.
    Used internally by FlareSolverrSession.get()
@@ -236,7 +236,7 @@ class IMDbFlareSolverr:
             flaresolverr_url,
             json=post_data,
             headers={"Content-Type": "application/json"},
-            timeout=60,
+            timeout=30,
         )
         if response.status_code == 200:
             json_response = response.json()
@@ -118,7 +118,7 @@ def extract_valid_hostname(url, shorthand):
         message = f"Error: {e}. Please provide a valid URL."
         domain = None
 
-    print(message)
+    debug(message)
    return {"domain": domain, "message": message}
 
 
@@ -5,7 +5,7 @@
 import re
 import sys
 
-__version__ = "2.7.0"
+__version__ = "2.7.2"
 
 
 def get_version():
@@ -4,9 +4,11 @@
 
 import time
 from concurrent.futures import ThreadPoolExecutor, as_completed
+from datetime import timezone
+from email.utils import parsedate_to_datetime
 
 from quasarr.providers.imdb_metadata import get_imdb_metadata
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace
 from quasarr.search.sources.al import al_feed, al_search
 from quasarr.search.sources.by import by_feed, by_search
 from quasarr.search.sources.dd import dd_feed, dd_search
@@ -35,6 +37,8 @@ def get_search_results(
     mirror=None,
     season="",
     episode="",
+    offset=0,
+    limit=1000,
 ):
     if imdb_id and not imdb_id.startswith("tt"):
         imdb_id = f"tt{imdb_id}"
@@ -70,54 +74,54 @@ def get_search_results(
 
     # Mappings
     imdb_map = [
-        (al, al_search),
-        (by, by_search),
-        (dd, dd_search),
-        (dl, dl_search),
-        (dt, dt_search),
-        (dj, dj_search),
-        (dw, dw_search),
-        (fx, fx_search),
-        (he, he_search),
-        (hs, hs_search),
-        (mb, mb_search),
-        (nk, nk_search),
-        (nx, nx_search),
-        (sf, sf_search),
-        (sj, sj_search),
-        (sl, sl_search),
-        (wd, wd_search),
-        (wx, wx_search),
+        ("al", al, al_search),
+        ("by", by, by_search),
+        ("dd", dd, dd_search),
+        ("dl", dl, dl_search),
+        ("dt", dt, dt_search),
+        ("dj", dj, dj_search),
+        ("dw", dw, dw_search),
+        ("fx", fx, fx_search),
+        ("he", he, he_search),
+        ("hs", hs, hs_search),
+        ("mb", mb, mb_search),
+        ("nk", nk, nk_search),
+        ("nx", nx, nx_search),
+        ("sf", sf, sf_search),
+        ("sj", sj, sj_search),
+        ("sl", sl, sl_search),
+        ("wd", wd, wd_search),
+        ("wx", wx, wx_search),
     ]
 
     phrase_map = [
-        (by, by_search),
-        (dl, dl_search),
-        (dt, dt_search),
-        (nx, nx_search),
-        (sl, sl_search),
-        (wd, wd_search),
+        ("by", by, by_search),
+        ("dl", dl, dl_search),
+        ("dt", dt, dt_search),
+        ("nx", nx, nx_search),
+        ("sl", sl, sl_search),
+        ("wd", wd, wd_search),
     ]
 
     feed_map = [
-        (al, al_feed),
-        (by, by_feed),
-        (dd, dd_feed),
-        (dj, dj_feed),
-        (dl, dl_feed),
-        (dt, dt_feed),
-        (dw, dw_feed),
-        (fx, fx_feed),
-        (he, he_feed),
-        (hs, hs_feed),
-        (mb, mb_feed),
-        (nk, nk_feed),
-        (nx, nx_feed),
-        (sf, sf_feed),
-        (sj, sj_feed),
-        (sl, sl_feed),
-        (wd, wd_feed),
-        (wx, wx_feed),
+        ("al", al, al_feed),
+        ("by", by, by_feed),
+        ("dd", dd, dd_feed),
+        ("dj", dj, dj_feed),
+        ("dl", dl, dl_feed),
+        ("dt", dt, dt_feed),
+        ("dw", dw, dw_feed),
+        ("fx", fx, fx_feed),
+        ("he", he, he_feed),
+        ("hs", hs, hs_feed),
+        ("mb", mb, mb_feed),
+        ("nk", nk, nk_feed),
+        ("nx", nx, nx_feed),
+        ("sf", sf, sf_feed),
+        ("sj", sj, sj_feed),
+        ("sl", sl, sl_feed),
+        ("wd", wd, wd_feed),
+        ("wx", wx, wx_feed),
     ]
 
     # Add searches
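The mapping entries grow from (flag, func) to (name, url, func): the hostname value keeps acting as the enabled check, while the new name labels the per-source status badges in run_all(). A small sketch of the pattern (stand-in values, not the real sources):

    def fake_search():  # hypothetical stand-in for a source's search function
        return ["release-1"]

    imdb_map = [
        ("al", "al.example", fake_search),  # configured source
        ("by", None, fake_search),          # unconfigured -> skipped
    ]
    for name, url, func in imdb_map:
        if url:
            print(name.upper(), func())  # AL ['release-1']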
@@ -126,119 +130,166 @@ def get_search_results(
             (shared_state, start_time, request_from, imdb_id),
             {"mirror": mirror, "season": season, "episode": episode},
         )
-        for flag, func in imdb_map:
-            if flag:
-                search_executor.add(func, args, kwargs, True)
+        for name, url, func in imdb_map:
+            if url:
+                search_executor.add(func, args, kwargs, True, name.upper())
 
     elif search_phrase and docs_search:
         args, kwargs = (
             (shared_state, start_time, request_from, search_phrase),
             {"mirror": mirror, "season": season, "episode": episode},
         )
-        for flag, func in phrase_map:
-            if flag:
-                search_executor.add(func, args, kwargs)
+        for name, url, func in phrase_map:
+            if url:
+                search_executor.add(func, args, kwargs, source_name=name.upper())
 
     elif search_phrase:
         debug(f"Search phrase '{search_phrase}' is not supported for {request_from}.")
 
     else:
         args, kwargs = ((shared_state, start_time, request_from), {"mirror": mirror})
-        for flag, func in feed_map:
-            if flag:
-                search_executor.add(func, args, kwargs)
+        for name, url, func in feed_map:
+            if url:
+                search_executor.add(func, args, kwargs, source_name=name.upper())
 
     # Clean description for Console UI
     if imdb_id:
-        desc_text = f"Searching for IMDb-ID {imdb_id}"
         stype = f"IMDb-ID <b>{imdb_id}</b>"
     elif search_phrase:
-        desc_text = f"Searching for '{search_phrase}'"
         stype = f"Search-Phrase <b>{search_phrase}</b>"
     else:
-        desc_text = "Running Feed Search"
         stype = "<b>feed</b> search"
 
     debug(f"Starting <g>{len(search_executor.searches)}</g> searches for {stype}...")
 
-    results = search_executor.run_all(desc_text)
+    # Unpack the new return values (all_cached, min_ttl)
+    results, status_bar, all_cached, min_ttl = search_executor.run_all()
 
     elapsed_time = time.time() - start_time
+
+    # Sort results by date (newest first)
+    def get_date(item):
+        try:
+            dt = parsedate_to_datetime(item.get("details", {}).get("date", ""))
+            if dt.tzinfo is None:
+                dt = dt.replace(tzinfo=timezone.utc)
+            return dt
+        except Exception:
+            return parsedate_to_datetime("Thu, 01 Jan 1970 00:00:00 +0000")
+
+    results.sort(key=get_date, reverse=True)
+
+    # Calculate pagination for logging and return
+    total_count = len(results)
+
+    # Slicing
+    sliced_results = results[offset : offset + limit]
+
+    if sliced_results:
+        trace(f"First {len(sliced_results)} results sorted by date:")
+        for i, res in enumerate(sliced_results):
+            details = res.get("details", {})
+            trace(f"{i + 1}. {details.get('date')} | {details.get('title')}")
+
+    # Formatting for log (1-based index for humans)
+    log_start = min(offset + 1, total_count) if total_count > 0 else 0
+    log_end = min(offset + limit, total_count)
+
+    # Logic to switch between "Time taken" and "from cache"
+    if all_cached:
+        time_info = f"from cache ({int(min_ttl)}s left)"
+    else:
+        time_info = f"Time taken: {elapsed_time:.2f} seconds"
+
     info(
-        f"Providing <g>{len(results)} releases</g> to <d>{request_from}</d> for {stype}. <blue>Time taken: {elapsed_time:.2f} seconds</blue>"
+        f"Providing releases <g>{log_start}-{log_end}</g> of <g>{total_count}</g> to <d>{request_from}</d> "
+        f"for {stype}{status_bar} <blue>{time_info}</blue>"
    )
 
-    return results
+    return sliced_results
 
 
 class SearchExecutor:
     def __init__(self):
         self.searches = []
 
-    def add(self, func, args, kwargs, use_cache=False):
+    def add(self, func, args, kwargs, use_cache=False, source_name=None):
         key_args = list(args)
         key_args[1] = None
         key_args = tuple(key_args)
         key = hash((func.__name__, key_args, frozenset(kwargs.items())))
-        self.searches.append((key, lambda: func(*args, **kwargs), use_cache))
+        self.searches.append(
+            (
+                key,
+                lambda: func(*args, **kwargs),
+                use_cache,
+                source_name or func.__name__,
+            )
+        )
 
-    def run_all(self, description):
+    def run_all(self):
         results = []
         future_to_meta = {}
 
+        # Track cache state
+        all_cached = len(self.searches) > 0
+        min_ttl = float("inf")
+        bar_str = ""  # Initialize to prevent UnboundLocalError on full cache
+
         with ThreadPoolExecutor() as executor:
             current_index = 0
             pending_futures = []
-            cache_used = False
 
-            for key, func, use_cache in self.searches:
+            for key, func, use_cache, source_name in self.searches:
                 cached_result = None
+                exp = 0
+
                 if use_cache:
-                    cached_result = search_cache.get(key)
+                    # Get both result and expiry
+                    cached_result, exp = search_cache.get(key)
 
                 if cached_result is not None:
                     debug(f"Using cached result for {key}")
-                    cache_used = True
                     results.extend(cached_result)
+
+                    # Calculate TTL for this cached item
+                    ttl = exp - time.time()
+                    if ttl < min_ttl:
+                        min_ttl = ttl
                 else:
+                    all_cached = False
                     future = executor.submit(func)
                     cache_key = key if use_cache else None
-                    future_to_meta[future] = (current_index, cache_key)
+                    future_to_meta[future] = (current_index, cache_key, source_name)
                    pending_futures.append(future)
                     current_index += 1
 
-            # Prepare list to track status of each provider
-            # Icons will be filled in as threads complete
-            total_active = len(pending_futures)
-            icons = ["▪️"] * total_active
-
-            for future in as_completed(pending_futures):
-                index, cache_key = future_to_meta[future]
-                try:
-                    res = future.result()
-                    if res and len(res) > 0:
-                        status = "✅"
-                    else:
-                        status = "⚪"
-
-                    icons[index] = status
+            if pending_futures:
+                results_badges = [""] * len(pending_futures)
 
-                    results.extend(res)
-                    if cache_key:
-                        search_cache.set(cache_key, res)
-                except Exception as e:
-                    icons[index] = "❌"
-                    info(f"Search error: {e}")
+                for future in as_completed(pending_futures):
+                    index, cache_key, source_name = future_to_meta[future]
+                    try:
+                        res = future.result()
+                        if res and len(res) > 0:
+                            badge = f"<bg green><black>{source_name}</black></bg green>"
+                        else:
+                            debug(f"❌ No results returned by {source_name}")
+                            badge = f"<bg black><white>{source_name}</white></bg black>"
 
-            # Log the final status summary if any searches were performed
-            if total_active > 0:
-                bar_str = "".join(icons)
-                info(f"{description} [{bar_str}]")
+                        results_badges[index] = badge
+                        results.extend(res)
+                        if cache_key:
+                            search_cache.set(cache_key, res)
+                    except Exception as e:
+                        results_badges[index] = (
+                            f"<bg red><white>{source_name}</white></bg red>"
+                        )
+                        info(f"Search error: {e}")
 
-        if cache_used:
-            info("Presenting cached results for some items.")
+                bar_str = f" [{' '.join(results_badges)}]"
 
-        return results
+        return results, bar_str, all_cached, min_ttl
 
 
 class SearchCache:
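The sort key introduced above can be exercised on its own: parsedate_to_datetime handles the RFC 2822 dates the sources emit, naive values are coerced to UTC so aware and naive datetimes are never compared, and unparseable dates collapse to the epoch (oldest under reverse=True). The item dicts below are sample data, not real releases:

    from datetime import timezone
    from email.utils import parsedate_to_datetime

    def get_date(item):
        try:
            dt = parsedate_to_datetime(item.get("details", {}).get("date", ""))
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=timezone.utc)  # avoid naive/aware comparison errors
            return dt
        except Exception:
            return parsedate_to_datetime("Thu, 01 Jan 1970 00:00:00 +0000")

    items = [
        {"details": {"date": "Mon, 02 Jun 2025 10:00:00 +0000"}},
        {"details": {"date": "not a date"}},
        {"details": {"date": "Tue, 03 Jun 2025 10:00:00 +0200"}},
    ]
    items.sort(key=get_date, reverse=True)  # newest first, garbage last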
@@ -256,7 +307,8 @@ class SearchCache:
 
     def get(self, key):
         val, exp = self.cache.get(key, (None, 0))
-        return val if time.time() < exp else None
+        # Return tuple (value, expiry) if valid, else (None, 0)
+        return (val, exp) if time.time() < exp else (None, 0)
 
     def set(self, key, value, ttl=300):
         now = time.time()
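The changed get() contract is what feeds the new "from cache (Ns left)" log line: callers now receive the expiry timestamp alongside the value and can compute the remaining TTL themselves. A simplified stand-in for SearchCache (TTLCache is an illustrative name):

    import time

    class TTLCache:  # same get/set contract as the diff, minus eviction
        def __init__(self):
            self.cache = {}

        def get(self, key):
            val, exp = self.cache.get(key, (None, 0))
            return (val, exp) if time.time() < exp else (None, 0)

        def set(self, key, value, ttl=300):
            self.cache[key] = (value, time.time() + ttl)

    cache = TTLCache()
    cache.set("k", ["result"])
    value, exp = cache.get("k")
    if value is not None:
        print(f"from cache ({int(exp - time.time())}s left)")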
@@ -37,70 +37,73 @@ def dj_feed(shared_state, start_time, request_from, mirror=None):
     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
     password = sj_host
 
-    url = f"https://{sj_host}/api/releases/latest/0"
     headers = {"User-Agent": shared_state.values["user_agent"]}
 
-    try:
-        r = requests.get(url, headers=headers, timeout=30)
-        r.raise_for_status()
-        data = json.loads(r.content)
-    except Exception as e:
-        error(f"Feed load error: {e}")
-        mark_hostname_issue(
-            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
-        )
-        return releases
+    for days in range(4):
+        url = f"https://{sj_host}/api/releases/latest/{days}"
 
-    for release in data:
         try:
-            title = release.get("name").rstrip(".")
-            if not title:
-                continue
+            r = requests.get(url, headers=headers, timeout=30)
+            r.raise_for_status()
+            data = json.loads(r.content)
+        except Exception as e:
+            error(f"Feed load error: {e}")
+            mark_hostname_issue(
+                hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+            )
+            return releases
 
-            published = convert_to_rss_date(release.get("createdAt"))
-            if not published:
-                continue
+        for release in data:
+            try:
+                title = release.get("name").rstrip(".")
+                if not title:
+                    continue
 
-            media = release.get("_media", {})
-            slug = media.get("slug")
-            if not slug:
-                continue
+                published = convert_to_rss_date(release.get("createdAt"))
+                if not published:
+                    continue
 
-            series_url = f"https://{sj_host}/serie/{slug}"
+                media = release.get("_media", {})
+                slug = media.get("slug")
+                if not slug:
+                    continue
 
-            mb = 0
-            size = 0
-            imdb_id = None
+                series_url = f"https://{sj_host}/serie/{slug}"
 
-            payload = urlsafe_b64encode(
-                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
-                    "utf-8"
-                )
-            ).decode("utf-8")
+                mb = 0
+                size = 0
+                imdb_id = None
 
-            link = (
-                f"{shared_state.values['internal_address']}/download/?payload={payload}"
-            )
+                payload = urlsafe_b64encode(
+                    f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                        "utf-8"
+                    )
+                ).decode("utf-8")
+
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": series_url,
+                        },
+                        "type": "protected",
+                    }
+                )
 
-            releases.append(
-                {
-                    "details": {
-                        "title": title,
-                        "hostname": hostname,
-                        "imdb_id": imdb_id,
-                        "link": link,
-                        "mirror": mirror,
-                        "size": size,
-                        "date": published,
-                        "source": series_url,
-                    },
-                    "type": "protected",
-                }
-            )
+            except Exception as e:
+                warn(f"Feed parse error: {e}")
+                continue
 
-        except Exception as e:
-            warn(f"Feed parse error: {e}")
-            continue
+        if releases:
+            break
 
     debug(f"Time taken: {time.time() - start_time:.2f}s")
 
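The restructure wraps the old single fetch of /api/releases/latest/0 in a widening loop: day offsets 0 through 3 are tried in turn, and the loop stops at the first offset that yields releases. The shape, reduced to its core (fetch_day is a hypothetical stand-in for the HTTP call plus per-release parsing):

    def fetch_day(days):
        # stand-in: pretend the two most recent day-feeds are empty
        return [] if days < 2 else [f"release from {days} day(s) back"]

    releases = []
    for days in range(4):
        releases.extend(fetch_day(days))
        if releases:
            break  # stop as soon as one day-offset returns results
    print(releases)  # ['release from 2 day(s) back']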
@@ -12,7 +12,7 @@ import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
-from quasarr.providers.log import debug, warn
+from quasarr.providers.log import debug, error, warn
 
 hostname = "mb"
 supported_mirrors = ["rapidgator", "ddownload"]
@@ -104,21 +104,9 @@ def _parse_posts(
             if " " in title:
                 continue
 
-            # can't check for mirrors in search context
-            if mirror_filter and mirror_filter not in supported_mirrors:
-                continue
-            else:
-                mirror_candidates = []
-                for strong in post.find_all(
-                    "strong", string=re.compile(r"^Download", re.I)
-                ):
-                    link_tag = strong.find_next_sibling("a")
-                    if link_tag and link_tag.get_text(strip=True):
-                        host = link_tag.get_text(strip=True).split(".")[0].lower()
-                        mirror_candidates.append(host)
-                valid = [m for m in mirror_candidates if m in supported_mirrors]
-                if not valid or (mirror_filter and mirror_filter not in valid):
-                    continue
+            # can't check for mirrors in soup, so we use the hardcoded list
+            if mirror_filter and mirror_filter not in supported_mirrors:
+                continue
 
             # extract IMDb ID
             imdb_id = None
@@ -128,9 +116,18 @@ def _parse_posts(
                     imdb_id = m.group(1)
                     break
 
+            if not imdb_id:
+                m = IMDB_REGEX.search(post.get_text())
+                if m:
+                    imdb_id = m.group(1)
+
             # size extraction
             mb = size_bytes = 0
-            size_match = re.search(r"Größe:\s*([\d\.]+)\s*([GMK]B)", post.get_text())
+            size_match = re.search(
+                r"(?:Größe|Size).*?:\s*([\d\.]+)\s*([GMK]B)",
+                post.get_text(),
+                re.IGNORECASE,
+            )
             if size_match:
                 sz = {"size": size_match.group(1), "sizeunit": size_match.group(2)}
                 mb = shared_state.convert_to_mb(sz)
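The widened size regex now tolerates English labels, arbitrary text before the colon, and mixed case. A quick check against two sample strings (made up for illustration):

    import re

    pattern = re.compile(r"(?:Größe|Size).*?:\s*([\d\.]+)\s*([GMK]B)", re.IGNORECASE)
    for text in ("Größe: 4.7 GB", "size (approx.): 700 mb"):
        m = pattern.search(text)
        print(m.groups() if m else None)
    # ('4.7', 'GB')
    # ('700', 'mb')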
@@ -159,7 +156,7 @@ def _parse_posts(
                 }
             )
         except Exception as e:
-            debug(f"Error parsing {hostname.upper()} post: {e}")
+            error(f"Error parsing {hostname.upper()} post: {e}")
             continue
     return releases
 
@@ -316,7 +316,7 @@ def sf_search(
     # fetch API HTML
     epoch = str(datetime.now().timestamp()).replace(".", "")[:-3]
     api_url = f"https://{sf}/api/v1/{season_id}/season/ALL?lang=ALL&_={epoch}"
-    debug(f"Requesting SF API URL: {api_url}")
+    trace(f"Requesting SF API URL: {api_url}")
     try:
         r = requests.get(api_url, headers=headers, timeout=10)
         r.raise_for_status()
@@ -39,70 +39,73 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
     password = sj_host
 
-    url = f"https://{sj_host}/api/releases/latest/0"
     headers = {"User-Agent": shared_state.values["user_agent"]}
 
-    try:
-        r = requests.get(url, headers=headers, timeout=30)
-        r.raise_for_status()
-        data = json.loads(r.content)
-    except Exception as e:
-        info(f"{hostname.upper()}: feed load error: {e}")
-        mark_hostname_issue(
-            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
-        )
-        return releases
+    for days in range(4):
+        url = f"https://{sj_host}/api/releases/latest/{days}"
 
-    for release in data:
         try:
-            title = release.get("name").rstrip(".")
-            if not title:
-                continue
+            r = requests.get(url, headers=headers, timeout=30)
+            r.raise_for_status()
+            data = json.loads(r.content)
+        except Exception as e:
+            info(f"{hostname.upper()}: feed load error: {e}")
+            mark_hostname_issue(
+                hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+            )
+            return releases
 
-            published = convert_to_rss_date(release.get("createdAt"))
-            if not published:
-                continue
+        for release in data:
+            try:
+                title = release.get("name").rstrip(".")
+                if not title:
+                    continue
 
-            media = release.get("_media", {})
-            slug = media.get("slug")
-            if not slug:
-                continue
+                published = convert_to_rss_date(release.get("createdAt"))
+                if not published:
+                    continue
 
-            series_url = f"https://{sj_host}/serie/{slug}"
+                media = release.get("_media", {})
+                slug = media.get("slug")
+                if not slug:
+                    continue
 
-            mb = 0
-            size = 0
-            imdb_id = None
+                series_url = f"https://{sj_host}/serie/{slug}"
 
-            payload = urlsafe_b64encode(
-                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
-                    "utf-8"
-                )
-            ).decode("utf-8")
+                mb = 0
+                size = 0
+                imdb_id = None
 
-            link = (
-                f"{shared_state.values['internal_address']}/download/?payload={payload}"
-            )
+                payload = urlsafe_b64encode(
+                    f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                        "utf-8"
+                    )
+                ).decode("utf-8")
+
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": series_url,
+                        },
+                        "type": "protected",
+                    }
+                )
 
-            releases.append(
-                {
-                    "details": {
-                        "title": title,
-                        "hostname": hostname,
-                        "imdb_id": imdb_id,
-                        "link": link,
-                        "mirror": mirror,
-                        "size": size,
-                        "date": published,
-                        "source": series_url,
-                    },
-                    "type": "protected",
-                }
-            )
+            except Exception as e:
+                debug(f"{hostname.upper()}: feed parse error: {e}")
+                continue
 
-        except Exception as e:
-            debug(f"{hostname.upper()}: feed parse error: {e}")
-            continue
+        if releases:
+            break
 
     debug(f"Time taken: {time.time() - start_time:.2f}s")
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 2.7.0
+Version: 2.7.2
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Author-email: rix1337 <rix1337@users.noreply.github.com>
 License-File: LICENSE
@@ -1,6 +1,6 @@
 quasarr/__init__.py,sha256=nh1MU1Evh0G1Pm657qtMMWWX4NSHm6PpETqGFtK2QLE,17197
 quasarr/api/__init__.py,sha256=2CXR0JEjC3zooTB8Bk-z_aZgVM2cPE9ijfO5yJAE9CE,20142
-quasarr/api/arr/__init__.py,sha256=1NcjcfNOjzTQCUDedTGJluK0xU-6krh0T8QSGu7eoeU,22283
+quasarr/api/arr/__init__.py,sha256=LVHD26vBG_n2y1voceCJFMOf7M_s3UscNQRcxP9M6wk,18831
 quasarr/api/captcha/__init__.py,sha256=9wBmdYKn0DImiFatHe4y2icV57d4710vfXFncvPKki8,78030
 quasarr/api/config/__init__.py,sha256=FJZHALhL6NExonhCk53vOYnM1ICkmbTRue5UMCy5Yzg,8813
 quasarr/api/jdownloader/__init__.py,sha256=SixcV-sgMAunjAT5LawASb1qSuOOokorQo2F7cQ3jZ4,9427
@@ -9,7 +9,7 @@ quasarr/api/sponsors_helper/__init__.py,sha256=QAFXK_JTtAnstRAlieCbbCsoTwIcBu7ZX
 quasarr/api/statistics/__init__.py,sha256=rJz6S4jSnpFDWtjU7O-2jECUEqlueOHOEfRUjSb3cMY,7943
 quasarr/downloads/__init__.py,sha256=571QRloySskkg-JRi7JjyrKyfZIRnd9WgotbOOZ9k0s,17364
 quasarr/downloads/linkcrypters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-quasarr/downloads/linkcrypters/al.py,sha256=sNdEl1gogVn2xerd5fSOkAOgEF2sslQr81g34Jhu5So,8996
+quasarr/downloads/linkcrypters/al.py,sha256=_a5kbSLtwOf6y6gN_8_ZpJxvzWJa3lLQTqcTvk_Cjgo,8996
 quasarr/downloads/linkcrypters/filecrypt.py,sha256=yMkDM_GVOd3Bl9lgPkL1BDDuYOpMgxnVwlqRtskZ0Xo,17729
 quasarr/downloads/linkcrypters/hide.py,sha256=t9p_Hb5taJDuRAPaWZw7T1GTcLVgd8keD9LlZJ1-Gsg,6266
 quasarr/downloads/packages/__init__.py,sha256=MdKug4D-ex6sJBJuM0mi3_IjXX7AjV5ryUonOs2aupc,34887
@@ -33,43 +33,43 @@ quasarr/downloads/sources/wd.py,sha256=Xh5cvsGGBfM7iYWmBktmRHaWX6vZBRerBy-L8ycJW
 quasarr/downloads/sources/wx.py,sha256=b3_--zovX4BrknzGEmdh_QQw72dtyPfIrI_me_KyVjo,6772
 quasarr/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quasarr/providers/auth.py,sha256=qDdXr28SJ078Q8AVZ_50Z1FwVhfuinOtRl6JHF4RgnM,10412
-quasarr/providers/cloudflare.py,sha256=oLtwQ_UElLIWJ-c-qH2c9NUjsZtlmzYpXlAWhQcE1FM,9076
+quasarr/providers/cloudflare.py,sha256=gwcznTQXZ0Xc0G5kLy42Ix90_MicAdN55BSLjrw9LyE,9076
 quasarr/providers/hostname_issues.py,sha256=SpnZAxOLejSXJGFnYkCrRzR8D0IQsTMtylM-O0h21Z0,1462
 quasarr/providers/html_images.py,sha256=xmxfNwqAqQimVaOq7IelkxlBdcRpPZZLGli_MJDOacI,19755
 quasarr/providers/html_templates.py,sha256=e5b66N47y5Uq7Ikwcm6kOWiyXZ7Bz4gqg2DcajIBGgE,16360
-quasarr/providers/imdb_metadata.py,sha256=JP9YQ7jU1H2-dify6q-qE7gpbJ9ospY4evNLQaa4FDY,21946
+quasarr/providers/imdb_metadata.py,sha256=IOsmk3-e8b-CUJmbfn-dwFZbP33VOAjt095ji8tyM7A,21946
 quasarr/providers/jd_cache.py,sha256=RZsjw9X8wouVH__T2EG7w18CLUrxKh73BHnk_rpHdgE,13534
 quasarr/providers/log.py,sha256=E5g5Angdn9iflW_Z0PNbAmhVK_ZC6IwLnOaJ_mVarqM,7018
 quasarr/providers/myjd_api.py,sha256=hCWVU5IAl7QQV_icMF0B91y7CLLM_j2xfyByTP7an0g,35206
 quasarr/providers/notifications.py,sha256=fL0HQdk7jBLXToM_URQiJq6y2UAHs0RzMFMCFdb3SHQ,4894
 quasarr/providers/obfuscated.py,sha256=IAN0-5m6UblLjaFdPhRy75ryqDMF0nlbkClq5-n1bQQ,2275634
-quasarr/providers/shared_state.py,sha256=alUxC0KJQEGsERcHUSn-nSY53PcUjmgHk5R04kj_hOs,33247
+quasarr/providers/shared_state.py,sha256=SoxoXREaeC-Rbxh5S6P2zF6yfjp01tjfQTOFFiKutEI,33247
 quasarr/providers/statistics.py,sha256=1X_Aa7TE3W7ovwkemVMsgIx55Jw3eYMiyUxuCUDgO5s,8666
 quasarr/providers/utils.py,sha256=FR0tGwao1ytYtWbmUocaHwt29pHKqskKMH2YE2bgSFI,12481
-quasarr/providers/version.py,sha256=vYbQKxf4PPBZ1AradCg9Rn9q7TQrQLaNkfDHTi2Cs_k,4424
+quasarr/providers/version.py,sha256=8ZasSv75vxS89_3atRUTM_C82mBISgjkiiKPzP-rQsQ,4424
 quasarr/providers/web_server.py,sha256=tHkMxhV6eaHC8cWsEpbUqD_U29IFE24VsU6tjk-xCEM,1765
 quasarr/providers/sessions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quasarr/providers/sessions/al.py,sha256=AOWl1v-wcwxUeo1bRizd7zAzbUludsFbgCGICHCVZFQ,13270
 quasarr/providers/sessions/dd.py,sha256=K503Ny-3zWolzpGVane4ag5Gu1yzPv49eni0I8Hw4v8,3353
 quasarr/providers/sessions/dl.py,sha256=PnyuX_h4gQIk81w0NKYCFxpg-Il0gi72BQxbdLED1ds,5820
 quasarr/providers/sessions/nx.py,sha256=BkEMEVAiJQBlsGQYw4ZTSyys8Ua-WToAmqL0Il41OAg,3491
-quasarr/search/__init__.py,sha256=ggQG8NreFQ4IU6SAigh3YXCScUcQbcQjf3-nyfICOoQ,8162
+quasarr/search/__init__.py,sha256=1EDjnppuzpxKEdedU4QP1goqF35xYKD8O0JsHvIW12c,10466
 quasarr/search/sources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quasarr/search/sources/al.py,sha256=2RsaIfA4o3uMZuJZwPh1tETdLkNeAQ6-ymFVBL706Po,18206
 quasarr/search/sources/by.py,sha256=cgy39DN0LIMqO9Yfs6mx7Uio9unuEk4Our562BKQWz0,8971
 quasarr/search/sources/dd.py,sha256=J5SBHgItYPS3UL_Fu8a9wP1j9Rs_t9-ZbLcZaRIjU-I,6144
-quasarr/search/sources/dj.py,sha256=6mNuyhnG1MEf4fAVYjGGSbY_E_s9ENmiGRX6Eb16Qqw,7665
+quasarr/search/sources/dj.py,sha256=tkgwkEFWOcpXkD3N5SJdpG945uYT8tO9Y1y1bv7JYig,7909
 quasarr/search/sources/dl.py,sha256=QeKO7nKtMDzXLoWtus1Jl0uADcpKphfoLBNTlO85tYU,13888
 quasarr/search/sources/dt.py,sha256=yAr3MKCLq-KOLaIv7npNprKOxHCaOEJ4eOqQErguohU,10480
 quasarr/search/sources/dw.py,sha256=dbD5XErlPv3lJ2J7iyVKuFAuWmzidNNaOdT9mH_0b3k,9149
 quasarr/search/sources/fx.py,sha256=gJKEdMGNbnQNaj_pibUrajVB3Wei4hUqp8hmHski9Ow,10797
 quasarr/search/sources/he.py,sha256=m9zVU5NmctKQbc2aP7A-Yw-y94yX5HnjanVFOCnmdW0,7789
 quasarr/search/sources/hs.py,sha256=pq-MwK7FGokszTMiojAq3miw-yAqZhRDO7xGwRQdUMg,17815
-quasarr/search/sources/mb.py,sha256=f45R9Yh8kFtCudxhqNLFUwlQngMUfnZCowK65hhE3oM,8198
+quasarr/search/sources/mb.py,sha256=Y4pn3Hsxeoi4NUcEqaMjTVwHJvBbAj5aFPsgVTb3PXs,7805
 quasarr/search/sources/nk.py,sha256=r7t4mU4CP4IU7sr07f9NGa9pdAJnkKA7SeGZoUAdsLI,7497
 quasarr/search/sources/nx.py,sha256=px29xMPSzNs60fM7mk59JgMZJaTHp-vbLAkYNy74uVU,8396
-quasarr/search/sources/sf.py,sha256=l0kZ0crgf-ZOBvZCT7wk_7coS3Siw0KRycXeconHxA0,17434
-quasarr/search/sources/sj.py,sha256=t3dp_SypujEfz0u8hjS5Xcflzf637EYrkUASAKUzhk0,7882
+quasarr/search/sources/sf.py,sha256=ksVbjlf81PfwdlVmAKUU4LtpWegUjl27G582QI0UAxE,17434
+quasarr/search/sources/sj.py,sha256=IHM-mdXLZOen1MGtary_GEE7eVQjq79v-bjoK4bG9Yo,8126
 quasarr/search/sources/sl.py,sha256=9IqxOMJxL-SI5xwDVYO6PPPuatHOAXyh0_0bvRSaIfc,11511
 quasarr/search/sources/wd.py,sha256=lJmeEZ9A3pDGX-BRTomZa7HyaRt1-zUwbPC_2oUNHdI,10389
 quasarr/search/sources/wx.py,sha256=VLWY_BuVnk__MPdfufmQ2zkq4pGU1eD1-lLhWXQQPP4,14663
@@ -77,8 +77,8 @@ quasarr/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quasarr/storage/config.py,sha256=sjpfVq_Bxkj9gVwCXB_MAreB9ezf-YEJQKxQmQhUv9s,6540
 quasarr/storage/setup.py,sha256=zb83kvQfxMFHxC7EvWWaVTy0MtG7iEjMRyfY4hdcbOk,61520
 quasarr/storage/sqlite_database.py,sha256=tmHUotMWIwtyH-g244WvcGhMQMMjGokncv7JpFSi8NM,3639
-quasarr-2.7.0.dist-info/METADATA,sha256=NtSLKAF7rVh-4Y_5M1ibnP8fgG7EOS8jdy1h_qZsjdA,14822
-quasarr-2.7.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-quasarr-2.7.0.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
-quasarr-2.7.0.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
-quasarr-2.7.0.dist-info/RECORD,,
+quasarr-2.7.2.dist-info/METADATA,sha256=UtPWm-7fFS8MM231NCSjmSVEhJ4Vc-52gJSVojrEmvE,14822
+quasarr-2.7.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+quasarr-2.7.2.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
+quasarr-2.7.2.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
+quasarr-2.7.2.dist-info/RECORD,,