quasarr 2.7.1__py3-none-any.whl → 2.7.2__py3-none-any.whl

This diff compares the contents of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.
@@ -15,7 +15,7 @@ from quasarr.downloads import download
  from quasarr.downloads.packages import delete_package, get_packages
  from quasarr.providers import shared_state
  from quasarr.providers.auth import require_api_key
- from quasarr.providers.log import debug, info
+ from quasarr.providers.log import debug, error, info, warn
  from quasarr.providers.version import get_version
  from quasarr.search import get_search_results

@@ -420,9 +420,8 @@ def setup_arr_routes(app):
  </channel>
  </rss>"""
  except Exception as e:
- info(f"Error loading search results: {e}")
- info(traceback.format_exc())
- info(f"[ERROR] Unknown indexer request: {dict(request.query)}")
+ error(f"Error loading search results: {e} " + traceback.format_exc())
+ warn(f"Unknown indexer request: {dict(request.query)}")
  return """<?xml version="1.0" encoding="UTF-8"?>
  <rss>
  <channel>
@@ -432,5 +431,5 @@ def setup_arr_routes(app):
  </channel>
  </rss>"""

- info(f"[ERROR] Unknown general request: {dict(request.query)}")
+ warn(f"[ERROR] Unknown general request: {dict(request.query)}")
  return {"error": True}
@@ -118,7 +118,7 @@ def extract_valid_hostname(url, shorthand):
  message = f"Error: {e}. Please provide a valid URL."
  domain = None

- print(message)
+ debug(message)
  return {"domain": domain, "message": message}


@@ -5,7 +5,7 @@
  import re
  import sys

- __version__ = "2.7.1"
+ __version__ = "2.7.2"


  def get_version():
@@ -4,9 +4,11 @@

  import time
  from concurrent.futures import ThreadPoolExecutor, as_completed
+ from datetime import timezone
+ from email.utils import parsedate_to_datetime

  from quasarr.providers.imdb_metadata import get_imdb_metadata
- from quasarr.providers.log import debug, info
+ from quasarr.providers.log import debug, info, trace
  from quasarr.search.sources.al import al_feed, al_search
  from quasarr.search.sources.by import by_feed, by_search
  from quasarr.search.sources.dd import dd_feed, dd_search
@@ -72,54 +74,54 @@ def get_search_results(

  # Mappings
  imdb_map = [
- (al, al_search),
- (by, by_search),
- (dd, dd_search),
- (dl, dl_search),
- (dt, dt_search),
- (dj, dj_search),
- (dw, dw_search),
- (fx, fx_search),
- (he, he_search),
- (hs, hs_search),
- (mb, mb_search),
- (nk, nk_search),
- (nx, nx_search),
- (sf, sf_search),
- (sj, sj_search),
- (sl, sl_search),
- (wd, wd_search),
- (wx, wx_search),
+ ("al", al, al_search),
+ ("by", by, by_search),
+ ("dd", dd, dd_search),
+ ("dl", dl, dl_search),
+ ("dt", dt, dt_search),
+ ("dj", dj, dj_search),
+ ("dw", dw, dw_search),
+ ("fx", fx, fx_search),
+ ("he", he, he_search),
+ ("hs", hs, hs_search),
+ ("mb", mb, mb_search),
+ ("nk", nk, nk_search),
+ ("nx", nx, nx_search),
+ ("sf", sf, sf_search),
+ ("sj", sj, sj_search),
+ ("sl", sl, sl_search),
+ ("wd", wd, wd_search),
+ ("wx", wx, wx_search),
  ]

  phrase_map = [
- (by, by_search),
- (dl, dl_search),
- (dt, dt_search),
- (nx, nx_search),
- (sl, sl_search),
- (wd, wd_search),
+ ("by", by, by_search),
+ ("dl", dl, dl_search),
+ ("dt", dt, dt_search),
+ ("nx", nx, nx_search),
+ ("sl", sl, sl_search),
+ ("wd", wd, wd_search),
  ]

  feed_map = [
- (al, al_feed),
- (by, by_feed),
- (dd, dd_feed),
- (dj, dj_feed),
- (dl, dl_feed),
- (dt, dt_feed),
- (dw, dw_feed),
- (fx, fx_feed),
- (he, he_feed),
- (hs, hs_feed),
- (mb, mb_feed),
- (nk, nk_feed),
- (nx, nx_feed),
- (sf, sf_feed),
- (sj, sj_feed),
- (sl, sl_feed),
- (wd, wd_feed),
- (wx, wx_feed),
+ ("al", al, al_feed),
+ ("by", by, by_feed),
+ ("dd", dd, dd_feed),
+ ("dj", dj, dj_feed),
+ ("dl", dl, dl_feed),
+ ("dt", dt, dt_feed),
+ ("dw", dw, dw_feed),
+ ("fx", fx, fx_feed),
+ ("he", he, he_feed),
+ ("hs", hs, hs_feed),
+ ("mb", mb, mb_feed),
+ ("nk", nk, nk_feed),
+ ("nx", nx, nx_feed),
+ ("sf", sf, sf_feed),
+ ("sj", sj, sj_feed),
+ ("sl", sl, sl_feed),
+ ("wd", wd, wd_feed),
+ ("wx", wx, wx_feed),
  ]

  # Add searches
@@ -128,27 +130,27 @@ def get_search_results(
  (shared_state, start_time, request_from, imdb_id),
  {"mirror": mirror, "season": season, "episode": episode},
  )
- for flag, func in imdb_map:
- if flag:
- search_executor.add(func, args, kwargs, True)
+ for name, url, func in imdb_map:
+ if url:
+ search_executor.add(func, args, kwargs, True, name.upper())

  elif search_phrase and docs_search:
  args, kwargs = (
  (shared_state, start_time, request_from, search_phrase),
  {"mirror": mirror, "season": season, "episode": episode},
  )
- for flag, func in phrase_map:
- if flag:
- search_executor.add(func, args, kwargs)
+ for name, url, func in phrase_map:
+ if url:
+ search_executor.add(func, args, kwargs, source_name=name.upper())

  elif search_phrase:
  debug(f"Search phrase '{search_phrase}' is not supported for {request_from}.")

  else:
  args, kwargs = ((shared_state, start_time, request_from), {"mirror": mirror})
- for flag, func in feed_map:
- if flag:
- search_executor.add(func, args, kwargs)
+ for name, url, func in feed_map:
+ if url:
+ search_executor.add(func, args, kwargs, source_name=name.upper())

  # Clean description for Console UI
  if imdb_id:
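
The two hunks above widen each mapping entry from a (hostname, callable) pair to a (name, hostname, callable) triple, so the executor can label every source in its log output. A minimal sketch of that dispatch pattern, using hypothetical stand-ins rather than quasarr's real hostnames and modules:

    # Sketch only: "al"/"by", the host string, and both functions are
    # placeholders for quasarr's configured sources.
    def al_search(query):
        return [f"al result for {query}"]

    def by_search(query):
        return []

    imdb_map = [
        ("al", "al-host.example", al_search),  # truthy hostname: enabled
        ("by", None, by_search),               # falsy hostname: skipped
    ]

    for name, url, func in imdb_map:
        if url:  # dispatch only to sources with a configured hostname
            print(name.upper(), func("tt0111161"))
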
@@ -165,12 +167,30 @@ def get_search_results(

  elapsed_time = time.time() - start_time

+ # Sort results by date (newest first)
+ def get_date(item):
+ try:
+ dt = parsedate_to_datetime(item.get("details", {}).get("date", ""))
+ if dt.tzinfo is None:
+ dt = dt.replace(tzinfo=timezone.utc)
+ return dt
+ except Exception:
+ return parsedate_to_datetime("Thu, 01 Jan 1970 00:00:00 +0000")
+
+ results.sort(key=get_date, reverse=True)
+
  # Calculate pagination for logging and return
  total_count = len(results)

  # Slicing
  sliced_results = results[offset : offset + limit]

+ if sliced_results:
+ trace(f"First {len(sliced_results)} results sorted by date:")
+ for i, res in enumerate(sliced_results):
+ details = res.get("details", {})
+ trace(f"{i + 1}. {details.get('date')} | {details.get('title')}")
+
  # Formatting for log (1-based index for humans)
  log_start = min(offset + 1, total_count) if total_count > 0 else 0
  log_end = min(offset + limit, total_count)
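
The sort introduced above keys on each result's RFC 2822 date string: naive timestamps are coerced to UTC so mixed offsets compare safely, and unparseable dates fall back to the Unix epoch, sinking to the end of the newest-first ordering. A self-contained illustration of that behavior:

    from datetime import timezone
    from email.utils import parsedate_to_datetime

    def get_date(item):
        # Same approach as the new sort key: parse the RFC 2822 date,
        # force naive datetimes to UTC, fall back to the Unix epoch.
        try:
            dt = parsedate_to_datetime(item.get("details", {}).get("date", ""))
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=timezone.utc)
            return dt
        except Exception:
            return parsedate_to_datetime("Thu, 01 Jan 1970 00:00:00 +0000")

    results = [
        {"details": {"date": "Mon, 02 Jun 2025 10:00:00 +0200", "title": "older"}},
        {"details": {"date": "Tue, 03 Jun 2025 08:00:00 GMT", "title": "newer"}},
        {"details": {"date": "not a date", "title": "undated"}},
    ]
    results.sort(key=get_date, reverse=True)
    print([r["details"]["title"] for r in results])  # ['newer', 'older', 'undated']
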
@@ -193,12 +213,19 @@ class SearchExecutor:
  def __init__(self):
  self.searches = []

- def add(self, func, args, kwargs, use_cache=False):
+ def add(self, func, args, kwargs, use_cache=False, source_name=None):
  key_args = list(args)
  key_args[1] = None
  key_args = tuple(key_args)
  key = hash((func.__name__, key_args, frozenset(kwargs.items())))
- self.searches.append((key, lambda: func(*args, **kwargs), use_cache))
+ self.searches.append(
+ (
+ key,
+ lambda: func(*args, **kwargs),
+ use_cache,
+ source_name or func.__name__,
+ )
+ )

  def run_all(self):
  results = []
@@ -213,7 +240,7 @@ class SearchExecutor:
  current_index = 0
  pending_futures = []

- for key, func, use_cache in self.searches:
+ for key, func, use_cache, source_name in self.searches:
  cached_result = None
  exp = 0

@@ -233,27 +260,34 @@ class SearchExecutor:
  all_cached = False
  future = executor.submit(func)
  cache_key = key if use_cache else None
- future_to_meta[future] = (current_index, cache_key)
+ future_to_meta[future] = (current_index, cache_key, source_name)
  pending_futures.append(future)
  current_index += 1

  if pending_futures:
- icons = ["▪️"] * len(pending_futures)
+ results_badges = [""] * len(pending_futures)

  for future in as_completed(pending_futures):
- index, cache_key = future_to_meta[future]
+ index, cache_key, source_name = future_to_meta[future]
  try:
  res = future.result()
- status = "✅" if res and len(res) > 0 else "⚪"
- icons[index] = status
+ if res and len(res) > 0:
+ badge = f"<bg green><black>{source_name}</black></bg green>"
+ else:
+ debug(f"❌ No results returned by {source_name}")
+ badge = f"<bg black><white>{source_name}</white></bg black>"
+
+ results_badges[index] = badge
  results.extend(res)
  if cache_key:
  search_cache.set(cache_key, res)
  except Exception as e:
- icons[index] = "❌"
+ results_badges[index] = (
+ f"<bg red><white>{source_name}</white></bg red>"
+ )
  info(f"Search error: {e}")

- bar_str = f" [{''.join(icons)}]"
+ bar_str = f" [{' '.join(results_badges)}]"

  return results, bar_str, all_cached, min_ttl

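The reworked run loop above swaps the fixed icon bar for per-source badges, using future_to_meta to map each completed future back to its submission slot. The gist of that pattern, with plain-text badges standing in for quasarr's color markup:

    from concurrent.futures import ThreadPoolExecutor, as_completed

    # Hypothetical stand-in sources; the real code submits search callables.
    searches = [("ok", lambda: ["hit"]), ("empty", lambda: []),
                ("boom", lambda: 1 / 0)]

    results, badges = [], [""] * len(searches)
    with ThreadPoolExecutor() as executor:
        future_to_meta = {
            executor.submit(func): (index, name)
            for index, (name, func) in enumerate(searches)
        }
        for future in as_completed(future_to_meta):
            index, name = future_to_meta[future]
            try:
                res = future.result()
                # non-empty -> "hit" (green in quasarr), empty -> "none" (black)
                badges[index] = f"[{name}:{'hit' if res else 'none'}]"
                results.extend(res)
            except Exception:
                badges[index] = f"[{name}:error]"  # red badge in quasarr

    print(" ".join(badges))  # [ok:hit] [empty:none] [boom:error]
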
@@ -37,70 +37,73 @@ def dj_feed(shared_state, start_time, request_from, mirror=None):
  sj_host = shared_state.values["config"]("Hostnames").get(hostname)
  password = sj_host

- url = f"https://{sj_host}/api/releases/latest/0"
  headers = {"User-Agent": shared_state.values["user_agent"]}

- try:
- r = requests.get(url, headers=headers, timeout=30)
- r.raise_for_status()
- data = json.loads(r.content)
- except Exception as e:
- error(f"Feed load error: {e}")
- mark_hostname_issue(
- hostname, "feed", str(e) if "e" in dir() else "Error occurred"
- )
- return releases
+ for days in range(4):
+ url = f"https://{sj_host}/api/releases/latest/{days}"

- for release in data:
  try:
- title = release.get("name").rstrip(".")
- if not title:
- continue
+ r = requests.get(url, headers=headers, timeout=30)
+ r.raise_for_status()
+ data = json.loads(r.content)
+ except Exception as e:
+ error(f"Feed load error: {e}")
+ mark_hostname_issue(
+ hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+ )
+ return releases

- published = convert_to_rss_date(release.get("createdAt"))
- if not published:
- continue
+ for release in data:
+ try:
+ title = release.get("name").rstrip(".")
+ if not title:
+ continue

- media = release.get("_media", {})
- slug = media.get("slug")
- if not slug:
- continue
+ published = convert_to_rss_date(release.get("createdAt"))
+ if not published:
+ continue

- series_url = f"https://{sj_host}/serie/{slug}"
+ media = release.get("_media", {})
+ slug = media.get("slug")
+ if not slug:
+ continue

- mb = 0
- size = 0
- imdb_id = None
+ series_url = f"https://{sj_host}/serie/{slug}"

- payload = urlsafe_b64encode(
- f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
- "utf-8"
- )
- ).decode("utf-8")
+ mb = 0
+ size = 0
+ imdb_id = None

- link = (
- f"{shared_state.values['internal_address']}/download/?payload={payload}"
- )
+ payload = urlsafe_b64encode(
+ f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+ "utf-8"
+ )
+ ).decode("utf-8")
+
+ link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+ releases.append(
+ {
+ "details": {
+ "title": title,
+ "hostname": hostname,
+ "imdb_id": imdb_id,
+ "link": link,
+ "mirror": mirror,
+ "size": size,
+ "date": published,
+ "source": series_url,
+ },
+ "type": "protected",
+ }
+ )

- releases.append(
- {
- "details": {
- "title": title,
- "hostname": hostname,
- "imdb_id": imdb_id,
- "link": link,
- "mirror": mirror,
- "size": size,
- "date": published,
- "source": series_url,
- },
- "type": "protected",
- }
- )
+ except Exception as e:
+ warn(f"Feed parse error: {e}")
+ continue

- except Exception as e:
- warn(f"Feed parse error: {e}")
- continue
+ if releases:
+ break

  debug(f"Time taken: {time.time() - start_time:.2f}s")

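Both dj_feed above and sj_feed below now walk /api/releases/latest/{days} from 0 upward and stop at the first response that yields releases, instead of querying only day 0. A schematic of that fallback loop, with a stubbed fetch in place of the real HTTP call:

    # Sketch of the widening fallback: try successive /latest/{days} pages
    # and stop as soon as one produces releases. fetch_day is a stub.
    def fetch_day(days):
        return ["release-a", "release-b"] if days == 2 else []

    releases = []
    for days in range(4):
        try:
            releases = fetch_day(days)  # real code: GET .../latest/{days}
        except Exception as e:
            print(f"Feed load error: {e}")
            break  # the real code returns the (empty) releases list here
        if releases:
            break

    print(releases)  # ['release-a', 'release-b']
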
@@ -12,7 +12,7 @@ import requests
  from bs4 import BeautifulSoup

  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
- from quasarr.providers.log import debug, warn
+ from quasarr.providers.log import debug, error, warn

  hostname = "mb"
  supported_mirrors = ["rapidgator", "ddownload"]
@@ -104,21 +104,9 @@ def _parse_posts(
  if " " in title:
  continue

- # can't check for mirrors in search context
- if mirror_filter and mirror_filter not in supported_mirrors:
- continue
- else:
- mirror_candidates = []
- for strong in post.find_all(
- "strong", string=re.compile(r"^Download", re.I)
- ):
- link_tag = strong.find_next_sibling("a")
- if link_tag and link_tag.get_text(strip=True):
- host = link_tag.get_text(strip=True).split(".")[0].lower()
- mirror_candidates.append(host)
- valid = [m for m in mirror_candidates if m in supported_mirrors]
- if not valid or (mirror_filter and mirror_filter not in valid):
- continue
+ # can't check for mirrors in soup, so we use the hardcoded list
+ if mirror_filter and mirror_filter not in supported_mirrors:
+ continue

  # extract IMDb ID
  imdb_id = None
@@ -128,9 +116,18 @@ def _parse_posts(
  imdb_id = m.group(1)
  break

+ if not imdb_id:
+ m = IMDB_REGEX.search(post.get_text())
+ if m:
+ imdb_id = m.group(1)
+
  # size extraction
  mb = size_bytes = 0
- size_match = re.search(r"Größe:\s*([\d\.]+)\s*([GMK]B)", post.get_text())
+ size_match = re.search(
+ r"(?:Größe|Size).*?:\s*([\d\.]+)\s*([GMK]B)",
+ post.get_text(),
+ re.IGNORECASE,
+ )
  if size_match:
  sz = {"size": size_match.group(1), "sizeunit": size_match.group(2)}
  mb = shared_state.convert_to_mb(sz)
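
The widened size pattern above accepts both German and English labels, arbitrary filler before the colon, and mixed case. A quick check of what the same regex matches:

    import re

    SIZE_RE = re.compile(r"(?:Größe|Size).*?:\s*([\d\.]+)\s*([GMK]B)", re.IGNORECASE)

    for text in ("Größe: 1.4 GB", "Size (approx.): 700 MB", "größe: 350 mb"):
        m = SIZE_RE.search(text)
        print(m.groups() if m else None)
    # ('1.4', 'GB') / ('700', 'MB') / ('350', 'mb')
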
@@ -159,7 +156,7 @@ def _parse_posts(
  }
  )
  except Exception as e:
- debug(f"Error parsing {hostname.upper()} post: {e}")
+ error(f"Error parsing {hostname.upper()} post: {e}")
  continue
  return releases

@@ -316,7 +316,7 @@ def sf_search(
  # fetch API HTML
  epoch = str(datetime.now().timestamp()).replace(".", "")[:-3]
  api_url = f"https://{sf}/api/v1/{season_id}/season/ALL?lang=ALL&_={epoch}"
- debug(f"Requesting SF API URL: {api_url}")
+ trace(f"Requesting SF API URL: {api_url}")
  try:
  r = requests.get(api_url, headers=headers, timeout=10)
  r.raise_for_status()
@@ -39,70 +39,73 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
  sj_host = shared_state.values["config"]("Hostnames").get(hostname)
  password = sj_host

- url = f"https://{sj_host}/api/releases/latest/0"
  headers = {"User-Agent": shared_state.values["user_agent"]}

- try:
- r = requests.get(url, headers=headers, timeout=30)
- r.raise_for_status()
- data = json.loads(r.content)
- except Exception as e:
- info(f"{hostname.upper()}: feed load error: {e}")
- mark_hostname_issue(
- hostname, "feed", str(e) if "e" in dir() else "Error occurred"
- )
- return releases
+ for days in range(4):
+ url = f"https://{sj_host}/api/releases/latest/{days}"

- for release in data:
  try:
- title = release.get("name").rstrip(".")
- if not title:
- continue
+ r = requests.get(url, headers=headers, timeout=30)
+ r.raise_for_status()
+ data = json.loads(r.content)
+ except Exception as e:
+ info(f"{hostname.upper()}: feed load error: {e}")
+ mark_hostname_issue(
+ hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+ )
+ return releases

- published = convert_to_rss_date(release.get("createdAt"))
- if not published:
- continue
+ for release in data:
+ try:
+ title = release.get("name").rstrip(".")
+ if not title:
+ continue

- media = release.get("_media", {})
- slug = media.get("slug")
- if not slug:
- continue
+ published = convert_to_rss_date(release.get("createdAt"))
+ if not published:
+ continue

- series_url = f"https://{sj_host}/serie/{slug}"
+ media = release.get("_media", {})
+ slug = media.get("slug")
+ if not slug:
+ continue

- mb = 0
- size = 0
- imdb_id = None
+ series_url = f"https://{sj_host}/serie/{slug}"

- payload = urlsafe_b64encode(
- f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
- "utf-8"
- )
- ).decode("utf-8")
+ mb = 0
+ size = 0
+ imdb_id = None

- link = (
- f"{shared_state.values['internal_address']}/download/?payload={payload}"
- )
+ payload = urlsafe_b64encode(
+ f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+ "utf-8"
+ )
+ ).decode("utf-8")
+
+ link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+ releases.append(
+ {
+ "details": {
+ "title": title,
+ "hostname": hostname,
+ "imdb_id": imdb_id,
+ "link": link,
+ "mirror": mirror,
+ "size": size,
+ "date": published,
+ "source": series_url,
+ },
+ "type": "protected",
+ }
+ )

- releases.append(
- {
- "details": {
- "title": title,
- "hostname": hostname,
- "imdb_id": imdb_id,
- "link": link,
- "mirror": mirror,
- "size": size,
- "date": published,
- "source": series_url,
- },
- "type": "protected",
- }
- )
+ except Exception as e:
+ debug(f"{hostname.upper()}: feed parse error: {e}")
+ continue

- except Exception as e:
- debug(f"{hostname.upper()}: feed parse error: {e}")
- continue
+ if releases:
+ break

  debug(f"Time taken: {time.time() - start_time:.2f}s")

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: quasarr
- Version: 2.7.1
+ Version: 2.7.2
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
  Author-email: rix1337 <rix1337@users.noreply.github.com>
  License-File: LICENSE
@@ -1,6 +1,6 @@
  quasarr/__init__.py,sha256=nh1MU1Evh0G1Pm657qtMMWWX4NSHm6PpETqGFtK2QLE,17197
  quasarr/api/__init__.py,sha256=2CXR0JEjC3zooTB8Bk-z_aZgVM2cPE9ijfO5yJAE9CE,20142
- quasarr/api/arr/__init__.py,sha256=0g-2yJC73xfaPz7dWplWI5_ggGqw5kYwGm38wOYjGXM,18844
+ quasarr/api/arr/__init__.py,sha256=LVHD26vBG_n2y1voceCJFMOf7M_s3UscNQRcxP9M6wk,18831
  quasarr/api/captcha/__init__.py,sha256=9wBmdYKn0DImiFatHe4y2icV57d4710vfXFncvPKki8,78030
  quasarr/api/config/__init__.py,sha256=FJZHALhL6NExonhCk53vOYnM1ICkmbTRue5UMCy5Yzg,8813
  quasarr/api/jdownloader/__init__.py,sha256=SixcV-sgMAunjAT5LawASb1qSuOOokorQo2F7cQ3jZ4,9427
@@ -43,33 +43,33 @@ quasarr/providers/log.py,sha256=E5g5Angdn9iflW_Z0PNbAmhVK_ZC6IwLnOaJ_mVarqM,7018
  quasarr/providers/myjd_api.py,sha256=hCWVU5IAl7QQV_icMF0B91y7CLLM_j2xfyByTP7an0g,35206
  quasarr/providers/notifications.py,sha256=fL0HQdk7jBLXToM_URQiJq6y2UAHs0RzMFMCFdb3SHQ,4894
  quasarr/providers/obfuscated.py,sha256=IAN0-5m6UblLjaFdPhRy75ryqDMF0nlbkClq5-n1bQQ,2275634
- quasarr/providers/shared_state.py,sha256=alUxC0KJQEGsERcHUSn-nSY53PcUjmgHk5R04kj_hOs,33247
+ quasarr/providers/shared_state.py,sha256=SoxoXREaeC-Rbxh5S6P2zF6yfjp01tjfQTOFFiKutEI,33247
  quasarr/providers/statistics.py,sha256=1X_Aa7TE3W7ovwkemVMsgIx55Jw3eYMiyUxuCUDgO5s,8666
  quasarr/providers/utils.py,sha256=FR0tGwao1ytYtWbmUocaHwt29pHKqskKMH2YE2bgSFI,12481
- quasarr/providers/version.py,sha256=rlliPV3fGhAn-87DmPCAlY0Q74O2bvpzbG_pIl3ikSE,4424
+ quasarr/providers/version.py,sha256=8ZasSv75vxS89_3atRUTM_C82mBISgjkiiKPzP-rQsQ,4424
  quasarr/providers/web_server.py,sha256=tHkMxhV6eaHC8cWsEpbUqD_U29IFE24VsU6tjk-xCEM,1765
  quasarr/providers/sessions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  quasarr/providers/sessions/al.py,sha256=AOWl1v-wcwxUeo1bRizd7zAzbUludsFbgCGICHCVZFQ,13270
  quasarr/providers/sessions/dd.py,sha256=K503Ny-3zWolzpGVane4ag5Gu1yzPv49eni0I8Hw4v8,3353
  quasarr/providers/sessions/dl.py,sha256=PnyuX_h4gQIk81w0NKYCFxpg-Il0gi72BQxbdLED1ds,5820
  quasarr/providers/sessions/nx.py,sha256=BkEMEVAiJQBlsGQYw4ZTSyys8Ua-WToAmqL0Il41OAg,3491
- quasarr/search/__init__.py,sha256=O3rj40brKvimHRA2YAlk9aBL9lh3NvKDZ5c_MSJc2pQ,8790
+ quasarr/search/__init__.py,sha256=1EDjnppuzpxKEdedU4QP1goqF35xYKD8O0JsHvIW12c,10466
  quasarr/search/sources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  quasarr/search/sources/al.py,sha256=2RsaIfA4o3uMZuJZwPh1tETdLkNeAQ6-ymFVBL706Po,18206
  quasarr/search/sources/by.py,sha256=cgy39DN0LIMqO9Yfs6mx7Uio9unuEk4Our562BKQWz0,8971
  quasarr/search/sources/dd.py,sha256=J5SBHgItYPS3UL_Fu8a9wP1j9Rs_t9-ZbLcZaRIjU-I,6144
- quasarr/search/sources/dj.py,sha256=6mNuyhnG1MEf4fAVYjGGSbY_E_s9ENmiGRX6Eb16Qqw,7665
+ quasarr/search/sources/dj.py,sha256=tkgwkEFWOcpXkD3N5SJdpG945uYT8tO9Y1y1bv7JYig,7909
  quasarr/search/sources/dl.py,sha256=QeKO7nKtMDzXLoWtus1Jl0uADcpKphfoLBNTlO85tYU,13888
  quasarr/search/sources/dt.py,sha256=yAr3MKCLq-KOLaIv7npNprKOxHCaOEJ4eOqQErguohU,10480
  quasarr/search/sources/dw.py,sha256=dbD5XErlPv3lJ2J7iyVKuFAuWmzidNNaOdT9mH_0b3k,9149
  quasarr/search/sources/fx.py,sha256=gJKEdMGNbnQNaj_pibUrajVB3Wei4hUqp8hmHski9Ow,10797
  quasarr/search/sources/he.py,sha256=m9zVU5NmctKQbc2aP7A-Yw-y94yX5HnjanVFOCnmdW0,7789
  quasarr/search/sources/hs.py,sha256=pq-MwK7FGokszTMiojAq3miw-yAqZhRDO7xGwRQdUMg,17815
- quasarr/search/sources/mb.py,sha256=f45R9Yh8kFtCudxhqNLFUwlQngMUfnZCowK65hhE3oM,8198
+ quasarr/search/sources/mb.py,sha256=Y4pn3Hsxeoi4NUcEqaMjTVwHJvBbAj5aFPsgVTb3PXs,7805
  quasarr/search/sources/nk.py,sha256=r7t4mU4CP4IU7sr07f9NGa9pdAJnkKA7SeGZoUAdsLI,7497
  quasarr/search/sources/nx.py,sha256=px29xMPSzNs60fM7mk59JgMZJaTHp-vbLAkYNy74uVU,8396
- quasarr/search/sources/sf.py,sha256=l0kZ0crgf-ZOBvZCT7wk_7coS3Siw0KRycXeconHxA0,17434
- quasarr/search/sources/sj.py,sha256=t3dp_SypujEfz0u8hjS5Xcflzf637EYrkUASAKUzhk0,7882
+ quasarr/search/sources/sf.py,sha256=ksVbjlf81PfwdlVmAKUU4LtpWegUjl27G582QI0UAxE,17434
+ quasarr/search/sources/sj.py,sha256=IHM-mdXLZOen1MGtary_GEE7eVQjq79v-bjoK4bG9Yo,8126
  quasarr/search/sources/sl.py,sha256=9IqxOMJxL-SI5xwDVYO6PPPuatHOAXyh0_0bvRSaIfc,11511
  quasarr/search/sources/wd.py,sha256=lJmeEZ9A3pDGX-BRTomZa7HyaRt1-zUwbPC_2oUNHdI,10389
  quasarr/search/sources/wx.py,sha256=VLWY_BuVnk__MPdfufmQ2zkq4pGU1eD1-lLhWXQQPP4,14663
@@ -77,8 +77,8 @@ quasarr/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  quasarr/storage/config.py,sha256=sjpfVq_Bxkj9gVwCXB_MAreB9ezf-YEJQKxQmQhUv9s,6540
  quasarr/storage/setup.py,sha256=zb83kvQfxMFHxC7EvWWaVTy0MtG7iEjMRyfY4hdcbOk,61520
  quasarr/storage/sqlite_database.py,sha256=tmHUotMWIwtyH-g244WvcGhMQMMjGokncv7JpFSi8NM,3639
- quasarr-2.7.1.dist-info/METADATA,sha256=ZDpw6B-2AFHJZo-jop8bZa9CbFmaRAjTrKjpSDmp-bE,14822
- quasarr-2.7.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- quasarr-2.7.1.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
- quasarr-2.7.1.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
- quasarr-2.7.1.dist-info/RECORD,,
+ quasarr-2.7.2.dist-info/METADATA,sha256=UtPWm-7fFS8MM231NCSjmSVEhJ4Vc-52gJSVojrEmvE,14822
+ quasarr-2.7.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ quasarr-2.7.2.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
+ quasarr-2.7.2.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
+ quasarr-2.7.2.dist-info/RECORD,,