quasarr 2.6.1__py3-none-any.whl → 2.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (54)
  1. quasarr/__init__.py +71 -61
  2. quasarr/api/__init__.py +1 -2
  3. quasarr/api/arr/__init__.py +66 -57
  4. quasarr/api/captcha/__init__.py +203 -154
  5. quasarr/downloads/__init__.py +12 -8
  6. quasarr/downloads/linkcrypters/al.py +4 -4
  7. quasarr/downloads/linkcrypters/filecrypt.py +1 -2
  8. quasarr/downloads/packages/__init__.py +62 -88
  9. quasarr/downloads/sources/al.py +3 -3
  10. quasarr/downloads/sources/by.py +3 -3
  11. quasarr/downloads/sources/he.py +8 -9
  12. quasarr/downloads/sources/nk.py +3 -3
  13. quasarr/downloads/sources/sl.py +6 -1
  14. quasarr/downloads/sources/wd.py +93 -37
  15. quasarr/downloads/sources/wx.py +11 -17
  16. quasarr/providers/auth.py +9 -13
  17. quasarr/providers/cloudflare.py +5 -4
  18. quasarr/providers/imdb_metadata.py +1 -3
  19. quasarr/providers/jd_cache.py +64 -90
  20. quasarr/providers/log.py +226 -8
  21. quasarr/providers/myjd_api.py +116 -94
  22. quasarr/providers/sessions/al.py +20 -22
  23. quasarr/providers/sessions/dd.py +1 -1
  24. quasarr/providers/sessions/dl.py +8 -10
  25. quasarr/providers/sessions/nx.py +1 -1
  26. quasarr/providers/shared_state.py +26 -15
  27. quasarr/providers/utils.py +15 -6
  28. quasarr/providers/version.py +1 -1
  29. quasarr/search/__init__.py +113 -82
  30. quasarr/search/sources/al.py +19 -23
  31. quasarr/search/sources/by.py +6 -6
  32. quasarr/search/sources/dd.py +8 -10
  33. quasarr/search/sources/dj.py +15 -18
  34. quasarr/search/sources/dl.py +25 -37
  35. quasarr/search/sources/dt.py +13 -15
  36. quasarr/search/sources/dw.py +24 -16
  37. quasarr/search/sources/fx.py +25 -11
  38. quasarr/search/sources/he.py +16 -14
  39. quasarr/search/sources/hs.py +7 -7
  40. quasarr/search/sources/mb.py +7 -7
  41. quasarr/search/sources/nk.py +24 -25
  42. quasarr/search/sources/nx.py +22 -15
  43. quasarr/search/sources/sf.py +18 -9
  44. quasarr/search/sources/sj.py +7 -7
  45. quasarr/search/sources/sl.py +26 -14
  46. quasarr/search/sources/wd.py +61 -31
  47. quasarr/search/sources/wx.py +33 -47
  48. quasarr/storage/config.py +1 -3
  49. {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/METADATA +4 -1
  50. quasarr-2.7.1.dist-info/RECORD +84 -0
  51. quasarr-2.6.1.dist-info/RECORD +0 -84
  52. {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/WHEEL +0 -0
  53. {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/entry_points.txt +0 -0
  54. {quasarr-2.6.1.dist-info → quasarr-2.7.1.dist-info}/licenses/LICENSE +0 -0
quasarr/providers/utils.py

```diff
@@ -13,6 +13,8 @@ from urllib.parse import urlparse
 import requests
 from PIL import Image
 
+from quasarr.providers.log import crit, error
+
 # Fallback user agent when FlareSolverr is not available
 FALLBACK_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36"
 
@@ -95,7 +97,7 @@ def check_ip():
 def check_flaresolverr(shared_state, flaresolverr_url):
     # Ensure it ends with /v<digit+>
     if not re.search(r"/v\d+$", flaresolverr_url):
-        print(f"FlareSolverr URL does not end with /v#: {flaresolverr_url}")
+        error(f"FlareSolverr URL does not end with /v#: {flaresolverr_url}")
         return False
 
     # Try sending a simple test request
@@ -115,25 +117,32 @@ def check_flaresolverr(shared_state, flaresolverr_url):
         solution_ua = solution.get("userAgent", None)
         if solution_ua:
             shared_state.update("user_agent", solution_ua)
-            return True
+            try:
+                flaresolverr_version = json_data.get("version")
+            except Exception as e:
+                error(f"Could not grab Flaresolverr version: {str(e)}")
+                return False
+            return flaresolverr_version
         else:
-            print(f"Unexpected FlareSolverr response: {json_data}")
+            error(f"Unexpected FlareSolverr response: {json_data}")
             return False
 
     except Exception as e:
-        print(f"Failed to connect to FlareSolverr: {e}")
+        error(f"Failed to connect to FlareSolverr: {e}")
        return False
 
 
 def validate_address(address, name):
     if not address.startswith("http"):
-        sys.exit(f"Error: {name} '{address}' is invalid. It must start with 'http'.")
+        crit(f"Error: {name} '{address}' is invalid. It must start with 'http'.")
+        sys.exit(1)
 
     colon_count = address.count(":")
     if colon_count < 1 or colon_count > 2:
-        sys.exit(
+        crit(
             f"Error: {name} '{address}' is invalid. It must contain 1 or 2 colons, but it has {colon_count}."
         )
+        sys.exit(1)
 
 
 def is_flaresolverr_available(shared_state):
```
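The validate_address change swaps `sys.exit(message)` for a logged `crit()` call followed by `sys.exit(1)`. A minimal sketch of the difference, using the standard logging module as a stand-in for quasarr's log helpers (names here are illustrative, not quasarr's actual API):

```python
import logging
import sys

log = logging.getLogger("quasarr")

def validate_address_old(address, name):
    if not address.startswith("http"):
        # sys.exit(msg) prints msg to stderr and exits with status 1,
        # but the message bypasses any configured log handlers/formatting
        sys.exit(f"Error: {name} '{address}' is invalid.")

def validate_address_new(address, name):
    if not address.startswith("http"):
        # Log through the normal pipeline first, then exit explicitly
        log.critical(f"Error: {name} '{address}' is invalid.")
        sys.exit(1)
```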
quasarr/providers/version.py

```diff
@@ -5,7 +5,7 @@
 import re
 import sys
 
-__version__ = "2.6.1"
+__version__ = "2.7.1"
 
 
 def get_version():
```
quasarr/search/__init__.py

```diff
@@ -35,40 +35,42 @@ def get_search_results(
     mirror=None,
     season="",
     episode="",
+    offset=0,
+    limit=1000,
 ):
     if imdb_id and not imdb_id.startswith("tt"):
         imdb_id = f"tt{imdb_id}"
 
-    # Pre-populate IMDb metadata cache to avoid API hammering by search threads
     if imdb_id:
         get_imdb_metadata(imdb_id)
 
     docs_search = "lazylibrarian" in request_from.lower()
 
-    al = shared_state.values["config"]("Hostnames").get("al")
-    by = shared_state.values["config"]("Hostnames").get("by")
-    dd = shared_state.values["config"]("Hostnames").get("dd")
-    dl = shared_state.values["config"]("Hostnames").get("dl")
-    dt = shared_state.values["config"]("Hostnames").get("dt")
-    dj = shared_state.values["config"]("Hostnames").get("dj")
-    dw = shared_state.values["config"]("Hostnames").get("dw")
-    fx = shared_state.values["config"]("Hostnames").get("fx")
-    he = shared_state.values["config"]("Hostnames").get("he")
-    hs = shared_state.values["config"]("Hostnames").get("hs")
-    mb = shared_state.values["config"]("Hostnames").get("mb")
-    nk = shared_state.values["config"]("Hostnames").get("nk")
-    nx = shared_state.values["config"]("Hostnames").get("nx")
-    sf = shared_state.values["config"]("Hostnames").get("sf")
-    sj = shared_state.values["config"]("Hostnames").get("sj")
-    sl = shared_state.values["config"]("Hostnames").get("sl")
-    wd = shared_state.values["config"]("Hostnames").get("wd")
-    wx = shared_state.values["config"]("Hostnames").get("wx")
+    # Config retrieval
+    config = shared_state.values["config"]("Hostnames")
+    al = config.get("al")
+    by = config.get("by")
+    dd = config.get("dd")
+    dl = config.get("dl")
+    dt = config.get("dt")
+    dj = config.get("dj")
+    dw = config.get("dw")
+    fx = config.get("fx")
+    he = config.get("he")
+    hs = config.get("hs")
+    mb = config.get("mb")
+    nk = config.get("nk")
+    nx = config.get("nx")
+    sf = config.get("sf")
+    sj = config.get("sj")
+    sl = config.get("sl")
+    wd = config.get("wd")
+    wx = config.get("wx")
 
     start_time = time.time()
-
     search_executor = SearchExecutor()
 
-    # Radarr/Sonarr use imdb_id for searches
+    # Mappings
     imdb_map = [
         (al, al_search),
         (by, by_search),
@@ -90,7 +92,6 @@ def get_search_results(
         (wx, wx_search),
     ]
 
-    # LazyLibrarian uses search_phrase for searches
     phrase_map = [
         (by, by_search),
         (dl, dl_search),
@@ -100,7 +101,6 @@ def get_search_results(
         (wd, wd_search),
     ]
 
-    # Feed searches omit imdb_id and search_phrase
     feed_map = [
         (al, al_feed),
         (by, by_feed),
@@ -122,7 +122,8 @@ def get_search_results(
         (wx, wx_feed),
     ]
 
-    if imdb_id:  # only Radarr/Sonarr are using imdb_id
+    # Add searches
+    if imdb_id:
         args, kwargs = (
             (shared_state, start_time, request_from, imdb_id),
             {"mirror": mirror, "season": season, "episode": episode},
@@ -131,9 +132,7 @@ def get_search_results(
             if flag:
                 search_executor.add(func, args, kwargs, True)
 
-    elif (
-        search_phrase and docs_search
-    ):  # only LazyLibrarian is allowed to use search_phrase
+    elif search_phrase and docs_search:
         args, kwargs = (
             (shared_state, start_time, request_from, search_phrase),
             {"mirror": mirror, "season": season, "episode": episode},
@@ -143,9 +142,7 @@ def get_search_results(
                 search_executor.add(func, args, kwargs)
 
     elif search_phrase:
-        debug(
-            f"Search phrase '{search_phrase}' is not supported for {request_from}. Only LazyLibrarian can use search phrases."
-        )
+        debug(f"Search phrase '{search_phrase}' is not supported for {request_from}.")
 
     else:
         args, kwargs = ((shared_state, start_time, request_from), {"mirror": mirror})
@@ -153,23 +150,43 @@ def get_search_results(
             if flag:
                 search_executor.add(func, args, kwargs)
 
+    # Clean description for Console UI
     if imdb_id:
-        stype = f'IMDb-ID "{imdb_id}"'
+        stype = f"IMDb-ID <b>{imdb_id}</b>"
     elif search_phrase:
-        stype = f'Search-Phrase "{search_phrase}"'
+        stype = f"Search-Phrase <b>{search_phrase}</b>"
     else:
-        stype = "feed search"
+        stype = "<b>feed</b> search"
+
+    debug(f"Starting <g>{len(search_executor.searches)}</g> searches for {stype}...")
+
+    # Unpack the new return values (all_cached, min_ttl)
+    results, status_bar, all_cached, min_ttl = search_executor.run_all()
 
-    info(
-        f"Starting {len(search_executor.searches)} searches for {stype}... This may take some time."
-    )
-    results = search_executor.run_all()
     elapsed_time = time.time() - start_time
+
+    # Calculate pagination for logging and return
+    total_count = len(results)
+
+    # Slicing
+    sliced_results = results[offset : offset + limit]
+
+    # Formatting for log (1-based index for humans)
+    log_start = min(offset + 1, total_count) if total_count > 0 else 0
+    log_end = min(offset + limit, total_count)
+
+    # Logic to switch between "Time taken" and "from cache"
+    if all_cached:
+        time_info = f"from cache ({int(min_ttl)}s left)"
+    else:
+        time_info = f"Time taken: {elapsed_time:.2f} seconds"
+
     info(
-        f"Providing {len(results)} releases to {request_from} for {stype}. Time taken: {elapsed_time:.2f} seconds"
+        f"Providing releases <g>{log_start}-{log_end}</g> of <g>{total_count}</g> to <d>{request_from}</d> "
+        f"for {stype}{status_bar} <blue>{time_info}</blue>"
     )
 
-    return results
+    return sliced_results
 
 
 class SearchExecutor:
```
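The new offset/limit parameters implement plain list pagination: the full result set is still collected, then a slice is returned and the log reports a 1-based range. A standalone sketch of the same arithmetic (variable names mirror the diff; nothing here is quasarr-specific):

```python
def paginate(results, offset=0, limit=1000):
    """Slice a result list and compute a human-friendly 1-based range for logging."""
    total_count = len(results)
    sliced = results[offset : offset + limit]  # safe even if offset > total_count

    # 1-based indices for the log line, clamped to the actual result count
    log_start = min(offset + 1, total_count) if total_count > 0 else 0
    log_end = min(offset + limit, total_count)
    return sliced, log_start, log_end, total_count

# e.g. 250 results, second page of 100:
# paginate(list(range(250)), offset=100, limit=100)
# -> (items 100..199, 101, 200, 250)
```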
quasarr/search/__init__.py (continued)

```diff
@@ -177,47 +194,68 @@ class SearchExecutor:
         self.searches = []
 
     def add(self, func, args, kwargs, use_cache=False):
-        # create cache key
         key_args = list(args)
-        key_args[1] = None  # ignore start_time in cache key
+        key_args[1] = None
         key_args = tuple(key_args)
         key = hash((func.__name__, key_args, frozenset(kwargs.items())))
-
         self.searches.append((key, lambda: func(*args, **kwargs), use_cache))
 
     def run_all(self):
         results = []
-        futures = []
-        cache_keys = []
-        cache_used = False
+        future_to_meta = {}
 
-        with ThreadPoolExecutor() as executor:
-            for key, func, use_cache in self.searches:
-                if use_cache:
-                    cached_result = search_cache.get(key)
-                    if cached_result is not None:
-                        debug(f"Using cached result for {key}")
-                        cache_used = True
-                        results.extend(cached_result)
-                        continue
+        # Track cache state
+        all_cached = len(self.searches) > 0
+        min_ttl = float("inf")
+        bar_str = ""  # Initialize to prevent UnboundLocalError on full cache
 
-                futures.append(executor.submit(func))
-                cache_keys.append(key if use_cache else None)
-
-            for index, future in enumerate(as_completed(futures)):
-                try:
-                    result = future.result()
-                    results.extend(result)
-
-                    if cache_keys[index]:  # only cache if flag is set
-                        search_cache.set(cache_keys[index], result)
-                except Exception as e:
-                    info(f"An error occurred: {e}")
+        with ThreadPoolExecutor() as executor:
+            current_index = 0
+            pending_futures = []
 
-        if cache_used:
-            info("Presenting cached results instead of searching online.")
+            for key, func, use_cache in self.searches:
+                cached_result = None
+                exp = 0
 
-        return results
+                if use_cache:
+                    # Get both result and expiry
+                    cached_result, exp = search_cache.get(key)
+
+                if cached_result is not None:
+                    debug(f"Using cached result for {key}")
+                    results.extend(cached_result)
+
+                    # Calculate TTL for this cached item
+                    ttl = exp - time.time()
+                    if ttl < min_ttl:
+                        min_ttl = ttl
+                else:
+                    all_cached = False
+                    future = executor.submit(func)
+                    cache_key = key if use_cache else None
+                    future_to_meta[future] = (current_index, cache_key)
+                    pending_futures.append(future)
+                    current_index += 1
+
+            if pending_futures:
+                icons = ["▪️"] * len(pending_futures)
+
+                for future in as_completed(pending_futures):
+                    index, cache_key = future_to_meta[future]
+                    try:
+                        res = future.result()
+                        status = "✅" if res and len(res) > 0 else "⚪"
+                        icons[index] = status
+                        results.extend(res)
+                        if cache_key:
+                            search_cache.set(cache_key, res)
+                    except Exception as e:
+                        icons[index] = "❌"
+                        info(f"Search error: {e}")
+
+                bar_str = f" [{''.join(icons)}]"
+
+        return results, bar_str, all_cached, min_ttl
 
 
 class SearchCache:
```
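run_all now maps each future back to its submission index so a fixed-order status bar can be assembled even though as_completed yields results out of order. A condensed, self-contained sketch of that pattern (generic task callables, not quasarr's search functions):

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def run_tasks(tasks):
    """Run callables concurrently; return combined results plus a status bar
    whose icon positions match submission order, not completion order."""
    results = []
    icons = ["▪️"] * len(tasks)
    future_to_index = {}

    with ThreadPoolExecutor() as executor:
        for i, task in enumerate(tasks):
            future_to_index[executor.submit(task)] = i

        for future in as_completed(future_to_index):
            i = future_to_index[future]
            try:
                res = future.result()
                icons[i] = "✅" if res else "⚪"  # hit vs. empty result
                results.extend(res)
            except Exception:
                icons[i] = "❌"  # task raised

    return results, f"[{''.join(icons)}]"

# run_tasks([lambda: [1, 2], lambda: [], lambda: 1 / 0])
# -> ([1, 2], "[✅⚪❌]")
```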
quasarr/search/__init__.py (continued)

```diff
@@ -228,22 +266,15 @@ class SearchCache:
     def clean(self, now):
         if now - self.last_cleaned < 60:
             return
-
-        keys_to_delete = [
-            key for key, (_, expiry) in self.cache.items() if now >= expiry
-        ]
-
-        for key in keys_to_delete:
-            del self.cache[key]
-
+        keys_to_delete = [k for k, (_, exp) in self.cache.items() if now >= exp]
+        for k in keys_to_delete:
+            del self.cache[k]
         self.last_cleaned = now
 
     def get(self, key):
-        value, expiry = self.cache.get(key, (None, 0))
-        if time.time() < expiry:
-            return value
-
-        return None
+        val, exp = self.cache.get(key, (None, 0))
+        # Return tuple (value, expiry) if valid, else (None, 0)
+        return (val, exp) if time.time() < exp else (None, 0)
 
     def set(self, key, value, ttl=300):
         now = time.time()
```
quasarr/search/sources/al.py

```diff
@@ -16,7 +16,7 @@ from quasarr.downloads.sources.al import (
 )
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, error, info, trace
 from quasarr.providers.sessions.al import fetch_via_requests_session, invalidate_session
 
 hostname = "al"
@@ -37,9 +37,9 @@ def convert_to_rss_date(date_str: str) -> str:
     try:
         parsed = datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
         return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")
-    except ValueError:
+    except ValueError as e:
         # If parsing fails, return the original string or handle as needed
-        raise ValueError(f"Could not parse date: {date_str}")
+        raise ValueError(f"Could not parse date: {date_str}") from e
 
 
 def parse_relative_date(raw: str) -> datetime | None:
```
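Re-raising with `from e` preserves the original strptime failure as `__cause__`, so the traceback explicitly links the two errors instead of showing only the implicit "During handling of the above exception" context. A minimal illustration:

```python
from datetime import datetime

def parse_date(date_str: str) -> datetime:
    try:
        return datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
    except ValueError as e:
        # "from e" sets __cause__, so the traceback reads
        # "The above exception was the direct cause of ..." rather than the
        # implicit "During handling of the above exception ..." message
        raise ValueError(f"Could not parse date: {date_str}") from e

# >>> parse_date("not-a-date")
# ValueError: Could not parse date: not-a-date
# (with the original strptime ValueError attached as __cause__)
```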
quasarr/search/sources/al.py (continued)

```diff
@@ -122,7 +122,7 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if not "arr" in request_from.lower():
-        debug(f"{hostname}: Skipping {request_from} search (unsupported media type)!")
+        debug(f"<d>Skipping {request_from} search (unsupported media type)!</d>")
         return releases
 
     if "Radarr" in request_from:
@@ -131,7 +131,7 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
         wanted_type = "series"
 
     if mirror and mirror not in supported_mirrors:
-        debug(f'Mirror "{mirror}" not supported by {hostname}.')
+        debug(f'Mirror "{mirror}" not supported.')
         return releases
 
     try:
@@ -140,7 +140,7 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
         )
         r.raise_for_status()
     except Exception as e:
-        info(f"{hostname}: could not fetch feed: {e}")
+        error(f"Could not fetch feed: {e}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
@@ -201,7 +201,7 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
         try:
             date_converted = convert_to_rss_date(raw_date_str)
         except Exception as e:
-            debug(f"{hostname}: could not parse date '{raw_date_str}': {e}")
+            debug(f"Could not parse date '{raw_date_str}': {e}")
 
         # Each of these signifies an individual release block
         mt_blocks = tr.find_all("div", class_="mt10")
@@ -239,13 +239,13 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
             )
 
         except Exception as e:
-            info(f"{hostname}: error parsing feed item: {e}")
+            info(f"Error parsing feed item: {e}")
             mark_hostname_issue(
                 hostname, "feed", str(e) if "e" in dir() else "Error occurred"
             )
 
     elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -272,7 +272,7 @@ def al_search(
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if not "arr" in request_from.lower():
-        debug(f"{hostname}: Skipping {request_from} search (unsupported media type)!")
+        debug(f"<d>Skipping {request_from} search (unsupported media type)!</d>")
         return releases
 
     if "Radarr" in request_from:
@@ -281,14 +281,14 @@ def al_search(
         valid_type = "series"
 
     if mirror and mirror not in supported_mirrors:
-        debug(f'{hostname}: Mirror "{mirror}" not supported.')
+        debug(f'Mirror "{mirror}" not supported.')
         return releases
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if imdb_id:
         title = get_localized_title(shared_state, imdb_id, "de")
         if not title:
-            info(f"{hostname}: no title for IMDb {imdb_id}")
+            info(f"No title for IMDb {imdb_id}")
             return releases
         search_string = title
 
@@ -307,7 +307,7 @@ def al_search(
         )
         r.raise_for_status()
     except Exception as e:
-        info(f"{hostname}: search load error: {e}")
+        info(f"Search load error: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
@@ -322,7 +322,7 @@ def al_search(
             last_redirect.url, redirect_location
         )  # in case of relative URL
         debug(
-            f"{hostname}: {search_string} redirected to {absolute_redirect_url} instead of search results page"
+            f"{search_string} redirected to {absolute_redirect_url} instead of search results page"
         )
 
         try:
@@ -350,13 +350,9 @@ def al_search(
         sanitized_search_string = shared_state.sanitize_string(search_string)
         sanitized_title = shared_state.sanitize_string(name)
         if not sanitized_search_string in sanitized_title:
-            debug(
-                f"{hostname}: Search string '{search_string}' doesn't match '{name}'"
-            )
+            debug(f"Search string '{search_string}' doesn't match '{name}'")
             continue
-        debug(
-            f"{hostname}: Matched search string '{search_string}' with result '{name}'"
-        )
+        trace(f"Matched search string '{search_string}' with result '{name}'")
 
         type_label = None
         for lbl in body.select("div.label-group a[href]"):
@@ -388,7 +384,7 @@ def al_search(
         use_cache = ts and ts > datetime.now() - timedelta(seconds=threshold)
 
         if use_cache and entry.get("html"):
-            debug(f"{hostname}: Using cached content for '{url}'")
+            debug(f"Using cached content for '{url}'")
             data_html = entry["html"]
         else:
             entry = {"timestamp": datetime.now()}
@@ -494,13 +490,13 @@ def al_search(
         )
 
     except Exception as e:
-        info(f"{hostname}: error parsing search item: {e}")
+        info(f"Error parsing search item: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
 
     elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
```
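al_search keeps a per-URL HTML cache guarded by a timestamp threshold (`use_cache = ts and ts > datetime.now() - timedelta(seconds=threshold)`). A minimal standalone sketch of that freshness check, where the fetch function and the threshold value are placeholders rather than quasarr's real ones:

```python
from datetime import datetime, timedelta

page_cache = {}  # url -> {"timestamp": datetime, "html": str}
THRESHOLD_SECONDS = 3600  # placeholder; the real threshold comes from config

def get_html(url, fetch):
    """Return cached HTML if it is younger than the threshold, else refetch."""
    entry = page_cache.get(url, {})
    ts = entry.get("timestamp")
    fresh = ts and ts > datetime.now() - timedelta(seconds=THRESHOLD_SECONDS)

    if fresh and entry.get("html"):
        return entry["html"]

    html = fetch(url)  # e.g. requests.get(url).text
    page_cache[url] = {"timestamp": datetime.now(), "html": html}
    return html
```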
quasarr/search/sources/by.py

```diff
@@ -14,7 +14,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, error, info
 
 hostname = "by"
 supported_mirrors = ["rapidgator", "ddownload", "nitroflare"]
@@ -168,7 +168,7 @@ def _parse_posts(
                 }
             )
         except Exception as e:
-            debug(f"Error parsing {hostname.upper()}: {e}")
+            debug(f"Error parsing: {e}")
             continue
 
     return releases
@@ -201,12 +201,12 @@ def by_feed(shared_state, start_time, request_from, mirror=None):
             mirror_filter=mirror,
         )
     except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        error(f"Error loading feed: {e}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
         releases = []
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -257,12 +257,12 @@ def by_search(
             episode=episode,
         )
     except Exception as e:
-        info(f"Error loading {hostname.upper()} search: {e}")
+        error(f"Error loading search: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
         releases = []
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
```
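Every source module follows the same health-tracking convention visible in these hunks: wrap the network call, call mark_hostname_issue(hostname, kind, message) on failure, and call clear_hostname_issue(hostname) once a run produces releases. A schematic sketch of that wrapper, where fetch_feed stands in for the real loader:

```python
from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue

hostname = "by"  # illustrative

def load_feed_safely(fetch_feed):
    releases = []
    try:
        releases = fetch_feed()
    except Exception as e:
        # Record the failure so the UI can flag this hostname as unhealthy
        mark_hostname_issue(hostname, "feed", str(e))
        releases = []

    if releases:
        # Any successful run wipes the recorded issue for this hostname
        clear_hostname_issue(hostname)
    return releases
```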
quasarr/search/sources/dd.py

```diff
@@ -9,7 +9,7 @@ from datetime import datetime, timezone
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, error, info, warn
 from quasarr.providers.sessions.dd import (
     create_and_persist_session,
     retrieve_and_validate_session,
@@ -48,9 +48,7 @@ def dd_search(
     password = dd
 
     if not "arr" in request_from.lower():
-        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
-        )
+        debug(f"<d>Skipping {request_from} search (unsupported media type)!</d>")
         return releases
 
     try:
@@ -65,7 +63,7 @@ def dd_search(
 
     if mirror and mirror not in supported_mirrors:
         debug(
-            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+            f'Mirror "{mirror}" not supported. Supported mirrors: {supported_mirrors}.'
             " Skipping search!"
         )
         return releases
@@ -123,7 +121,7 @@ def dd_search(
             try:
                 if release.get("fake"):
                     debug(
-                        f"{hostname}: Release {release.get('release')} marked as fake. Invalidating {hostname.upper()} session..."
+                        f"Release {release.get('release')} marked as fake. Invalidating session..."
                     )
                     create_and_persist_session(shared_state)
                     return []
@@ -138,7 +136,7 @@ def dd_search(
                 release_imdb = release.get("imdbid", None)
                 if release_imdb and imdb_id and imdb_id != release_imdb:
                     debug(
-                        f"{hostname}: Release {title} IMDb-ID mismatch ({imdb_id} != {release.get('imdbid', None)})"
+                        f"Release {title} IMDb-ID mismatch ({imdb_id} != {release.get('imdbid', None)})"
                     )
                     continue
 
@@ -169,20 +167,20 @@ def dd_search(
                     }
                 )
             except Exception as e:
-                info(f"Error parsing {hostname.upper()} feed: {e}")
+                warn(f"Error parsing feed: {e}")
                 mark_hostname_issue(
                     hostname, "search", str(e) if "e" in dir() else "Error occurred"
                 )
                 continue
 
     except Exception as e:
-        info(f"Error loading {hostname.upper()} {search_type}: {e}")
+        error(f"Error loading feed: {e}")
         mark_hostname_issue(
             hostname, search_type, str(e) if "e" in dir() else "Error occurred"
         )
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
```
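Across these files, bare print/info calls give way to a leveled logger (trace, debug, info, warn, error, crit) whose messages carry lightweight color tags such as <g>, <d>, <b>, and <blue>. The tag semantics are quasarr-internal; the sketch below only illustrates the general technique of rendering or stripping such tags depending on the output target, with an invented TAG_RE and ANSI palette:

```python
import re
import sys

# Hypothetical tag set modeled on the <g>/<d>/<b>/<blue> markup in the diff
TAG_RE = re.compile(r"</?(?:g|d|b|blue)>")
ANSI = {"g": "\033[32m", "d": "\033[2m", "b": "\033[1m", "blue": "\033[34m"}

def render(message: str) -> str:
    """Render color tags as ANSI when attached to a terminal, strip them otherwise."""
    if not sys.stdout.isatty():
        return TAG_RE.sub("", message)
    for tag, code in ANSI.items():
        message = message.replace(f"<{tag}>", code).replace(f"</{tag}>", "\033[0m")
    return message

# render("Providing releases <g>1-100</g> of <g>250</g>")
# -> "Providing releases 1-100 of 250" when piped to a file
```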