quasarr-2.6.1-py3-none-any.whl → quasarr-2.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of quasarr has been flagged by the registry as a potentially problematic release.

Files changed (54)
  1. quasarr/__init__.py +71 -61
  2. quasarr/api/__init__.py +1 -2
  3. quasarr/api/arr/__init__.py +159 -56
  4. quasarr/api/captcha/__init__.py +203 -154
  5. quasarr/downloads/__init__.py +12 -8
  6. quasarr/downloads/linkcrypters/al.py +3 -3
  7. quasarr/downloads/linkcrypters/filecrypt.py +1 -2
  8. quasarr/downloads/packages/__init__.py +62 -88
  9. quasarr/downloads/sources/al.py +3 -3
  10. quasarr/downloads/sources/by.py +3 -3
  11. quasarr/downloads/sources/he.py +8 -9
  12. quasarr/downloads/sources/nk.py +3 -3
  13. quasarr/downloads/sources/sl.py +6 -1
  14. quasarr/downloads/sources/wd.py +93 -37
  15. quasarr/downloads/sources/wx.py +11 -17
  16. quasarr/providers/auth.py +9 -13
  17. quasarr/providers/cloudflare.py +4 -3
  18. quasarr/providers/imdb_metadata.py +0 -2
  19. quasarr/providers/jd_cache.py +64 -90
  20. quasarr/providers/log.py +226 -8
  21. quasarr/providers/myjd_api.py +116 -94
  22. quasarr/providers/sessions/al.py +20 -22
  23. quasarr/providers/sessions/dd.py +1 -1
  24. quasarr/providers/sessions/dl.py +8 -10
  25. quasarr/providers/sessions/nx.py +1 -1
  26. quasarr/providers/shared_state.py +26 -15
  27. quasarr/providers/utils.py +15 -6
  28. quasarr/providers/version.py +1 -1
  29. quasarr/search/__init__.py +91 -78
  30. quasarr/search/sources/al.py +19 -23
  31. quasarr/search/sources/by.py +6 -6
  32. quasarr/search/sources/dd.py +8 -10
  33. quasarr/search/sources/dj.py +15 -18
  34. quasarr/search/sources/dl.py +25 -37
  35. quasarr/search/sources/dt.py +13 -15
  36. quasarr/search/sources/dw.py +24 -16
  37. quasarr/search/sources/fx.py +25 -11
  38. quasarr/search/sources/he.py +16 -14
  39. quasarr/search/sources/hs.py +7 -7
  40. quasarr/search/sources/mb.py +7 -7
  41. quasarr/search/sources/nk.py +24 -25
  42. quasarr/search/sources/nx.py +22 -15
  43. quasarr/search/sources/sf.py +18 -9
  44. quasarr/search/sources/sj.py +7 -7
  45. quasarr/search/sources/sl.py +26 -14
  46. quasarr/search/sources/wd.py +61 -31
  47. quasarr/search/sources/wx.py +33 -47
  48. quasarr/storage/config.py +1 -3
  49. {quasarr-2.6.1.dist-info → quasarr-2.7.0.dist-info}/METADATA +4 -1
  50. quasarr-2.7.0.dist-info/RECORD +84 -0
  51. quasarr-2.6.1.dist-info/RECORD +0 -84
  52. {quasarr-2.6.1.dist-info → quasarr-2.7.0.dist-info}/WHEEL +0 -0
  53. {quasarr-2.6.1.dist-info → quasarr-2.7.0.dist-info}/entry_points.txt +0 -0
  54. {quasarr-2.6.1.dist-info → quasarr-2.7.0.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/he.py

@@ -13,7 +13,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace
 
 hostname = "he"
 supported_mirrors = ["rapidgator", "nitroflare"]
@@ -74,7 +74,7 @@ def he_search(
 
     if not "arr" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -84,7 +84,7 @@ def he_search(
         tag = "tv-shows"
 
     if mirror and mirror not in supported_mirrors:
-        debug(f'Mirror "{mirror}" not supported by {hostname}.')
+        debug(f'Mirror "{mirror}" not supported.')
         return releases
 
     source_search = ""
@@ -93,7 +93,7 @@ def he_search(
     if imdb_id:
         local_title = get_localized_title(shared_state, imdb_id, "en")
         if not local_title:
-            info(f"{hostname}: no title for IMDb {imdb_id}")
+            info(f"No title for IMDb {imdb_id}")
            return releases
         if not season:
             year = get_year(imdb_id)
@@ -130,7 +130,7 @@ def he_search(
         soup = BeautifulSoup(r.content, "html.parser")
         results = soup.find_all("div", class_="item")
     except Exception as e:
-        info(f"{hostname}: {search_type} load error: {e}")
+        info(f"{search_type} load error: {e}")
         mark_hostname_issue(
             hostname, search_type, str(e) if "e" in dir() else "Error occurred"
         )
@@ -196,22 +196,25 @@ def he_search(
                 mark_hostname_issue(
                     hostname, search_type, str(e) if "e" in dir() else "Error occurred"
                 )
+            release_imdb_id = None
             try:
                 imdb_link = soup.find(
                     "a", href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE)
                 )
                 if imdb_link:
                     release_imdb_id = re.search(r"tt\d+", imdb_link["href"]).group()
-                    if imdb_id and release_imdb_id != imdb_id:
-                        debug(
-                            f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}"
+                    if imdb_id and release_imdb_id and release_imdb_id != imdb_id:
+                        trace(
+                            f"IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}"
                         )
                         continue
                 else:
-                    debug(f"{hostname}: imdb link not found for title {title}")
+                    trace(f"imdb link not found for title {title}")
             except Exception:
-                debug(f"{hostname}: failed to determine imdb_id for title {title}")
-                continue
+                debug(f"failed to determine imdb_id for title {title}")
+
+            if release_imdb_id is None:
+                release_imdb_id = imdb_id
 
             password = None
             payload = urlsafe_b64encode(
@@ -239,12 +242,11 @@ def he_search(
                 }
             )
         except Exception as e:
-            debug(f"{hostname}: error parsing search result: {e}")
+            debug(f"error parsing search result: {e}")
             continue
 
     elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
-
+    debug(f"Time taken: {elapsed:.2f}s")
     if releases:
         clear_hostname_issue(hostname)
     return releases
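
A pattern worth noting: these he.py hunks (and matching ones in nk.py and nx.py below) replace the old hard `continue` on any IMDb lookup failure with a tolerant three-step flow: extract a release-page ID if possible, skip only on a confirmed mismatch, and otherwise fall back to the ID being searched for. A minimal standalone sketch of that flow, with `page_href` standing in for the per-site link extraction (the names here are illustrative, not quasarr's API):

```python
import re

def resolve_release_imdb_id(page_href, requested_imdb_id):
    """Sketch of the 2.7.0 flow: prefer the ID found on the release page,
    skip only on a confirmed mismatch, otherwise fall back to the requested ID."""
    release_imdb_id = None
    if page_href:
        found = re.search(r"tt\d+", page_href)
        if found:
            release_imdb_id = found.group()
    # Skip only when both IDs are known and disagree; a missing ID never skips.
    if requested_imdb_id and release_imdb_id and release_imdb_id != requested_imdb_id:
        return None, True
    if release_imdb_id is None:
        release_imdb_id = requested_imdb_id  # tolerate pages without an IMDb link
    return release_imdb_id, False

# Usage mirroring the loop in he_search:
release_imdb_id, mismatch = resolve_release_imdb_id(
    "https://www.imdb.com/title/tt0903747/", "tt0903747"
)
assert (release_imdb_id, mismatch) == ("tt0903747", False)
```

The net effect in 2.7.0 is that releases without a recognizable IMDb link are kept (tagged with the requested ID) instead of being dropped, while genuine cross-title mismatches are still filtered out.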
quasarr/search/sources/hs.py

@@ -14,7 +14,7 @@ from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 
 warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, warn
 
 hostname = "hs"
 supported_mirrors = ["rapidgator", "ddownload", "katfile"]
@@ -353,7 +353,7 @@ def hs_feed(shared_state, start_time, request_from, mirror=None):
     # HS only supports movies and series
     if "lazylibrarian" in request_from.lower():
         debug(
-            f'Skipping {request_from} feed on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} feed on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -431,12 +431,12 @@ def hs_feed(shared_state, start_time, request_from, mirror=None):
             continue
 
     except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        warn(f"Error loading {hostname.upper()} feed: {e}")
         mark_hostname_issue(hostname, "feed", str(e))
         return releases
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -463,7 +463,7 @@ def hs_search(
     # HS only supports movies and series
     if "lazylibrarian" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -503,12 +503,12 @@ def hs_search(
         )
 
     except Exception as e:
-        info(f"Error loading {hostname.upper()} search: {e}")
+        warn(f"Error loading {hostname.upper()} search: {e}")
         mark_hostname_issue(hostname, "search", str(e))
         return releases
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
quasarr/search/sources/mb.py

@@ -12,7 +12,7 @@ import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, warn
 
 hostname = "mb"
 supported_mirrors = ["rapidgator", "ddownload"]
@@ -169,7 +169,7 @@ def mb_feed(shared_state, start_time, request_from, mirror=None):
 
     if not "arr" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return []
 
@@ -183,12 +183,12 @@ def mb_feed(shared_state, start_time, request_from, mirror=None):
         soup = BeautifulSoup(r.content, "html.parser")
         releases = _parse_posts(soup, shared_state, password, mirror_filter=mirror)
     except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        warn(f"Error loading {hostname.upper()} feed: {e}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
         releases = []
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -208,7 +208,7 @@ def mb_search(
 
     if not "arr" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return []
 
@@ -236,12 +236,12 @@ def mb_search(
             episode=episode,
         )
     except Exception as e:
-        info(f"Error loading {hostname.upper()} search: {e}")
+        warn(f"Error loading {hostname.upper()} search: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
         releases = []
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
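
Every feed and search handler in these files follows the same health-tracking lifecycle around the log calls being retuned here: mark_hostname_issue() on failure, clear_hostname_issue() once a run yields releases. The real implementation lives in quasarr/providers/hostname_issues.py, which this diff does not show; a purely hypothetical minimal model of that contract:

```python
# Hypothetical model only; quasarr/providers/hostname_issues.py is not part of
# the hunks shown here, so everything beyond the two function names is assumed.
_issues = {}

def mark_hostname_issue(hostname, context, message):
    # Remember the most recent failure per host, keyed by "feed" or "search".
    _issues[hostname] = (context, message)

def clear_hostname_issue(hostname):
    # A run that produced releases wipes any recorded failure for the host.
    _issues.pop(hostname, None)
```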
quasarr/search/sources/nk.py

@@ -14,7 +14,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace
 
 hostname = "nk"
 supported_mirrors = ["rapidgator", "ddownload"]
@@ -74,12 +74,12 @@ def nk_search(
 
     if not "arr" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
     if mirror and mirror not in supported_mirrors:
-        debug(f'Mirror "{mirror}" not supported by {hostname}.')
+        debug(f'Mirror "{mirror}" not supported.')
         return releases
 
     source_search = ""
@@ -88,7 +88,7 @@ def nk_search(
     if imdb_id:
         local_title = get_localized_title(shared_state, imdb_id, "de")
         if not local_title:
-            info(f"{hostname}: no title for IMDb {imdb_id}")
+            info(f"No title for IMDb {imdb_id}")
             return releases
         if not season:
             year = get_year(imdb_id)
@@ -124,7 +124,7 @@ def nk_search(
         soup = BeautifulSoup(r.content, "html.parser")
         results = soup.find_all("div", class_="article-right")
     except Exception as e:
-        info(f"{hostname}: {search_type} load error: {e}")
+        info(f"{search_type} load error: {e}")
         mark_hostname_issue(
             hostname, search_type, str(e) if "e" in dir() else "Error occurred"
         )
@@ -135,23 +135,6 @@ def nk_search(
 
     for result in results:
         try:
-            imdb_a = result.select_one("a.imdb")
-            if imdb_a and imdb_a.get("href"):
-                try:
-                    release_imdb_id = re.search(r"tt\d+", imdb_a["href"]).group()
-                    if imdb_id:
-                        if release_imdb_id != imdb_id:
-                            debug(
-                                f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}"
-                            )
-                            continue
-                except Exception:
-                    debug(f"{hostname}: could not extract IMDb ID")
-                    continue
-            else:
-                debug(f"{hostname}: could not extract IMDb ID")
-                continue
-
             a = result.find("a", class_="release-details", href=True)
             if not a:
                 continue
@@ -162,6 +145,23 @@ def nk_search(
             else:
                 continue
 
+            imdb_a = result.select_one("a.imdb")
+            if imdb_a and imdb_a.get("href"):
+                try:
+                    release_imdb_id = re.search(r"tt\d+", imdb_a["href"]).group()
+                    if imdb_id and release_imdb_id and release_imdb_id != imdb_id:
+                        trace(
+                            f"IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}"
+                        )
+                        continue
+                except Exception:
+                    debug(f"failed to determine imdb_id for {title}")
+            else:
+                trace(f"imdb link not found for {title}")
+
+            if release_imdb_id is None:
+                release_imdb_id = imdb_id
+
             if not shared_state.is_valid_release(
                 title, request_from, search_string, season, episode
             ):
@@ -230,12 +230,11 @@ def nk_search(
             )
         except Exception as e:
             info(e)
-            debug(f"{hostname}: error parsing search result: {e}")
+            debug(f"error parsing search result: {e}")
             continue
 
     elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
-
+    debug(f"Time taken: {elapsed:.2f}s")
     if releases:
         clear_hostname_issue(hostname)
     return releases
quasarr/search/sources/nx.py

@@ -10,7 +10,7 @@ import requests
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace, warn
 
 hostname = "nx"
 supported_mirrors = ["filer"]
@@ -30,8 +30,8 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
 
     if mirror and mirror not in supported_mirrors:
         debug(
-            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
-            " Skipping search!"
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}. '
+            "Skipping search!"
         )
         return releases
 
@@ -47,7 +47,7 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
         r.raise_for_status()
         feed = r.json()
     except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        warn(f"Error loading {hostname.upper()} feed: {e}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
@@ -103,13 +103,13 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
             )
 
     except Exception as e:
-        info(f"Error parsing {hostname.upper()} feed: {e}")
+        warn(f"Error parsing {hostname.upper()} feed: {e}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -138,8 +138,8 @@ def nx_search(
 
     if mirror and mirror not in supported_mirrors:
         debug(
-            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
-            " Skipping search!"
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}. '
+            "Skipping search!"
         )
         return releases
 
@@ -164,7 +164,7 @@ def nx_search(
         r.raise_for_status()
         feed = r.json()
     except Exception as e:
-        info(f"Error loading {hostname.upper()} search: {e}")
+        warn(f"Error loading {hostname.upper()} search: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
@@ -187,12 +187,19 @@ def nx_search(
 
         try:
             source = f"https://{nx}/release/{item['slug']}"
-            if not imdb_id:
-                imdb_id = item.get("_media", {}).get("imdbid", None)
+            release_imdb_id = item.get("_media", {}).get("imdbid", None)
+            if imdb_id and release_imdb_id and release_imdb_id != imdb_id:
+                trace(
+                    f"{hostname.upper()}: Skipping result '{title}' due to IMDb ID mismatch."
+                )
+                continue
+
+            if release_imdb_id is None:
+                release_imdb_id = imdb_id
 
             mb = shared_state.convert_to_mb(item)
             payload = urlsafe_b64encode(
-                f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode(
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode(
                     "utf-8"
                 )
             ).decode("utf-8")
@@ -215,7 +222,7 @@ def nx_search(
                     "details": {
                         "title": title,
                         "hostname": hostname.lower(),
-                        "imdb_id": imdb_id,
+                        "imdb_id": release_imdb_id,
                         "link": link,
                         "mirror": mirror,
                         "size": size,
@@ -227,13 +234,13 @@ def nx_search(
             )
 
         except Exception as e:
-            info(f"Error parsing {hostname.upper()} search: {e}")
+            warn(f"Error parsing {hostname.upper()} search: {e}")
             mark_hostname_issue(
                 hostname, "search", str(e) if "e" in dir() else "Error occurred"
            )
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
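
Beyond fixing the old bug of overwriting the caller's imdb_id, the nx.py payload line shows the pipe-delimited, urlsafe-base64 convention these sources share: title|source|mirror|size_mb|password|imdb_id. The encode side below is taken from the hunk; the decode side is an assumption about how the consumer unpacks it (the actual consumer lives elsewhere in the package), and the values are made up for illustration:

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

# Illustrative values only; field order is title|source|mirror|mb|password|imdb_id.
title, source, mirror, mb, password, release_imdb_id = (
    "Some.Release.2160p", "https://nx.example/release/some-release", "filer", 4096, "", "tt1234567",
)

# Encode side, as in the nx_search hunk above:
payload = urlsafe_b64encode(
    f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")
).decode("utf-8")

# Presumed decode side: reverse the base64 step and split on the delimiter.
fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
assert fields[0] == title and fields[5] == "tt1234567"
```

One consequence of the flat delimiter: any field containing "|" would corrupt the split, so the scheme relies on release titles and URLs never containing pipes.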
quasarr/search/sources/sf.py

@@ -12,7 +12,7 @@ import requests
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace, warn
 
 hostname = "sf"
 supported_mirrors = ["1fichier", "ddownload", "katfile", "rapidgator", "turbobit"]
@@ -102,7 +102,7 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
 
     if not "sonarr" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -131,7 +131,7 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
         )
         r.raise_for_status()
     except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e} for {formatted_date}")
+        warn(f"Error loading {hostname.upper()} feed: {e} for {formatted_date}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
@@ -194,7 +194,7 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
         )
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -234,7 +234,7 @@ def sf_search(
 
     if not "sonarr" in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -255,7 +255,7 @@ def sf_search(
         r.raise_for_status()
         feed = r.json()
     except Exception as e:
-        info(f"Error loading {hostname.upper()} search: {e}")
+        warn(f"Error loading {hostname.upper()} search: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
@@ -266,11 +266,11 @@ def sf_search(
         sanitized_search_string = shared_state.sanitize_string(search_string)
         sanitized_title = shared_state.sanitize_string(result.get("title", ""))
         if not re.search(rf"\b{re.escape(sanitized_search_string)}\b", sanitized_title):
-            debug(
+            trace(
                 f"Search string '{search_string}' doesn't match '{result.get('title')}'"
             )
            continue
-        debug(
+        trace(
             f"Matched search string '{search_string}' with result '{result.get('title')}'"
         )
 
@@ -334,6 +334,15 @@ def sf_search(
             )
             continue
 
+        if imdb_id_in_search and imdb_id and imdb_id != imdb_id_in_search:
+            trace(
+                f"{hostname.upper()}: Skipping result '{result.get('title')}' due to IMDb ID mismatch."
+            )
+            continue
+
+        if imdb_id is None:
+            imdb_id = imdb_id_in_search
+
         # cache content and imdb_id
         entry["content"] = data_html
         entry["imdb_id"] = imdb_id
@@ -451,7 +460,7 @@ def sf_search(
             debug(f"Error parsing item for '{search_string}': {e}")
 
     elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
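
The matching these sf.py hunks demote from debug to trace is a word-boundary regex test over sanitized strings: re.escape() neutralizes any regex metacharacters in the query, and \b rejects substring false positives. A self-contained illustration of just the regex step (the real code first runs both sides through shared_state.sanitize_string, which is not shown in this diff):

```python
import re

def title_matches(search_string, title):
    # re.escape makes the query literal; \b requires whole-word alignment.
    return bool(re.search(rf"\b{re.escape(search_string)}\b", title))

assert title_matches("The Show", "The Show S01E01 German 1080p")
assert not title_matches("Show", "Showdown 2024")  # boundary rejects prefix hits
assert title_matches("Mission: Impossible", "Mission: Impossible 1996")  # escaped punctuation stays literal
```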
quasarr/search/sources/sj.py

@@ -13,7 +13,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace
 
 hostname = "sj"
 
@@ -32,7 +32,7 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
 
     if "sonarr" not in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -104,7 +104,7 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
             debug(f"{hostname.upper()}: feed parse error: {e}")
             continue
 
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -124,7 +124,7 @@ def sj_search(
 
     if "sonarr" not in request_from.lower():
         debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
         )
         return releases
 
@@ -168,12 +168,12 @@ def sj_search(
             if not re.search(
                 rf"\b{re.escape(sanitized_search_string)}\b", sanitized_title
             ):
-                debug(
+                trace(
                     f"Search string '{localized_title}' doesn't match '{result_title}'"
                 )
                 continue
 
-            debug(
+            trace(
                 f"Matched search string '{localized_title}' with result '{result_title}'"
             )
 
@@ -240,7 +240,7 @@ def sj_search(
            debug(f"{hostname.upper()}: search parse error: {e}")
             continue
 
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
    if releases:
         clear_hostname_issue(hostname)
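
Across all seven files, the changed log imports track a reworked quasarr/providers/log.py (+226 -8 in this release): a trace level below debug for high-volume match/skip chatter, warn for load failures that previously logged at info, and <d>...</d> markup that appears to dim the skip messages. The real module is not included in these hunks, so the following is a purely hypothetical sketch of a logging surface that would behave the way these call sites expect:

```python
# Hypothetical sketch only: the actual implementation is quasarr/providers/log.py,
# which this diff does not show. This models the observable surface used above:
# trace() below debug(), warn(), and <d>...</d> markup rendered as ANSI dim text.
import logging
import re
import sys

TRACE = 5  # assumed numeric level, one step below logging.DEBUG (10)
logging.addLevelName(TRACE, "TRACE")

_logger = logging.getLogger("quasarr")
_logger.setLevel(TRACE)
_handler = logging.StreamHandler(sys.stdout)
_handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
_logger.addHandler(_handler)

_DIM, _RESET = "\033[2m", "\033[0m"

def _render(message):
    # Replace <d>...</d> markup with ANSI dim codes (assumed semantics).
    return re.sub(r"<d>(.*?)</d>", rf"{_DIM}\1{_RESET}", message)

def trace(message): _logger.log(TRACE, _render(message))
def debug(message): _logger.debug(_render(message))
def info(message): _logger.info(_render(message))
def warn(message): _logger.warning(_render(message))
```

Whatever the actual mechanics, the direction of the release is consistent: per-result noise moves down to trace, genuine fetch failures move up to warn, and the "{hostname}:" prefixes disappear from individual messages, suggesting the new logger attaches that context itself.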