quasarr-2.6.0-py3-none-any.whl → quasarr-2.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (57)
  1. quasarr/__init__.py +71 -61
  2. quasarr/api/__init__.py +3 -4
  3. quasarr/api/arr/__init__.py +159 -56
  4. quasarr/api/captcha/__init__.py +203 -154
  5. quasarr/api/config/__init__.py +1 -1
  6. quasarr/api/jdownloader/__init__.py +19 -12
  7. quasarr/downloads/__init__.py +12 -8
  8. quasarr/downloads/linkcrypters/al.py +3 -3
  9. quasarr/downloads/linkcrypters/filecrypt.py +1 -2
  10. quasarr/downloads/packages/__init__.py +62 -88
  11. quasarr/downloads/sources/al.py +3 -3
  12. quasarr/downloads/sources/by.py +3 -3
  13. quasarr/downloads/sources/he.py +8 -9
  14. quasarr/downloads/sources/nk.py +3 -3
  15. quasarr/downloads/sources/sl.py +6 -1
  16. quasarr/downloads/sources/wd.py +132 -62
  17. quasarr/downloads/sources/wx.py +11 -17
  18. quasarr/providers/auth.py +9 -13
  19. quasarr/providers/cloudflare.py +50 -4
  20. quasarr/providers/imdb_metadata.py +0 -2
  21. quasarr/providers/jd_cache.py +64 -90
  22. quasarr/providers/log.py +226 -8
  23. quasarr/providers/myjd_api.py +116 -94
  24. quasarr/providers/sessions/al.py +20 -22
  25. quasarr/providers/sessions/dd.py +1 -1
  26. quasarr/providers/sessions/dl.py +8 -10
  27. quasarr/providers/sessions/nx.py +1 -1
  28. quasarr/providers/shared_state.py +26 -15
  29. quasarr/providers/utils.py +15 -6
  30. quasarr/providers/version.py +1 -1
  31. quasarr/search/__init__.py +91 -78
  32. quasarr/search/sources/al.py +19 -23
  33. quasarr/search/sources/by.py +6 -6
  34. quasarr/search/sources/dd.py +8 -10
  35. quasarr/search/sources/dj.py +15 -18
  36. quasarr/search/sources/dl.py +25 -37
  37. quasarr/search/sources/dt.py +13 -15
  38. quasarr/search/sources/dw.py +24 -16
  39. quasarr/search/sources/fx.py +25 -11
  40. quasarr/search/sources/he.py +16 -14
  41. quasarr/search/sources/hs.py +7 -7
  42. quasarr/search/sources/mb.py +7 -7
  43. quasarr/search/sources/nk.py +24 -25
  44. quasarr/search/sources/nx.py +22 -15
  45. quasarr/search/sources/sf.py +18 -9
  46. quasarr/search/sources/sj.py +7 -7
  47. quasarr/search/sources/sl.py +26 -14
  48. quasarr/search/sources/wd.py +63 -9
  49. quasarr/search/sources/wx.py +33 -47
  50. quasarr/storage/config.py +1 -3
  51. quasarr/storage/setup.py +13 -4
  52. {quasarr-2.6.0.dist-info → quasarr-2.7.0.dist-info}/METADATA +4 -1
  53. quasarr-2.7.0.dist-info/RECORD +84 -0
  54. quasarr-2.6.0.dist-info/RECORD +0 -84
  55. {quasarr-2.6.0.dist-info → quasarr-2.7.0.dist-info}/WHEEL +0 -0
  56. {quasarr-2.6.0.dist-info → quasarr-2.7.0.dist-info}/entry_points.txt +0 -0
  57. {quasarr-2.6.0.dist-info → quasarr-2.7.0.dist-info}/licenses/LICENSE +0 -0
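The hunks below (from quasarr/search/sources/dj.py, dl.py, dt.py, dw.py, and fx.py) share one theme: logging moves from the old two-level debug/info pair to a five-level trace/debug/info/warn/error scheme, and the per-message hostname prefixes are dropped (quasarr/providers/log.py grows by 218 net lines in this release, presumably to carry that context itself). The shipped log.py is not part of this excerpt; the following is only a minimal sketch of what such leveled helpers could look like, with the module layout, the TRACE level value, and the handling of the <d>…</d> markup seen below all assumed rather than taken from the source:

# Hypothetical sketch of leveled log helpers; NOT the shipped quasarr/providers/log.py.
import logging
import re
import sys

TRACE = 5  # assumed: a custom level below logging.DEBUG (10)
logging.addLevelName(TRACE, "TRACE")

_logger = logging.getLogger("quasarr")
_logger.setLevel(TRACE)
_logger.addHandler(logging.StreamHandler(sys.stdout))

_TAGS = re.compile(r"</?d>")  # assumed: strip <d>…</d> dim-style markup for plain output

def _log(level, message):
    _logger.log(level, _TAGS.sub("", message))

def trace(message): _log(TRACE, message)
def debug(message): _log(logging.DEBUG, message)
def info(message): _log(logging.INFO, message)
def warn(message): _log(logging.WARNING, message)
def error(message): _log(logging.ERROR, message)

Read against such a scheme, the hunks below are consistent: routine skips drop to trace, recoverable parse problems become warn, and hard failures (feed or search page unreachable, missing IMDb title) become error.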
quasarr/search/sources/dj.py
@@ -13,7 +13,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, error, trace, warn
 
 hostname = "dj"
 
@@ -31,9 +31,7 @@ def dj_feed(shared_state, start_time, request_from, mirror=None):
     releases = []
 
     if "sonarr" not in request_from.lower():
-        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
-        )
+        debug(f"<d>Skipping {request_from} search (unsupported media type)!</d>")
         return releases
 
     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -47,7 +45,7 @@
         r.raise_for_status()
         data = json.loads(r.content)
     except Exception as e:
-        info(f"{hostname.upper()}: feed load error: {e}")
+        error(f"Feed load error: {e}")
         mark_hostname_issue(
             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
         )
@@ -101,10 +99,10 @@
            )
 
        except Exception as e:
-            debug(f"{hostname.upper()}: feed parse error: {e}")
+            warn(f"Feed parse error: {e}")
            continue
 
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
@@ -123,9 +121,7 @@ def dj_search(
     releases = []
 
     if "sonarr" not in request_from.lower():
-        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
-        )
+        debug(f"<d>Skipping {request_from} search (unsupported media type)!</d>")
         return releases
 
     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -133,11 +129,12 @@
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if not imdb_id:
+        error(f"No IMDb ID found in search string '{search_string}'")
         return releases
 
     localized_title = get_localized_title(shared_state, imdb_id, "de")
     if not localized_title:
-        info(f"{hostname.upper()}: no localized title for IMDb {imdb_id}")
+        error(f"No localized title for IMDb {imdb_id}")
         return releases
 
     headers = {"User-Agent": shared_state.values["user_agent"]}
@@ -149,7 +146,7 @@
         soup = BeautifulSoup(r.content, "html.parser")
         results = soup.find_all("a", href=re.compile(r"^/serie/"))
     except Exception as e:
-        info(f"{hostname.upper()}: search load error: {e}")
+        error(f"Search load error: {e}")
         mark_hostname_issue(
             hostname, "search", str(e) if "e" in dir() else "Error occurred"
         )
@@ -167,12 +164,12 @@
            if not re.search(
                rf"\b{re.escape(sanitized_search_string)}\b", sanitized_title
            ):
-                debug(
+                trace(
                    f"Search string '{localized_title}' doesn't match '{result_title}'"
                )
                continue
 
-            debug(
+            trace(
                f"Matched search string '{localized_title}' with result '{result_title}'"
            )
 
@@ -181,7 +178,7 @@
            r = requests.get(series_url, headers=headers, timeout=10)
            media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
            if not media_id_match:
-                debug(f"{hostname.upper()}: no media id for {result_title}")
+                warn(f"No media id for {result_title}")
                continue
 
            media_id = media_id_match.group(1)
@@ -204,7 +201,7 @@
 
            published = convert_to_rss_date(item.get("createdAt"))
            if not published:
-                debug(f"{hostname.upper()}: no published date for {title}")
+                debug(f"No published date for {title}")
                published = one_hour_ago
 
            mb = 0
@@ -235,10 +232,10 @@
            )
 
        except Exception as e:
-            debug(f"{hostname.upper()}: search parse error: {e}")
+            warn(f"Search parse error: {e}")
            continue
 
-    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    debug(f"Time taken: {time.time() - start_time:.2f}s")
 
     if releases:
         clear_hostname_issue(hostname)
quasarr/search/sources/dl.py
@@ -12,7 +12,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace, warn
 from quasarr.providers.sessions.dl import (
     fetch_via_requests_session,
     invalidate_session,
@@ -70,13 +70,13 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
        forum = "hd.14"
 
    if not host:
-        debug(f"{hostname}: hostname not configured")
+        debug("hostname not configured")
        return releases
 
    try:
        sess = retrieve_and_validate_session(shared_state)
        if not sess:
-            info(f"Could not retrieve valid session for {host}")
+            warn(f"Could not retrieve valid session for {host}")
            return releases
 
        forum_url = f"https://www.{host}/forums/{forum}/?order=post_date&direction=desc"
@@ -89,7 +89,7 @@
        items = soup.select("div.structItem.structItem--thread")
 
        if not items:
-            debug(f"{hostname}: No entries found in Forum")
+            debug("No entries found in Forum")
            return releases
 
        for item in items:
@@ -148,18 +148,18 @@
                )
 
            except Exception as e:
-                debug(f"{hostname}: error parsing Forum item: {e}")
+                debug(f"error parsing Forum item: {e}")
                continue
 
    except Exception as e:
-        info(f"{hostname}: Forum feed error: {e}")
+        warn(f"Forum feed error: {e}")
        mark_hostname_issue(
            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
        )
        invalidate_session(shared_state)
 
    elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
@@ -222,9 +222,7 @@ def _search_single_page(
        )
 
        if search_response.status_code != 200:
-            debug(
-                f"{hostname}: [Page {page_num}] returned status {search_response.status_code}"
-            )
+            debug(f"[Page {page_num}] returned status {search_response.status_code}")
            return page_releases, None
 
        # Extract search ID from first page
@@ -233,18 +231,16 @@
            match = re.search(r"/search/(\d+)/", search_response.url)
            if match:
                extracted_search_id = match.group(1)
-                debug(
-                    f"{hostname}: [Page 1] Extracted search ID: {extracted_search_id}"
-                )
+                trace(f"[Page 1] Extracted search ID: {extracted_search_id}")
 
        soup = BeautifulSoup(search_response.text, "html.parser")
        result_items = soup.select("li.block-row")
 
        if not result_items:
-            debug(f"{hostname}: [Page {page_num}] found 0 results")
+            trace(f"[Page {page_num}] found 0 results")
            return page_releases, extracted_search_id
 
-        debug(f"{hostname}: [Page {page_num}] found {len(result_items)} results")
+        trace(f"[Page {page_num}] found {len(result_items)} results")
 
        for item in result_items:
            try:
@@ -319,12 +315,12 @@
                )
 
            except Exception as e:
-                debug(f"{hostname}: [Page {page_num}] error parsing item: {e}")
+                debug(f"[Page {page_num}] error parsing item: {e}")
 
        return page_releases, extracted_search_id
 
    except Exception as e:
-        info(f"{hostname}: [Page {page_num}] error: {e}")
+        warn(f"[Page {page_num}] error: {e}")
        mark_hostname_issue(
            hostname, "search", str(e) if "e" in dir() else "Error occurred"
        )
@@ -351,7 +347,7 @@ def dl_search(
    if imdb_id:
        title = get_localized_title(shared_state, imdb_id, "de")
        if not title:
-            info(f"{hostname}: no title for IMDb {imdb_id}")
+            info(f"no title for IMDb {imdb_id}")
            return releases
        search_string = title
        if not season:
@@ -361,14 +357,14 @@
    search_string = unescape(search_string)
    max_search_duration = 7
 
-    debug(
-        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - max {max_search_duration}s"
+    trace(
+        f"Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - max {max_search_duration}s"
    )
 
    try:
        sess = retrieve_and_validate_session(shared_state)
        if not sess:
-            info(f"Could not retrieve valid session for {host}")
+            warn(f"Could not retrieve valid session for {host}")
            return releases
 
        search_id = None
@@ -395,9 +391,7 @@
 
            page_release_titles = tuple(pr["details"]["title"] for pr in page_releases)
            if page_release_titles in release_titles_per_page:
-                debug(
-                    f"{hostname}: [Page {page_num}] duplicate page detected, stopping"
-                )
+                trace(f"[Page {page_num}] duplicate page detected, stopping")
                break
            release_titles_per_page.add(page_release_titles)
 
@@ -405,37 +399,31 @@
            if page_num == 1:
                search_id = extracted_search_id
                if not search_id:
-                    info(
-                        f"{hostname}: Could not extract search ID, stopping pagination"
-                    )
+                    trace("Could not extract search ID, stopping pagination")
                    break
 
            # Add releases from this page
            releases.extend(page_releases)
-            debug(
-                f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases"
+            trace(
+                f"[Page {page_num}] completed with {len(page_releases)} valid releases"
            )
 
            # Stop if this page returned 0 results
            if len(page_releases) == 0:
-                debug(
-                    f"{hostname}: [Page {page_num}] returned 0 results, stopping pagination"
-                )
+                trace(f"[Page {page_num}] returned 0 results, stopping pagination")
                break
 
    except Exception as e:
-        info(f"{hostname}: search error: {e}")
+        info(f"search error: {e}")
        mark_hostname_issue(
            hostname, "search", str(e) if "e" in dir() else "Error occurred"
        )
        invalidate_session(shared_state)
 
-    debug(
-        f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}"
-    )
+    trace(f"FINAL - Found {len(releases)} valid releases - providing to {request_from}")
 
    elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
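Pieced together, the dl_search hunks above implement a time-boxed, sequential paginated search: page 1 yields a search ID that later pages reuse, and pagination stops on a duplicate page, a missing search ID, an empty page, or the 7-second budget. A condensed sketch of that control flow, reconstructed from the diff rather than copied from the source (the HTTP fetch and parsing live in _search_single_page, stubbed out here):

import time

def _search_single_page(shared_state, search_string, page_num, search_id):
    # Stub: the real helper (partially shown above) fetches and parses one result page.
    return [], None

def paginated_search(shared_state, search_string, max_search_duration=7):
    releases = []
    seen_pages = set()
    search_id = None
    start = time.time()
    page_num = 1
    while time.time() - start < max_search_duration:
        page_releases, extracted_id = _search_single_page(
            shared_state, search_string, page_num, search_id
        )
        titles = tuple(r["details"]["title"] for r in page_releases)
        if titles in seen_pages:
            break  # same page served twice: the site ran out of results
        seen_pages.add(titles)
        if page_num == 1:
            search_id = extracted_id
            if not search_id:
                break  # cannot request page 2+ without a search ID
        releases.extend(page_releases)
        if not page_releases:
            break  # empty page: nothing further to fetch
        page_num += 1
    return releases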
quasarr/search/sources/dt.py
@@ -15,7 +15,7 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, error, info, warn
 
 hostname = "dt"
 supported_mirrors = ["rapidgator", "nitroflare", "ddownload"]
@@ -69,8 +69,8 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
        feed_type = "media/tv-show/"
 
    if mirror and mirror not in supported_mirrors:
-        debug(
-            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!'
+        error(
+            f'Mirror "{mirror}" not supported. Supported: {supported_mirrors}. Skipping!'
        )
        return releases
 
@@ -86,9 +86,7 @@
        try:
            link_tag = article.select_one("h4.font-weight-bold a")
            if not link_tag:
-                debug(
-                    f"Link tag not found in article: {article} at {hostname.upper()}"
-                )
+                warn(f"Link tag not found in article: {article}")
                continue
 
            source = link_tag["href"]
@@ -114,7 +112,7 @@
                r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE
            )
            if not size_match:
-                debug(f"Size not found in article: {article} at {hostname.upper()}")
+                warn(f"Size not found in article: {article}")
                continue
            size_info = size_match.group(1).strip()
            size_item = extract_size(size_info)
@@ -131,7 +129,7 @@
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
        except Exception as e:
-            info(f"Error parsing {hostname.upper()} feed: {e}")
+            warn(f"Error parsing feed: {e}")
            mark_hostname_issue(
                hostname, "feed", str(e) if "e" in dir() else "Error occurred"
            )
@@ -154,13 +152,13 @@
            )
 
    except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        error(f"Error loading feed: {e}")
        mark_hostname_issue(
            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
        )
 
    elapsed = time.time() - start_time
-    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
@@ -188,8 +186,8 @@ def dt_search(
        cat_id = "64"
 
    if mirror and mirror not in supported_mirrors:
-        debug(
-            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!'
+        error(
+            f'Mirror "{mirror}" not supported. Supported: {supported_mirrors}. Skipping search!'
        )
        return releases
 
@@ -278,7 +276,7 @@
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
        except Exception as e:
-            info(f"Error parsing {hostname.upper()} search item: {e}")
+            warn(f"Error parsing search item: {e}")
            mark_hostname_issue(
                hostname, "search", str(e) if "e" in dir() else "Error occurred"
            )
@@ -301,13 +299,13 @@
            )
 
    except Exception as e:
-        info(f"Error loading {hostname.upper()} search page: {e}")
+        error(f"Error loading search page: {e}")
        mark_hostname_issue(
            hostname, "search", str(e) if "e" in dir() else "Error occurred"
        )
 
    elapsed = time.time() - start_time
-    debug(f"Search time: {elapsed:.2f}s ({hostname})")
+    debug(f"Search time: {elapsed:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
quasarr/search/sources/dw.py
@@ -11,7 +11,7 @@ import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace, warn
 
 hostname = "dw"
 supported_mirrors = ["1fichier", "rapidgator", "ddownload", "katfile"]
@@ -47,7 +47,7 @@ def convert_to_rss_date(date_str):
        "December",
    ]
 
-    for german, english in zip(german_months, english_months):
+    for german, english in zip(german_months, english_months, strict=False):
        if german in date_str:
            date_str = date_str.replace(german, english)
            break
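The only change in convert_to_rss_date is the explicit strict=False on zip (available since Python 3.10): behavior is identical for these two equal-length month lists, and the flag mainly spells out the intended truncation semantics (and satisfies linters such as flake8-bugbear's B905). For reference:

# zip truncates silently by default; strict=True raises on mismatched lengths (Python 3.10+).
months = list(zip(["Januar", "Februar"], ["January", "February", "March"], strict=False))
# -> [('Januar', 'January'), ('Februar', 'February')]

# The same call with strict=True raises:
# ValueError: zip() argument 2 is longer than argument 1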
@@ -84,7 +84,7 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
 
    if not "arr" in request_from.lower():
        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
        )
        return releases
 
@@ -159,13 +159,13 @@
            )
 
    except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        warn(f"Error loading {hostname.upper()} feed: {e}")
        mark_hostname_issue(
            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
        )
 
    elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
@@ -187,7 +187,7 @@ def dw_search(
 
    if not "arr" in request_from.lower():
        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
        )
        return releases
 
@@ -214,7 +214,7 @@
        results = search.find_all("h4")
 
    except Exception as e:
-        info(f"Error loading {hostname.upper()} search feed: {e}")
+        warn(f"Error loading {hostname.upper()} search feed: {e}")
        mark_hostname_issue(
            hostname, "search", str(e) if "e" in dir() else "Error occurred"
        )
@@ -232,11 +232,19 @@
            ):
                continue
 
-            if not imdb_id:
-                try:
-                    imdb_id = re.search(r"tt\d+", str(result)).group()
-                except:
-                    imdb_id = None
+            try:
+                release_imdb_id = re.search(r"tt\d+", str(result)).group()
+            except:
+                release_imdb_id = None
+
+            if imdb_id and release_imdb_id and release_imdb_id != imdb_id:
+                trace(
+                    f"{hostname.upper()}: Skipping result '{title}' due to IMDb ID mismatch."
+                )
+                continue
+
+            if release_imdb_id is None:
+                release_imdb_id = imdb_id
 
            source = result.a["href"]
            size_info = result.find("span").text.strip()
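This hunk changes the search semantics rather than just the logging: previously the requested imdb_id was overwritten by whatever tt-ID the result markup happened to contain; now each result's ID is held separately, results contradicting the requested ID are skipped, and the requested ID serves as a fallback when the result names none. fx.py receives the same treatment further down. The logic, distilled into a standalone helper for illustration (the release inlines it instead of factoring it out):

import re

def resolve_release_imdb_id(result_html, requested_imdb_id):
    # Return (skip, imdb_id); skip is True when the result contradicts the request.
    match = re.search(r"tt\d+", result_html)
    release_imdb_id = match.group() if match else None
    if requested_imdb_id and release_imdb_id and release_imdb_id != requested_imdb_id:
        return True, None  # result names a different title: discard it
    # Fall back to the requested ID when the result names none.
    return False, release_imdb_id or requested_imdb_id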
@@ -248,13 +256,13 @@
            ).text.strip()
            published = convert_to_rss_date(date)
            payload = urlsafe_b64encode(
-                f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode(
                    "utf-8"
                )
            ).decode("utf-8")
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
        except Exception as e:
-            info(f"Error parsing {hostname.upper()} search: {e}")
+            warn(f"Error parsing {hostname.upper()} search: {e}")
            mark_hostname_issue(
                hostname, "search", str(e) if "e" in dir() else "Error occurred"
            )
@@ -265,7 +273,7 @@
                "details": {
                    "title": title,
                    "hostname": hostname.lower(),
-                    "imdb_id": imdb_id,
+                    "imdb_id": release_imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
@@ -277,7 +285,7 @@
            )
 
    elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
quasarr/search/sources/fx.py
@@ -10,7 +10,7 @@ import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
-from quasarr.providers.log import debug, info
+from quasarr.providers.log import debug, info, trace, warn
 
 hostname = "fx"
 supported_mirrors = ["rapidgator"]
@@ -33,7 +33,7 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
 
    if not "arr" in request_from.lower():
        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
        )
        return releases
 
@@ -56,7 +56,7 @@
        feed = BeautifulSoup(r.content, "html.parser")
        items = feed.find_all("article")
    except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        warn(f"Error loading {hostname.upper()} feed: {e}")
        mark_hostname_issue(
            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
        )
@@ -135,7 +135,7 @@
            )
 
    elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)
@@ -157,7 +157,7 @@ def fx_search(
 
    if not "arr" in request_from.lower():
        debug(
-            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+            f'<d>Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!</d>'
        )
        return releases
 
@@ -168,6 +168,11 @@
        )
        return releases
 
+    if search_string != "":
+        imdb_id = shared_state.is_imdb_id(search_string)
+    else:
+        imdb_id = None
+
    url = f"https://{fx}/?s={search_string}"
    headers = {
        "User-Agent": shared_state.values["user_agent"],
@@ -180,7 +185,7 @@
        results = search.find("h2", class_="entry-title")
 
    except Exception as e:
-        info(f"Error loading {hostname.upper()} feed: {e}")
+        warn(f"Error loading {hostname.upper()} feed: {e}")
        mark_hostname_issue(
            hostname, "search", str(e) if "e" in dir() else "Error occurred"
        )
@@ -195,7 +200,7 @@
            feed = BeautifulSoup(result_r.content, "html.parser")
            items = feed.find_all("article")
        except Exception as e:
-            info(f"Error loading {hostname.upper()} feed: {e}")
+            warn(f"Error loading {hostname.upper()} feed: {e}")
            mark_hostname_issue(
                hostname, "search", str(e) if "e" in dir() else "Error occurred"
            )
@@ -220,9 +225,18 @@
 
                try:
                    imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
-                    imdb_id = re.search(r"tt\d+", str(imdb_link)).group()
+                    release_imdb_id = re.search(
+                        r"tt\d+", str(imdb_link)
+                    ).group()
                except:
-                    imdb_id = None
+                    release_imdb_id = None
+
+                if imdb_id and release_imdb_id and release_imdb_id != imdb_id:
+                    trace(f"Skipping result '{title}' due to IMDb ID mismatch.")
+                    continue
+
+                if release_imdb_id is None:
+                    release_imdb_id = imdb_id
 
                try:
                    size_info = (
@@ -269,13 +283,13 @@
                )
 
            except Exception as e:
-                info(f"Error parsing {hostname.upper()} search: {e}")
+                info(f"Error parsing search: {e}")
                mark_hostname_issue(
                    hostname, "search", str(e) if "e" in dir() else "Error occurred"
                )
 
    elapsed_time = time.time() - start_time
-    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+    debug(f"Time taken: {elapsed_time:.2f}s")
 
    if releases:
        clear_hostname_issue(hostname)