quasarr-2.1.5-py3-none-any.whl → quasarr-2.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of quasarr might be problematic.

Files changed (57)
  1. quasarr/api/__init__.py +94 -23
  2. quasarr/api/captcha/__init__.py +0 -12
  3. quasarr/api/config/__init__.py +22 -11
  4. quasarr/api/packages/__init__.py +26 -34
  5. quasarr/api/statistics/__init__.py +15 -15
  6. quasarr/downloads/__init__.py +9 -1
  7. quasarr/downloads/packages/__init__.py +2 -2
  8. quasarr/downloads/sources/al.py +6 -0
  9. quasarr/downloads/sources/by.py +29 -20
  10. quasarr/downloads/sources/dd.py +9 -1
  11. quasarr/downloads/sources/dl.py +3 -0
  12. quasarr/downloads/sources/dt.py +16 -7
  13. quasarr/downloads/sources/dw.py +22 -17
  14. quasarr/downloads/sources/he.py +11 -6
  15. quasarr/downloads/sources/mb.py +9 -3
  16. quasarr/downloads/sources/nk.py +9 -3
  17. quasarr/downloads/sources/nx.py +21 -17
  18. quasarr/downloads/sources/sf.py +21 -13
  19. quasarr/downloads/sources/sl.py +10 -2
  20. quasarr/downloads/sources/wd.py +18 -9
  21. quasarr/downloads/sources/wx.py +7 -11
  22. quasarr/providers/auth.py +1 -1
  23. quasarr/providers/cloudflare.py +1 -1
  24. quasarr/providers/hostname_issues.py +63 -0
  25. quasarr/providers/html_images.py +1 -18
  26. quasarr/providers/html_templates.py +104 -12
  27. quasarr/providers/obfuscated.py +11 -11
  28. quasarr/providers/sessions/al.py +27 -11
  29. quasarr/providers/sessions/dd.py +12 -4
  30. quasarr/providers/sessions/dl.py +19 -11
  31. quasarr/providers/sessions/nx.py +12 -4
  32. quasarr/providers/version.py +1 -1
  33. quasarr/search/sources/al.py +12 -1
  34. quasarr/search/sources/by.py +15 -4
  35. quasarr/search/sources/dd.py +22 -3
  36. quasarr/search/sources/dj.py +12 -1
  37. quasarr/search/sources/dl.py +12 -6
  38. quasarr/search/sources/dt.py +17 -4
  39. quasarr/search/sources/dw.py +15 -4
  40. quasarr/search/sources/fx.py +19 -6
  41. quasarr/search/sources/he.py +15 -2
  42. quasarr/search/sources/mb.py +15 -4
  43. quasarr/search/sources/nk.py +15 -2
  44. quasarr/search/sources/nx.py +15 -4
  45. quasarr/search/sources/sf.py +25 -8
  46. quasarr/search/sources/sj.py +14 -1
  47. quasarr/search/sources/sl.py +17 -2
  48. quasarr/search/sources/wd.py +15 -4
  49. quasarr/search/sources/wx.py +16 -18
  50. quasarr/storage/setup.py +150 -35
  51. {quasarr-2.1.5.dist-info → quasarr-2.2.0.dist-info}/METADATA +6 -3
  52. quasarr-2.2.0.dist-info/RECORD +82 -0
  53. {quasarr-2.1.5.dist-info → quasarr-2.2.0.dist-info}/WHEEL +1 -1
  54. quasarr-2.1.5.dist-info/RECORD +0 -81
  55. {quasarr-2.1.5.dist-info → quasarr-2.2.0.dist-info}/entry_points.txt +0 -0
  56. {quasarr-2.1.5.dist-info → quasarr-2.2.0.dist-info}/licenses/LICENSE +0 -0
  57. {quasarr-2.1.5.dist-info → quasarr-2.2.0.dist-info}/top_level.txt +0 -0

quasarr/downloads/sources/dl.py CHANGED
@@ -6,6 +6,7 @@ import re
 
 from bs4 import BeautifulSoup, NavigableString
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dl import retrieve_and_validate_session, fetch_via_requests_session, invalidate_session
 from quasarr.providers.utils import generate_status_url, check_links_online_status
@@ -313,6 +314,7 @@ def get_dl_download_links(shared_state, url, mirror, title, password):
     sess = retrieve_and_validate_session(shared_state)
     if not sess:
         info(f"Could not retrieve valid session for {host}")
+        mark_hostname_issue(hostname, "download", "Session error")
         return {"links": [], "password": ""}
 
     try:
@@ -376,5 +378,6 @@ def get_dl_download_links(shared_state, url, mirror, title, password):
 
     except Exception as e:
         info(f"Error extracting download links from {url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         invalidate_session(shared_state)
         return {"links": [], "password": ""}

quasarr/downloads/sources/dt.py CHANGED
@@ -8,19 +8,22 @@ from urllib.parse import urlparse
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info
 
+hostname = "dt"
+
 
 def derive_mirror_from_url(url):
     """Extract hoster name from URL hostname."""
     try:
-        hostname = urlparse(url).netloc.lower()
-        if hostname.startswith('www.'):
-            hostname = hostname[4:]
-        parts = hostname.split('.')
+        mirror_hostname = urlparse(url).netloc.lower()
+        if mirror_hostname.startswith('www.'):
+            mirror_hostname = mirror_hostname[4:]
+        parts = mirror_hostname.split('.')
         if len(parts) >= 2:
             return parts[-2]
-        return hostname
+        return mirror_hostname
     except:
         return "unknown"
 
@@ -36,23 +39,27 @@ def get_dt_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        resp = session.get(url, headers=headers, timeout=10)
-        soup = BeautifulSoup(resp.text, "html.parser")
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, "html.parser")
 
         article = soup.find("article")
         if not article:
             info(f"Could not find article block on DT page for {title}")
+            mark_hostname_issue(hostname, "download", "Could not find article block")
             return None
 
         body = article.find("div", class_="card-body")
         if not body:
             info(f"Could not find download section for {title}")
+            mark_hostname_issue(hostname, "download", "Could not find download section")
             return None
 
         anchors = body.find_all("a", href=True)
 
     except Exception as e:
         info(f"DT site has been updated. Grabbing download links for {title} not possible! ({e})")
+        mark_hostname_issue(hostname, "download", str(e))
        return None
 
     filtered = []
@@ -85,4 +92,6 @@ def get_dt_download_links(shared_state, url, mirror, title, password):
         mirror_name = derive_mirror_from_url(u)
         filtered.append([u, mirror_name])
 
+    if filtered:
+        clear_hostname_issue(hostname)
     return {"links": filtered} if filtered else None

quasarr/downloads/sources/dw.py CHANGED
@@ -7,8 +7,11 @@ import re
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug
 
+hostname = "dw"
+
 
 def get_dw_download_links(shared_state, url, mirror, title, password):
     """
@@ -27,11 +30,13 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        request = session.get(url, headers=headers, timeout=10)
-        content = BeautifulSoup(request.text, "html.parser")
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        content = BeautifulSoup(r.text, "html.parser")
         download_buttons = content.find_all("button", {"class": "show_link"})
-    except:
+    except Exception as e:
         info(f"DW site has been updated. Grabbing download links for {title} not possible!")
+        mark_hostname_issue(hostname, "download", str(e))
         return {"links": []}
 
     download_links = []
@@ -43,19 +48,17 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
                 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
             }
 
-            response = session.post(ajax_url, payload, headers=headers, timeout=10)
-            if response.status_code != 200:
-                info(f"DW site has been updated. Grabbing download links for {title} not possible!")
-                continue
-            else:
-                response = response.json()
-                link = response["data"].split(",")[0]
+            r = session.post(ajax_url, payload, headers=headers, timeout=10)
+            r.raise_for_status()
+
+            response = r.json()
+            link = response["data"].split(",")[0]
 
-                if dw in link:
-                    match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
-                    if match:
-                        link = (f'https://filecrypt.cc/Container/{match.group(1)}'
-                                f'.html{match.group(2) if match.group(2) else ""}')
+            if dw in link:
+                match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
+                if match:
+                    link = (f'https://filecrypt.cc/Container/{match.group(1)}'
+                            f'.html{match.group(2) if match.group(2) else ""}')
 
             hoster = button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
             hoster = f"1fichier" if hoster.startswith("fichier") else hoster  # align with expected mirror name
@@ -64,8 +67,10 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
                 continue
 
             download_links.append([link, hoster])
-    except:
+    except Exception as e:
         info(f"DW site has been updated. Parsing download links for {title} not possible!")
-        pass
+        mark_hostname_issue(hostname, "download", str(e))
 
+    if download_links:
+        clear_hostname_issue(hostname)
     return {"links": download_links}

quasarr/downloads/sources/he.py CHANGED
@@ -8,6 +8,7 @@ from urllib.parse import urlparse, urljoin
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 
 hostname = "he"
@@ -27,10 +28,12 @@ def get_he_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        resp = session.get(url, headers=headers, timeout=30)
-        soup = BeautifulSoup(resp.text, 'html.parser')
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, 'html.parser')
     except Exception as e:
         info(f"{hostname}: could not fetch release for {title}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         return {"links": [], "imdb_id": None}
 
     imdb_id = None
@@ -55,7 +58,7 @@ def get_he_download_links(shared_state, url, mirror, title, password):
             return {"links": [], "imdb_id": None}
 
         action = form.get('action') or url
-        action_url = urljoin(resp.url, action)
+        action_url = urljoin(r.url, action)
 
         payload = {}
         for inp in form.find_all('input'):
@@ -76,12 +79,14 @@ def get_he_download_links(shared_state, url, mirror, title, password):
                 payload[m.group('key')] = m.group('val')
 
         post_headers = headers.copy()
-        post_headers.update({'Referer': resp.url})
+        post_headers.update({'Referer': r.url})
         try:
-            resp = session.post(action_url, data=payload, headers=post_headers, timeout=30)
-            soup = BeautifulSoup(resp.text, 'html.parser')
+            r = session.post(action_url, data=payload, headers=post_headers, timeout=10)
+            r.raise_for_status()
+            soup = BeautifulSoup(r.text, 'html.parser')
         except Exception as e:
             info(f"{hostname}: could not submit protector form for {title}: {e}")
+            mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
             break
 
         unlocked = soup.select('.content-protector-access-form')

quasarr/downloads/sources/mb.py CHANGED
@@ -7,8 +7,11 @@ import re
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug
 
+hostname = "mb"
+
 
 def get_mb_download_links(shared_state, url, mirror, title, password):
     """
@@ -22,13 +25,14 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
     }
 
     try:
-        response = requests.get(url, headers=headers, timeout=10)
-        response.raise_for_status()
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
     except Exception as e:
         info(f"Failed to fetch page for {title or url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e))
         return {"links": []}
 
-    soup = BeautifulSoup(response.text, "html.parser")
+    soup = BeautifulSoup(r.text, "html.parser")
 
     download_links = []
 
@@ -48,6 +52,8 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
 
     if not download_links:
         info(f"No download links found for {title}. Site structure may have changed. - {url}")
+        mark_hostname_issue(hostname, "download", "No download links found - site structure may have changed")
         return {"links": []}
 
+    clear_hostname_issue(hostname)
     return {"links": download_links}

quasarr/downloads/sources/nk.py CHANGED
@@ -5,6 +5,7 @@
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info
 
 hostname = "nk"
@@ -26,10 +27,12 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        resp = session.get(url, headers=headers, timeout=20)
-        soup = BeautifulSoup(resp.text, 'html.parser')
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, 'html.parser')
     except Exception as e:
         info(f"{hostname}: could not fetch release page for {title}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         return {"links": []}
 
     anchors = soup.select('a.btn-orange')
@@ -47,9 +50,12 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
             href = 'https://' + host + href
 
         try:
-            href = requests.head(href, headers=headers, allow_redirects=True, timeout=20).url
+            r = requests.head(href, headers=headers, allow_redirects=True, timeout=10)
+            r.raise_for_status()
+            href = r.url
         except Exception as e:
             info(f"{hostname}: could not resolve download link for {title}: {e}")
+            mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
             continue
 
         candidates.append([href, mirror])

quasarr/downloads/sources/nx.py CHANGED
@@ -7,9 +7,12 @@ from urllib.parse import urlparse
 
 import requests
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info
 from quasarr.providers.sessions.nx import retrieve_and_validate_session
 
+hostname = "nx"
+
 
 def derive_mirror_from_url(url):
     """Extract hoster name from URL hostname."""
@@ -39,11 +42,10 @@ def get_filer_folder_links_via_api(shared_state, url):
     folder_hash = m.group(1)
     api_url = f"https://filer.net/api/folder/{folder_hash}"
 
-    response = requests.get(api_url, headers=headers, timeout=10)
-    if not response or response.status_code != 200:
-        return url
+    r = requests.get(api_url, headers=headers, timeout=10)
+    r.raise_for_status()
 
-    data = response.json()
+    data = r.json()
     files = data.get("files", [])
     links = []
 
@@ -75,6 +77,7 @@ def get_nx_download_links(shared_state, url, mirror, title, password):
     nx_session = retrieve_and_validate_session(shared_state)
     if not nx_session:
         info(f"Could not retrieve valid session for {nx}")
+        mark_hostname_issue(hostname, "download", "Session error")
         return {"links": []}
 
     headers = {
@@ -87,23 +90,24 @@ def get_nx_download_links(shared_state, url, mirror, title, password):
     url_segments = url.split('/')
     payload_url = '/'.join(url_segments[:-2]) + '/api/getLinks/' + url_segments[-1]
 
-    payload = nx_session.post(payload_url,
-                              headers=headers,
-                              json=json_data,
-                              timeout=10
-                              )
-
-    if payload.status_code == 200:
-        try:
-            payload = payload.json()
-        except:
-            info("Invalid response decrypting " + str(title) + " URL: " + str(url))
-            shared_state.values["database"]("sessions").delete("nx")
-            return {"links": []}
+    try:
+        r = nx_session.post(payload_url,
+                            headers=headers,
+                            json=json_data,
+                            timeout=10
+                            )
+        r.raise_for_status()
+
+        payload = r.json()
+    except Exception as e:
+        info(f"Could not get NX Links: {e}")
+        mark_hostname_issue(hostname, "download", str(e))
+        return {"links": []}
 
     if payload and any(key in payload for key in ("err", "error")):
         error_msg = payload.get("err") or payload.get("error")
         info(f"Error decrypting {title!r} URL: {url!r} - {error_msg}")
+        mark_hostname_issue(hostname, "download", "Download error")
         shared_state.values["database"]("sessions").delete("nx")
         return {"links": []}
 

quasarr/downloads/sources/sf.py CHANGED
@@ -8,9 +8,12 @@ from datetime import datetime
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.search.sources.sf import parse_mirrors
 
+hostname = "sf"
+
 
 def is_last_section_integer(url):
     last_section = url.rstrip('/').split('/')[-1]
@@ -22,19 +25,20 @@ def is_last_section_integer(url):
 def resolve_sf_redirect(url, user_agent):
     """Follow redirects and return final URL or None if 404."""
     try:
-        response = requests.get(url, allow_redirects=True, timeout=10,
-                                headers={'User-Agent': user_agent})
-        if response.history:
-            for resp in response.history:
-                debug(f"Redirected from {resp.url} to {response.url}")
-            if "/404.html" in response.url:
-                info(f"SF link redirected to 404 page: {response.url}")
+        r = requests.get(url, allow_redirects=True, timeout=10, headers={'User-Agent': user_agent})
+        r.raise_for_status()
+        if r.history:
+            for resp in r.history:
+                debug(f"Redirected from {resp.url} to {r.url}")
+            if "/404.html" in r.url:
+                info(f"SF link redirected to 404 page: {r.url}")
                 return None
-            return response.url
+            return r.url
         else:
             info(f"SF blocked attempt to resolve {url}. Your IP may be banned. Try again later.")
     except Exception as e:
         info(f"Error fetching redirected URL for {url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
     return None
 
 
@@ -84,7 +88,9 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
         season = "ALL"
 
     headers = {'User-Agent': user_agent}
-    series_page = requests.get(url, headers=headers, timeout=10).text
+    r = requests.get(url, headers=headers, timeout=10)
+    r.raise_for_status()
+    series_page = r.text
     soup = BeautifulSoup(series_page, "html.parser")
 
     # Extract IMDb id if present
@@ -100,14 +106,16 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
     epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
     api_url = 'https://' + sf + '/api/v1/' + season_id + f'/season/{season}?lang=ALL&_=' + epoch
 
-    response = requests.get(api_url, headers=headers, timeout=10)
+    r = requests.get(api_url, headers=headers, timeout=10)
+    r.raise_for_status()
     try:
-        data = response.json()["html"]
+        data = r.json()["html"]
     except ValueError:
         epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
         api_url = 'https://' + sf + '/api/v1/' + season_id + f'/season/ALL?lang=ALL&_=' + epoch
-        response = requests.get(api_url, headers=headers, timeout=10)
-        data = response.json()["html"]
+        r = requests.get(api_url, headers=headers, timeout=10)
+        r.raise_for_status()
+        data = r.json()["html"]
 
     content = BeautifulSoup(data, "html.parser")
     items = content.find_all("h3")
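
The epoch expression in this hunk is a cache-buster: str(datetime.now().timestamp()) typically yields something like '1717777777.123456', so stripping the dot and the last three characters leaves a 13-digit millisecond timestamp, mimicking the _=Date.now() query parameter a browser would append. A quick standalone check:

    from datetime import datetime

    ts = str(datetime.now().timestamp())  # e.g. '1717777777.123456'
    epoch = ts.replace('.', '')[:-3]      # e.g. '1717777777123' (milliseconds)
    print(epoch)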

quasarr/downloads/sources/sl.py CHANGED
@@ -8,8 +8,10 @@ from urllib.parse import urlparse
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug
 
+hostname = "sl"
 supported_mirrors = ["nitroflare", "ddownload"]
 
 
@@ -31,12 +33,15 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        resp = session.get(url, headers=headers, timeout=10)
-        soup = BeautifulSoup(resp.text, "html.parser")
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+
+        soup = BeautifulSoup(r.text, "html.parser")
 
         entry = soup.find("div", class_="entry")
         if not entry:
             info(f"Could not find main content section for {title}")
+            mark_hostname_issue(hostname, "download", "Could not find main content section")
             return {"links": [], "imdb_id": None}
 
         imdb_id = None
@@ -62,6 +67,7 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
 
     except Exception as e:
         info(f"SL site has been updated. Grabbing download links for {title} not possible! ({e})")
+        mark_hostname_issue(hostname, "download", str(e))
         return {"links": [], "imdb_id": None}
 
     filtered = []
@@ -97,6 +103,8 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
         mirror_name = derive_mirror_from_host(host)
         filtered.append([u, mirror_name])
 
+    if filtered:
+        clear_hostname_issue(hostname)
     return {
         "links": filtered,
         "imdb_id": imdb_id,

quasarr/downloads/sources/wd.py CHANGED
@@ -9,29 +9,34 @@ import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.cloudflare import flaresolverr_get, is_cloudflare_challenge
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.providers.utils import is_flaresolverr_available
 
+hostname = "wd"
+
 
 def resolve_wd_redirect(url, user_agent):
     """
     Follow redirects for a WD mirror URL and return the final destination.
     """
     try:
-        response = requests.get(
+        r = requests.get(
             url,
             allow_redirects=True,
             timeout=10,
             headers={"User-Agent": user_agent},
         )
-        if response.history:
-            for resp in response.history:
-                debug(f"Redirected from {resp.url} to {response.url}")
-            return response.url
+        r.raise_for_status()
+        if r.history:
+            for resp in r.history:
+                debug(f"Redirected from {resp.url} to {r.url}")
+            return r.url
         else:
             info(f"WD blocked attempt to resolve {url}. Your IP may be banned. Try again later.")
     except Exception as e:
         info(f"Error fetching redirected URL for {url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
     return None
 
 
@@ -46,17 +51,21 @@ def get_wd_download_links(shared_state, url, mirror, title, password):
     user_agent = shared_state.values["user_agent"]
 
     try:
-        output = requests.get(url)
-        if output.status_code == 403 or is_cloudflare_challenge(output.text):
+        r = requests.get(url)
+        if r.status_code >= 400 or is_cloudflare_challenge(r.text):
             if is_flaresolverr_available(shared_state):
                 info("WD is protected by Cloudflare. Using FlareSolverr to bypass protection.")
-                output = flaresolverr_get(shared_state, url)
+                r = flaresolverr_get(shared_state, url)
             else:
                 info("WD is protected by Cloudflare but FlareSolverr is not configured. "
                      "Please configure FlareSolverr in the web UI to access this site.")
+                mark_hostname_issue(hostname, "download", "FlareSolverr required but missing.")
                 return {"links": [], "imdb_id": None}
 
-        soup = BeautifulSoup(output.text, "html.parser")
+        if r.status_code >= 400:
+            mark_hostname_issue(hostname, "download", f"Download error: {str(r.status_code)}")
+
+        soup = BeautifulSoup(r.text, "html.parser")
 
         # extract IMDb id if present
         imdb_id = None

quasarr/downloads/sources/wx.py CHANGED
@@ -6,6 +6,7 @@ import re
 
 import requests
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.providers.utils import check_links_online_status
 
@@ -32,11 +33,8 @@ def get_wx_download_links(shared_state, url, mirror, title, password):
         session = requests.Session()
 
         # First, load the page to establish session cookies
-        response = session.get(url, headers=headers, timeout=30)
-
-        if response.status_code != 200:
-            info(f"{hostname.upper()}: Failed to load page: {url} (Status: {response.status_code})")
-            return {"links": []}
+        r = session.get(url, headers=headers, timeout=30)
+        r.raise_for_status()
 
         # Extract slug from URL
         slug_match = re.search(r'/detail/([^/?]+)', url)
@@ -53,13 +51,10 @@ def get_wx_download_links(shared_state, url, mirror, title, password):
         }
 
         debug(f"{hostname.upper()}: Fetching API data from: {api_url}")
-        api_response = session.get(api_url, headers=api_headers, timeout=30)
-
-        if api_response.status_code != 200:
-            info(f"{hostname.upper()}: Failed to load API: {api_url} (Status: {api_response.status_code})")
-            return {"links": []}
+        api_r = session.get(api_url, headers=api_headers, timeout=30)
+        api_r.raise_for_status()
 
-        data = api_response.json()
+        data = api_r.json()
 
         # Navigate to releases in the API response
         if 'item' not in data or 'releases' not in data['item']:
@@ -165,4 +160,5 @@ def get_wx_download_links(shared_state, url, mirror, title, password):
 
     except Exception as e:
         info(f"{hostname.upper()}: Error extracting download links from {url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         return {"links": []}
quasarr/providers/auth.py CHANGED
@@ -273,7 +273,7 @@ def add_auth_routes(app):
         return _handle_logout()
 
 
-def add_auth_hook(app, whitelist_prefixes=None, whitelist_suffixes=None):
+def add_auth_hook(app, whitelist_prefixes=[], whitelist_suffixes=[]):
     """Add authentication hook to a Bottle app.
 
     Args:
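
Note that a list default such as whitelist_prefixes=[] is evaluated once at definition time and shared across all calls, which stays safe only while the hook treats it as read-only. A standalone demonstration of that Python behavior (unrelated to quasarr's code):

    def append_item(item, bucket=[]):  # default list created once, then shared
        bucket.append(item)
        return bucket

    print(append_item("a"))  # ['a']
    print(append_item("b"))  # ['a', 'b'] - same list object as the first call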

quasarr/providers/cloudflare.py CHANGED
@@ -162,7 +162,7 @@ class FlareSolverrResponse:
 
     def raise_for_status(self):
         if 400 <= self.status_code:
-            raise requests.HTTPError(f"{self.status_code} Error for URL: {self.url}")
+            raise requests.HTTPError(f"{self.status_code} Error at {self.url}")
 
 
 def flaresolverr_get(shared_state, url, timeout=60):
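
Because FlareSolverrResponse implements the same raise_for_status() contract as requests.Response, a caller can fall back to FlareSolverr and keep a single error-handling path. A hedged sketch of that duck-typed usage (fetch_page is an illustrative name; flaresolverr_get and is_cloudflare_challenge are the module's own functions):

    # Illustrative caller: either a requests.Response or a
    # FlareSolverrResponse flows through the same error check.
    import requests

    from quasarr.providers.cloudflare import flaresolverr_get, is_cloudflare_challenge


    def fetch_page(shared_state, url):
        r = requests.get(url, timeout=10)
        if r.status_code >= 400 or is_cloudflare_challenge(r.text):
            r = flaresolverr_get(shared_state, url)  # returns FlareSolverrResponse
        r.raise_for_status()  # same call works for both response types
        return r.text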