quasarr 1.23.0__py3-none-any.whl → 1.24.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -19,30 +19,62 @@ def is_last_section_integer(url):
     return None
 
 
-def get_sf_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
+def resolve_sf_redirect(url, user_agent):
+    """Follow redirects and return final URL or None if 404."""
+    try:
+        response = requests.get(url, allow_redirects=True, timeout=10,
+                                headers={'User-Agent': user_agent})
+        if response.history:
+            for resp in response.history:
+                debug(f"Redirected from {resp.url} to {response.url}")
+            if "/404.html" in response.url:
+                info(f"SF link redirected to 404 page: {response.url}")
+                return None
+            return response.url
+        else:
+            info(f"SF blocked attempt to resolve {url}. Your IP may be banned. Try again later.")
+    except Exception as e:
+        info(f"Error fetching redirected URL for {url}: {e}")
+    return None
+
+
+def get_sf_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    SF source handler - resolves redirects and returns filecrypt links.
+    """
+
+    sf = shared_state.values["config"]("Hostnames").get("sf")
+    user_agent = shared_state.values["user_agent"]
+
+    # Handle external redirect URLs
+    if url.startswith(f"https://{sf}/external"):
+        resolved_url = resolve_sf_redirect(url, user_agent)
+        if not resolved_url:
+            return {"links": [], "imdb_id": None}
+        return {"links": [[resolved_url, "filecrypt"]], "imdb_id": None}
+
+    # Handle series page URLs - need to find the right release
     release_pattern = re.compile(
         r'''
-        ^                     # start of string
-        (?P<name>.+?)\.       # show name (dots in name) up to the dot before “S”
-        S(?P<season>\d+)      # “S” + season number
-        (?:E\d+(?:-E\d+)?)?   # optional “E##” or “E##-E##”
-        \.                    # literal dot
-        .*?\.                 # anything (e.g. language/codec) up to next dot
-        (?P<resolution>\d+p)  # resolution “720p”, “1080p”, etc.
-        \..+?                 # dot + more junk (e.g. “.WEB.h264”)
-        -(?P<group>\w+)       # dash + release group at end
-        $                     # end of string
+        ^
+        (?P<name>.+?)\.
+        S(?P<season>\d+)
+        (?:E\d+(?:-E\d+)?)?
+        \.
+        .*?\.
+        (?P<resolution>\d+p)
+        \..+?
+        -(?P<group>\w+)
+        $
         ''',
         re.IGNORECASE | re.VERBOSE
     )
 
     release_match = release_pattern.match(title)
-
     if not release_match:
-        return {
-            "real_url": None,
-            "imdb_id": None,
-        }
+        return {"links": [], "imdb_id": None}
 
     release_parts = release_match.groupdict()
 
@@ -51,15 +83,11 @@ def get_sf_download_links(shared_state, url, mirror, title): # signature must a
     if not season:
         season = "ALL"
 
-    sf = shared_state.values["config"]("Hostnames").get("sf")
-    headers = {
-        'User-Agent': shared_state.values["user_agent"],
-    }
-
+    headers = {'User-Agent': user_agent}
     series_page = requests.get(url, headers=headers, timeout=10).text
-
     soup = BeautifulSoup(series_page, "html.parser")
-    # extract IMDb id if present
+
+    # Extract IMDb id if present
     imdb_id = None
     a_imdb = soup.find("a", href=re.compile(r"imdb\.com/title/tt\d+"))
     if a_imdb:
@@ -82,7 +110,6 @@ def get_sf_download_links(shared_state, url, mirror, title): # signature must a
         data = response.json()["html"]
 
         content = BeautifulSoup(data, "html.parser")
-
         items = content.find_all("h3")
 
         for item in items:
@@ -101,15 +128,13 @@ def get_sf_download_links(shared_state, url, mirror, title): # signature must a
 
                 result_parts = result_match.groupdict()
 
-                # Normalize all relevant fields for case-insensitive comparison
                 name_match = release_parts['name'].lower() == result_parts['name'].lower()
-                season_match = release_parts['season'] == result_parts['season']  # Numbers are case-insensitive
+                season_match = release_parts['season'] == result_parts['season']
                 resolution_match = release_parts['resolution'].lower() == result_parts['resolution'].lower()
 
-                # Handle multiple groups and case-insensitive matching
                 result_groups = {g.lower() for g in result_parts['group'].split('/')}
                 release_groups = {g.lower() for g in release_parts['group'].split('/')}
-                group_match = not result_groups.isdisjoint(release_groups)  # Checks if any group matches
+                group_match = not result_groups.isdisjoint(release_groups)
 
                 if name_match and season_match and resolution_match and group_match:
                     info(f'Release "{name}" found on SF at: {url}')
@@ -125,35 +150,16 @@ def get_sf_download_links(shared_state, url, mirror, title): # signature must a
                     else:
                         release_url = next(iter(mirrors["season"].values()))
 
-                    real_url = resolve_sf_redirect(release_url, shared_state.values["user_agent"])
-                    return {
-                        "real_url": real_url,
-                        "imdb_id": imdb_id,
-                    }
+                    real_url = resolve_sf_redirect(release_url, user_agent)
+                    if real_url:
+                        # Use the mirror name if we have it, otherwise use "filecrypt"
+                        mirror_name = mirror if mirror else "filecrypt"
+                        return {"links": [[real_url, mirror_name]], "imdb_id": imdb_id}
+                    else:
+                        return {"links": [], "imdb_id": imdb_id}
             except:
                 continue
     except:
         pass
 
-    return {
-        "real_url": None,
-        "imdb_id": None,
-    }
-
-
-def resolve_sf_redirect(url, user_agent):
-    try:
-        response = requests.get(url, allow_redirects=True, timeout=10,
-                                headers={'User-Agent': user_agent})
-        if response.history:
-            for resp in response.history:
-                debug(f"Redirected from {resp.url} to {response.url}")
-            if "/404.html" in response.url:
-                info(f"SF link redirected to 404 page: {response.url}")
-                return None
-            return response.url
-        else:
-            info(f"SF blocked attempt to resolve {url}. Your IP may be banned. Try again later.")
-    except Exception as e:
-        info(f"Error fetching redirected URL for {url}: {e}")
-    return None
+    return {"links": [], "imdb_id": None}
@@ -3,5 +3,14 @@
 # Project by https://github.com/rix1337
 
 
-def get_sj_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
-    return [url]
+def get_sj_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    SJ source handler - the site itself acts as a protected crypter.
+    Returns the URL for CAPTCHA solving via userscript.
+    """
+
+    return {
+        "links": [[url, "junkies"]]
+    }
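The SJ handler's return shape changes here from a bare list ([url]) to the dict contract shared by the other handlers. A caller bridging both shapes during the migration might look like this (purely illustrative; normalize_sj_result is not a function in the package):

def normalize_sj_result(result):
    """Accept both the pre-1.24.1 list shape and the new dict shape."""
    if isinstance(result, dict):
        return result["links"]  # new: [[url, "junkies"]]
    return [[u, "junkies"] for u in result]  # old: bare list of URLs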
@@ -10,10 +10,23 @@ from bs4 import BeautifulSoup
 
 from quasarr.providers.log import info, debug
 
-supported_mirrors = ["nitroflare", "ddownload"]  # ignoring captcha-protected multiup/mirrorace for now
+supported_mirrors = ["nitroflare", "ddownload"]
 
 
-def get_sl_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
+def derive_mirror_from_host(host):
+    """Get mirror name from hostname."""
+    for m in supported_mirrors:
+        if host.startswith(m + "."):
+            return m
+    return host.split('.')[0] if host else "unknown"
+
+
+def get_sl_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    SL source handler - returns plain download links.
+    """
     headers = {"User-Agent": shared_state.values["user_agent"]}
     session = requests.Session()
 
@@ -24,9 +37,8 @@ def get_sl_download_links(shared_state, url, mirror, title): # signature must al
     entry = soup.find("div", class_="entry")
     if not entry:
         info(f"Could not find main content section for {title}")
-        return False
+        return {"links": [], "imdb_id": None}
 
-    # extract IMDb id if present
     imdb_id = None
     a_imdb = soup.find("a", href=re.compile(r"imdb\.com/title/tt\d+"))
     if a_imdb:
@@ -50,7 +62,7 @@ def get_sl_download_links(shared_state, url, mirror, title): # signature must al
 
     except Exception as e:
         info(f"SL site has been updated. Grabbing download links for {title} not possible! ({e})")
-        return False
+        return {"links": [], "imdb_id": None}
 
     filtered = []
     for a in anchors:
@@ -59,14 +71,14 @@ def get_sl_download_links(shared_state, url, mirror, title): # signature must al
             continue
 
         host = (urlparse(href).hostname or "").lower()
-        # require host to start with one of supported_mirrors + "."
         if not any(host.startswith(m + ".") for m in supported_mirrors):
             continue
 
         if not mirror or mirror in href:
-            filtered.append(href)
+            mirror_name = derive_mirror_from_host(host)
+            filtered.append([href, mirror_name])
 
-    # regexfallback if still empty
+    # regex fallback if still empty
    if not filtered:
         text = "".join(str(x) for x in anchors)
         urls = re.findall(r"https?://[^\s<>'\"]+", text)
@@ -82,7 +94,8 @@ def get_sl_download_links(shared_state, url, mirror, title): # signature must al
             continue
 
         if not mirror or mirror in u:
-            filtered.append(u)
+            mirror_name = derive_mirror_from_host(host)
+            filtered.append([u, mirror_name])
 
     return {
         "links": filtered,
@@ -34,7 +34,13 @@ def resolve_wd_redirect(url, user_agent):
     return None
 
 
-def get_wd_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
+def get_wd_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    WD source handler - resolves redirects and returns protected download links.
+    """
+
     wd = shared_state.values["config"]("Hostnames").get("wd")
     user_agent = shared_state.values["user_agent"]
 
@@ -63,7 +69,7 @@ def get_wd_download_links(shared_state, url, mirror, title): # signature must a
         )
         if not header:
             info(f"WD Downloads section not found. Grabbing download links for {title} not possible!")
-            return False
+            return {"links": [], "imdb_id": None}
 
         card = header.find_parent("div", class_="card")
         body = card.find("div", class_="card-body")
@@ -72,7 +78,7 @@ def get_wd_download_links(shared_state, url, mirror, title): # signature must a
         )
     except Exception:
         info(f"WD site has been updated. Grabbing download links for {title} not possible!")
-        return False
+        return {"links": [], "imdb_id": None}
 
     results = []
     try:
@@ -11,12 +11,11 @@ from quasarr.providers.log import info, debug
 hostname = "wx"
 
 
-def get_wx_download_links(shared_state, url, mirror, title):
+def get_wx_download_links(shared_state, url, mirror, title, password):
     """
-    Get download links from API based on title and mirror.
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
 
-    Returns:
-        list of [url, hoster] pairs where hoster is the actual mirror (e.g., 'ddownload.com', 'rapidgator.net')
+    WX source handler - Grabs download links from API based on title and mirror.
     """
 
     host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -33,13 +32,13 @@ def get_wx_download_links(shared_state, url, mirror, title):
 
         if response.status_code != 200:
             info(f"{hostname.upper()}: Failed to load page: {url} (Status: {response.status_code})")
-            return []
+            return {"links": []}
 
         # Extract slug from URL
         slug_match = re.search(r'/detail/([^/]+)', url)
         if not slug_match:
             info(f"{hostname.upper()}: Could not extract slug from URL: {url}")
-            return []
+            return {"links": []}
 
         api_url = f'https://api.{host}/start/d/{slug_match.group(1)}'
 
@@ -54,14 +53,14 @@ def get_wx_download_links(shared_state, url, mirror, title):
 
         if api_response.status_code != 200:
             info(f"{hostname.upper()}: Failed to load API: {api_url} (Status: {api_response.status_code})")
-            return []
+            return {"links": []}
 
         data = api_response.json()
 
         # Navigate to releases in the API response
         if 'item' not in data or 'releases' not in data['item']:
             info(f"{hostname.upper()}: No releases found in API response")
-            return []
+            return {"links": []}
 
         releases = data['item']['releases']
 
@@ -74,14 +73,14 @@ def get_wx_download_links(shared_state, url, mirror, title):
 
         if not matching_release:
             info(f"{hostname.upper()}: No release found matching title: {title}")
-            return []
+            return {"links": []}
 
         # Extract crypted_links based on mirror
         crypted_links = matching_release.get('crypted_links', {})
 
         if not crypted_links:
             info(f"{hostname.upper()}: No crypted_links found for: {title}")
-            return []
+            return {"links": []}
 
         links = []
 
@@ -117,11 +116,11 @@ def get_wx_download_links(shared_state, url, mirror, title):
 
         if not links:
             info(f"{hostname.upper()}: No supported crypted links found for: {title}")
-            return []
+            return {"links": []}
 
         debug(f"{hostname.upper()}: Found {len(links)} crypted link(s) for: {title}")
-        return links
+        return {"links": links}
 
     except Exception as e:
         info(f"{hostname.upper()}: Error extracting download links from {url}: {e}")
-        return []
+        return {"links": []}