quasarr 1.22.0-py3-none-any.whl → 1.24.0-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- quasarr/api/captcha/__init__.py +108 -33
- quasarr/downloads/__init__.py +217 -278
- quasarr/downloads/sources/al.py +28 -3
- quasarr/downloads/sources/by.py +8 -2
- quasarr/downloads/sources/dd.py +15 -8
- quasarr/downloads/sources/dj.py +11 -2
- quasarr/downloads/sources/dl.py +51 -58
- quasarr/downloads/sources/dt.py +34 -12
- quasarr/downloads/sources/dw.py +9 -3
- quasarr/downloads/sources/he.py +10 -4
- quasarr/downloads/sources/mb.py +10 -4
- quasarr/downloads/sources/nk.py +9 -3
- quasarr/downloads/sources/nx.py +31 -10
- quasarr/downloads/sources/sf.py +61 -55
- quasarr/downloads/sources/sj.py +11 -2
- quasarr/downloads/sources/sl.py +22 -9
- quasarr/downloads/sources/wd.py +9 -3
- quasarr/downloads/sources/wx.py +12 -13
- quasarr/providers/obfuscated.py +37 -18
- quasarr/providers/sessions/al.py +38 -10
- quasarr/providers/version.py +1 -1
- quasarr/search/sources/dl.py +10 -6
- {quasarr-1.22.0.dist-info → quasarr-1.24.0.dist-info}/METADATA +2 -2
- {quasarr-1.22.0.dist-info → quasarr-1.24.0.dist-info}/RECORD +28 -28
- {quasarr-1.22.0.dist-info → quasarr-1.24.0.dist-info}/WHEEL +0 -0
- {quasarr-1.22.0.dist-info → quasarr-1.24.0.dist-info}/entry_points.txt +0 -0
- {quasarr-1.22.0.dist-info → quasarr-1.24.0.dist-info}/licenses/LICENSE +0 -0
- {quasarr-1.22.0.dist-info → quasarr-1.24.0.dist-info}/top_level.txt +0 -0
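The change that repeats across the `quasarr/downloads/sources/*` diffs below is a standardized handler contract: every `get_*_download_links` function gains a trailing `password` parameter (kept even where unused) and returns a dict of the form `{"links": [[url, mirror], ...]}`, optionally with extra keys such as `"password"` (dl.py) or `"imdb_id"` (he.py), instead of a bare list or tuple. A minimal sketch of that contract, assuming a hypothetical handler and caller (neither name ships in the package):

```python
# Sketch of the 1.24.0 source-handler contract visible in the diffs below.
# `get_xy_download_links` and `resolve_links` are illustrative names only.

def get_xy_download_links(shared_state, url, mirror, title, password):
    """KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!"""
    # Each entry pairs a download URL with the mirror/hoster it belongs to.
    links = [["https://example.com/get/abc123", "rapidgator"]]
    return {"links": links, "password": password or ""}


def resolve_links(handler, shared_state, url, mirror, title, password=""):
    # Some handlers (e.g. dt.py) may return None; normalize defensively.
    result = handler(shared_state, url, mirror, title, password) or {"links": []}
    return result.get("links", []), result.get("password", "")


links, pw = resolve_links(get_xy_download_links, None, "https://example.com/thread", None, "Some.Title", "secret")
```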
quasarr/downloads/sources/dd.py
CHANGED

@@ -6,13 +6,19 @@ from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session
 
 
-def get_dd_download_links(shared_state, url, mirror, title):
+def get_dd_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    Returns plain download links from DD API.
+    """
+
     dd = shared_state.values["config"]("Hostnames").get("dd")
 
     dd_session = retrieve_and_validate_session(shared_state)
     if not dd_session:
         info(f"Could not retrieve valid session for {dd}")
-        return []
+        return {"links": []}
 
     links = []
 
@@ -35,9 +41,9 @@ def get_dd_download_links(shared_state, url, mirror, title):
     try:
         release_list = []
         for page in range(0, 100, 20):
-
+            api_url = f'https://{dd}/index/search/keyword/{title}/qualities/{",".join(qualities)}/from/{page}/search'
 
-            releases_on_page = dd_session.get(
+            releases_on_page = dd_session.get(api_url, headers=headers, timeout=10).json()
             if releases_on_page:
                 release_list.extend(releases_on_page)
 
@@ -46,7 +52,7 @@ def get_dd_download_links(shared_state, url, mirror, title):
             if release.get("fake"):
                 debug(f"Release {release.get('release')} marked as fake. Invalidating DD session...")
                 create_and_persist_session(shared_state)
-                return []
+                return {"links": []}
             elif release.get("release") == title:
                 filtered_links = []
                 for link in release["links"]:
@@ -61,10 +67,11 @@ def get_dd_download_links(shared_state, url, mirror, title):
                             for existing_link in filtered_links
                         ):
                             debug(f"Skipping duplicate `.mkv` link from {link['hostname']}")
-                            continue
+                            continue
                     filtered_links.append(link)
 
-
+                # Build [[url, mirror], ...] format
+                links = [[link["url"], link["hostname"]] for link in filtered_links]
                 break
     except Exception as e:
         info(f"Error parsing DD download: {e}")
@@ -73,4 +80,4 @@ def get_dd_download_links(shared_state, url, mirror, title):
     except Exception as e:
         info(f"Error loading DD download: {e}")
 
-    return links
+    return {"links": links}
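A note on the paging in the dd.py hunk above: the search endpoint is now hit with an explicit `api_url` per page, stepping the `from` offset through 0, 20, 40, 60, 80. A standalone illustration (hostname and title invented; `qualities` and `headers` are defined outside the visible hunk, so their shape is assumed here):

```python
dd = "dd.example"                  # the real value comes from the Hostnames config
title = "Some.Show.S01E01.1080p"   # invented release title
qualities = ["1080p"]              # assumed shape; defined outside the hunk

for page in range(0, 100, 20):
    api_url = f'https://{dd}/index/search/keyword/{title}/qualities/{",".join(qualities)}/from/{page}/search'
    print(api_url)  # five URLs, from/0 through from/80
```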
quasarr/downloads/sources/dj.py
CHANGED

@@ -3,5 +3,14 @@
 # Project by https://github.com/rix1337
 
 
-def get_dj_download_links(shared_state, url, mirror, title):
-
+def get_dj_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    DJ source handler - the site itself acts as a protected crypter.
+    Returns the URL for CAPTCHA solving via userscript.
+    """
+
+    return {
+        "links": [[url, "junkies"]]
+    }
quasarr/downloads/sources/dl.py
CHANGED

@@ -17,24 +17,19 @@ def extract_password_from_post(soup, host):
     Extract password from forum post using multiple strategies.
     Returns empty string if no password found or if explicitly marked as 'no password'.
     """
-    # Get flattened text from the post - collapse whitespace to single spaces
     post_text = soup.get_text()
     post_text = re.sub(r'\s+', ' ', post_text).strip()
 
-    # Strategy 1: Look for password label followed by the password value
-    # Pattern: "Passwort:" followed by optional separators, then the password
     password_pattern = r'(?:passwort|password|pass|pw)[\s:]+([a-zA-Z0-9._-]{2,50})'
     match = re.search(password_pattern, post_text, re.IGNORECASE)
 
     if match:
         password = match.group(1).strip()
-        # Skip if it looks like a section header or common word
         if not re.match(r'^(?:download|mirror|link|episode|info|mediainfo|spoiler|hier|click|klick|kein|none|no)',
                         password, re.IGNORECASE):
             debug(f"Found password: {password}")
             return password
 
-    # Strategy 2: Look for explicit "no password" indicators (only if no valid password found)
     no_password_patterns = [
         r'(?:passwort|password|pass|pw)[\s:]*(?:kein(?:es)?|none|no|nicht|not|nein|-|–|—)',
         r'(?:kein(?:es)?|none|no|nicht|not|nein)\s*(?:passwort|password|pass|pw)',
@@ -45,7 +40,6 @@ def extract_password_from_post(soup, host):
         debug("No password required (explicitly stated)")
         return ""
 
-    # Strategy 3: Default to hostname-based password
     default_password = f"www.{host}"
     debug(f"No password found, using default: {default_password}")
     return default_password
@@ -54,40 +48,53 @@ def extract_password_from_post(soup, host):
 def extract_mirror_name_from_link(link_element):
     """
     Extract the mirror/hoster name from the link text or nearby text.
-    Returns the extracted name or None.
     """
-    # Get the link text
     link_text = link_element.get_text(strip=True)
-
-    # Try to extract a meaningful name from the link text
-    # Look for text that looks like a hoster name (alphanumeric, may contain numbers/dashes)
-    # Filter out common non-hoster words
     common_non_hosters = {'download', 'mirror', 'link', 'hier', 'click', 'klick', 'code', 'spoiler'}
 
-    #
+    # Known hoster patterns for image detection
+    known_hosters = {
+        'rapidgator': ['rapidgator', 'rg'],
+        'ddownload': ['ddownload', 'ddl'],
+        'turbobit': ['turbobit'],
+        '1fichier': ['1fichier'],
+    }
+
     if link_text and len(link_text) > 2:
-        # Remove common symbols and whitespace
         cleaned = re.sub(r'[^\w\s-]', '', link_text).strip().lower()
-
-        # If it's a single word or hyphenated word and not in common non-hosters
         if cleaned and cleaned not in common_non_hosters:
-            # Extract the main part (first word if multiple)
             main_part = cleaned.split()[0] if ' ' in cleaned else cleaned
-            if len(main_part)
+            if 2 < len(main_part) < 30:
                 return main_part
 
-    # Check if there's a bold tag or nearby text in parent
     parent = link_element.parent
     if parent:
-        parent_text = parent.get_text(strip=True)
-        # Look for text before the link that might be the mirror name
         for sibling in link_element.previous_siblings:
-
-
-
-
-
-
+            # Only process Tag elements, skip NavigableString (text nodes)
+            if not hasattr(sibling, 'name') or sibling.name is None:
+                continue
+
+            # Skip spoiler elements entirely
+            classes = sibling.get('class', [])
+            if classes and any('spoiler' in str(c).lower() for c in classes):
+                continue
+
+            # Check for images with hoster names in src/alt/data-url
+            img = sibling.find('img') if sibling.name != 'img' else sibling
+            if img:
+                img_identifiers = (img.get('src', '') + img.get('alt', '') + img.get('data-url', '')).lower()
+                for hoster, patterns in known_hosters.items():
+                    if any(pattern in img_identifiers for pattern in patterns):
+                        return hoster
+
+            sibling_text = sibling.get_text(strip=True).lower()
+            # Skip if text is too long - likely NFO content or other non-mirror text
+            if len(sibling_text) > 30:
+                continue
+            if sibling_text and len(sibling_text) > 2 and sibling_text not in common_non_hosters:
+                cleaned = re.sub(r'[^\w\s-]', '', sibling_text).strip()
+                if cleaned and 2 < len(cleaned) < 30:
+                    return cleaned.split()[0] if ' ' in cleaned else cleaned
 
     return None
 
@@ -95,12 +102,6 @@ def extract_mirror_name_from_link(link_element):
 def extract_links_and_password_from_post(post_content, host):
     """
     Extract download links and password from a forum post.
-    Only filecrypt and hide are supported - other link crypters will cause an error.
-
-    Returns:
-        tuple of (links, password) where:
-        - links: list of [url, mirror_name] pairs where mirror_name is the actual hoster
-        - password: extracted password string
     """
     links = []
     soup = BeautifulSoup(post_content, 'html.parser')
@@ -108,30 +109,24 @@ def extract_links_and_password_from_post(post_content, host):
     for link in soup.find_all('a', href=True):
         href = link.get('href')
 
-        # Skip internal forum links
         if href.startswith('/') or host in href:
             continue
 
-        # Check supported link crypters
-        crypter_type = None
         if re.search(r'filecrypt\.', href, re.IGNORECASE):
             crypter_type = "filecrypt"
         elif re.search(r'hide\.', href, re.IGNORECASE):
             crypter_type = "hide"
         elif re.search(r'keeplinks\.', href, re.IGNORECASE):
             crypter_type = "keeplinks"
+        elif re.search(r'tolink\.', href, re.IGNORECASE):
+            crypter_type = "tolink"
         else:
             debug(f"Unsupported link crypter/hoster found: {href}")
-            debug(f"Currently only filecrypt and hide are supported. Other crypters may be added later.")
             continue
 
-        # Extract mirror name from link text or nearby context
         mirror_name = extract_mirror_name_from_link(link)
-
-        # Use mirror name if found, otherwise fall back to crypter type
         identifier = mirror_name if mirror_name else crypter_type
 
-        # Avoid duplicates
         if [href, identifier] not in links:
             links.append([href, identifier])
             if mirror_name:
@@ -139,7 +134,6 @@ def extract_links_and_password_from_post(post_content, host):
             else:
                 debug(f"Found {crypter_type} link (no mirror name detected)")
 
-    # Only extract password if we found links
     password = ""
     if links:
         password = extract_password_from_post(soup, host)
@@ -147,52 +141,51 @@ def extract_links_and_password_from_post(post_content, host):
     return links, password
 
 
-def get_dl_download_links(shared_state, url, mirror, title):
+def get_dl_download_links(shared_state, url, mirror, title, password):
     """
-
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    DL source handler - extracts links and password from forum thread.
 
-
-    tuple of (links, password) where:
-    - links: list of [url, mirror_name] pairs
-    - password: extracted password string
+    Note: The password parameter is unused intentionally - password must be extracted from the post.
     """
+
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     sess = retrieve_and_validate_session(shared_state)
     if not sess:
         info(f"Could not retrieve valid session for {host}")
-        return [], ""
+        return {"links": [], "password": ""}
 
     try:
         response = fetch_via_requests_session(shared_state, method="GET", target_url=url, timeout=30)
 
         if response.status_code != 200:
             info(f"Failed to load thread page: {url} (Status: {response.status_code})")
-            return [], ""
+            return {"links": [], "password": ""}
 
         soup = BeautifulSoup(response.text, 'html.parser')
 
         first_post = soup.select_one('article.message--post')
         if not first_post:
             info(f"Could not find first post in thread: {url}")
-            return [], ""
+            return {"links": [], "password": ""}
 
         post_content = first_post.select_one('div.bbWrapper')
         if not post_content:
             info(f"Could not find post content in thread: {url}")
-            return [], ""
+            return {"links": [], "password": ""}
 
-
-        links, password = extract_links_and_password_from_post(str(post_content), host)
+        links, extracted_password = extract_links_and_password_from_post(str(post_content), host)
 
         if not links:
             info(f"No supported download links found in thread: {url}")
-            return [], ""
+            return {"links": [], "password": ""}
 
-        debug(f"Found {len(links)} download link(s) for: {title} (password: {
-        return links, password
+        debug(f"Found {len(links)} download link(s) for: {title} (password: {extracted_password})")
+        return {"links": links, "password": extracted_password}
 
     except Exception as e:
         info(f"Error extracting download links from {url}: {e}")
         invalidate_session(shared_state)
-        return [], ""
+        return {"links": [], "password": ""}
quasarr/downloads/sources/dt.py
CHANGED

@@ -3,12 +3,35 @@
 # Project by https://github.com/rix1337
 
 import re
+from urllib.parse import urlparse
+
 import requests
 from bs4 import BeautifulSoup
+
 from quasarr.providers.log import info
 
 
-def get_dt_download_links(shared_state, url, mirror, title):
+def derive_mirror_from_url(url):
+    """Extract hoster name from URL hostname."""
+    try:
+        hostname = urlparse(url).netloc.lower()
+        if hostname.startswith('www.'):
+            hostname = hostname[4:]
+        parts = hostname.split('.')
+        if len(parts) >= 2:
+            return parts[-2]
+        return hostname
+    except:
+        return "unknown"
+
+
+def get_dt_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    DT source handler - returns plain download links.
+    """
+
     headers = {"User-Agent": shared_state.values["user_agent"]}
     session = requests.Session()
 
@@ -19,20 +42,19 @@ def get_dt_download_links(shared_state, url, mirror, title):
         article = soup.find("article")
         if not article:
             info(f"Could not find article block on DT page for {title}")
-            return
+            return None
+
         body = article.find("div", class_="card-body")
         if not body:
             info(f"Could not find download section for {title}")
-            return
+            return None
 
-        # grab all <a href="…">
         anchors = body.find_all("a", href=True)
 
     except Exception as e:
         info(f"DT site has been updated. Grabbing download links for {title} not possible! ({e})")
-        return
+        return None
 
-    # first do your normal filtering
     filtered = []
     for a in anchors:
         href = a["href"].strip()
@@ -45,22 +67,22 @@ def get_dt_download_links(shared_state, url, mirror, title):
         if mirror and mirror not in href:
             continue
 
-
+        mirror_name = derive_mirror_from_url(href)
+        filtered.append([href, mirror_name])
 
-    #
+    # regex fallback if still empty
     if not filtered:
         text = body.get_text(separator="\n")
         urls = re.findall(r'https?://[^\s<>"\']+', text)
-        # de-dupe preserving order
         seen = set()
         for u in urls:
             u = u.strip()
             if u not in seen:
                 seen.add(u)
-                # apply same filters
                 low = u.lower()
                 if low.startswith(("http://", "https://")) and "imdb.com" not in low and "?ref=" not in low:
                     if not mirror or mirror in u:
-
+                        mirror_name = derive_mirror_from_url(u)
+                        filtered.append([u, mirror_name])
 
-    return filtered
+    return {"links": filtered} if filtered else None
quasarr/downloads/sources/dw.py
CHANGED

@@ -10,7 +10,13 @@ from bs4 import BeautifulSoup
 from quasarr.providers.log import info, debug
 
 
-def get_dw_download_links(shared_state, url, mirror, title):
+def get_dw_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    DW source handler - fetches protected download links from DW site.
+    """
+
     dw = shared_state.values["config"]("Hostnames").get("dw")
     ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"
 
@@ -26,7 +32,7 @@ def get_dw_download_links(shared_state, url, mirror, title):
         download_buttons = content.find_all("button", {"class": "show_link"})
     except:
         info(f"DW site has been updated. Grabbing download links for {title} not possible!")
-        return
+        return {"links": []}
 
     download_links = []
     try:
@@ -62,4 +68,4 @@ def get_dw_download_links(shared_state, url, mirror, title):
         info(f"DW site has been updated. Parsing download links for {title} not possible!")
         pass
 
-    return download_links
+    return {"links": download_links}
quasarr/downloads/sources/he.py
CHANGED

@@ -13,7 +13,13 @@ from quasarr.providers.log import info, debug
 hostname = "he"
 
 
-def get_he_download_links(shared_state, url, mirror, title):
+def get_he_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    HE source handler - fetches plain download links from HE pages.
+    """
+
     headers = {
         'User-Agent': shared_state.values["user_agent"],
     }
@@ -25,7 +31,7 @@ def get_he_download_links(shared_state, url, mirror, title):
         soup = BeautifulSoup(resp.text, 'html.parser')
     except Exception as e:
         info(f"{hostname}: could not fetch release for {title}: {e}")
-        return
+        return {"links": [], "imdb_id": None}
 
     imdb_id = None
     try:
@@ -46,7 +52,7 @@ def get_he_download_links(shared_state, url, mirror, title):
     for retries in range(10):
         form = soup.find('form', id=re.compile(r'content-protector-access-form'))
         if not form:
-            return
+            return {"links": [], "imdb_id": None}
 
         action = form.get('action') or url
         action_url = urljoin(resp.url, action)
@@ -104,7 +110,7 @@ def get_he_download_links(shared_state, url, mirror, title):
 
     if not links:
         info(f"No external download links found on {hostname} page for {title}")
-        return
+        return {"links": [], "imdb_id": None}
 
     return {
         "links": links,
quasarr/downloads/sources/mb.py
CHANGED

@@ -10,7 +10,13 @@ from bs4 import BeautifulSoup
 from quasarr.providers.log import info, debug
 
 
-def get_mb_download_links(shared_state, url, mirror, title):
+def get_mb_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    MB source handler - fetches protected download links from MB pages.
+    """
+
     headers = {
         'User-Agent': shared_state.values["user_agent"],
     }
@@ -20,7 +26,7 @@ def get_mb_download_links(shared_state, url, mirror, title):
         response.raise_for_status()
     except Exception as e:
         info(f"Failed to fetch page for {title or url}: {e}")
-        return
+        return {"links": []}
 
     soup = BeautifulSoup(response.text, "html.parser")
 
@@ -42,6 +48,6 @@ def get_mb_download_links(shared_state, url, mirror, title):
 
     if not download_links:
         info(f"No download links found for {title}. Site structure may have changed. - {url}")
-        return
+        return {"links": []}
 
-    return download_links
+    return {"links": download_links}
quasarr/downloads/sources/nk.py
CHANGED

@@ -11,7 +11,13 @@ hostname = "nk"
 supported_mirrors = ["rapidgator", "ddownload"]
 
 
-def get_nk_download_links(shared_state, url, mirror, title):
+def get_nk_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    NK source handler - fetches protected download links from NK pages.
+    """
+
     host = shared_state.values["config"]("Hostnames").get(hostname)
     headers = {
         'User-Agent': shared_state.values["user_agent"],
@@ -24,7 +30,7 @@ def get_nk_download_links(shared_state, url, mirror, title):
         soup = BeautifulSoup(resp.text, 'html.parser')
     except Exception as e:
         info(f"{hostname}: could not fetch release page for {title}: {e}")
-        return
+        return {"links": []}
 
     anchors = soup.select('a.btn-orange')
     candidates = []
@@ -51,4 +57,4 @@ def get_nk_download_links(shared_state, url, mirror, title):
     if not candidates:
         info(f"No external download links found on {hostname} page for {title}")
 
-    return candidates
+    return {"links": candidates}
quasarr/downloads/sources/nx.py
CHANGED

@@ -3,14 +3,28 @@
 # Project by https://github.com/rix1337
 
 import re
+from urllib.parse import urlparse
 
 import requests
-from bs4 import BeautifulSoup
 
 from quasarr.providers.log import info
 from quasarr.providers.sessions.nx import retrieve_and_validate_session
 
 
+def derive_mirror_from_url(url):
+    """Extract hoster name from URL hostname."""
+    try:
+        hostname = urlparse(url).netloc.lower()
+        if hostname.startswith('www.'):
+            hostname = hostname[4:]
+        parts = hostname.split('.')
+        if len(parts) >= 2:
+            return parts[-2]
+        return hostname
+    except:
+        return "unknown"
+
+
 def get_filer_folder_links_via_api(shared_state, url):
     try:
         headers = {
@@ -20,7 +34,7 @@ def get_filer_folder_links_via_api(shared_state, url):
 
         m = re.search(r"/folder/([A-Za-z0-9]+)", url)
         if not m:
-            return url
+            return url
 
         folder_hash = m.group(1)
         api_url = f"https://filer.net/api/folder/{folder_hash}"
@@ -33,7 +47,6 @@ def get_filer_folder_links_via_api(shared_state, url):
         files = data.get("files", [])
         links = []
 
-        # Build download URLs from their file hashes
        for f in files:
             file_hash = f.get("hash")
             if not file_hash:
@@ -41,14 +54,19 @@ def get_filer_folder_links_via_api(shared_state, url):
             dl_url = f"https://filer.net/get/{file_hash}"
             links.append(dl_url)
 
-        # Return extracted links or fallback
         return links if links else url
 
     except:
         return url
 
 
-def get_nx_download_links(shared_state, url, mirror, title):
+def get_nx_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    NX source handler - auto-decrypts via site API and returns plain download links.
+    """
+
     nx = shared_state.values["config"]("Hostnames").get("nx")
 
     if f"{nx}/release/" not in url:
@@ -57,7 +75,7 @@ def get_nx_download_links(shared_state, url, mirror, title):
     nx_session = retrieve_and_validate_session(shared_state)
     if not nx_session:
         info(f"Could not retrieve valid session for {nx}")
-        return []
+        return {"links": []}
 
     headers = {
         'User-Agent': shared_state.values["user_agent"],
@@ -81,13 +99,13 @@ def get_nx_download_links(shared_state, url, mirror, title):
     except:
         info("Invalid response decrypting " + str(title) + " URL: " + str(url))
         shared_state.values["database"]("sessions").delete("nx")
-        return []
+        return {"links": []}
 
     if payload and any(key in payload for key in ("err", "error")):
         error_msg = payload.get("err") or payload.get("error")
         info(f"Error decrypting {title!r} URL: {url!r} - {error_msg}")
         shared_state.values["database"]("sessions").delete("nx")
-        return []
+        return {"links": []}
 
     try:
         decrypted_url = payload['link'][0]['url']
@@ -96,10 +114,13 @@ def get_nx_download_links(shared_state, url, mirror, title):
             urls = get_filer_folder_links_via_api(shared_state, decrypted_url)
         else:
             urls = [decrypted_url]
-
+
+        # Convert to [[url, mirror], ...] format
+        links = [[u, derive_mirror_from_url(u)] for u in urls]
+        return {"links": links}
     except:
         pass
 
     info("Something went wrong decrypting " + str(title) + " URL: " + str(url))
     shared_state.values["database"]("sessions").delete("nx")
-    return []
+    return {"links": []}