quasarr-1.20.6-py3-none-any.whl
This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Potentially problematic release: this version of quasarr might be problematic.
- quasarr/__init__.py +460 -0
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +373 -0
- quasarr/api/captcha/__init__.py +1075 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +267 -0
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +467 -0
- quasarr/downloads/sources/__init__.py +0 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +65 -0
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +51 -0
- quasarr/downloads/sources/nx.py +105 -0
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/providers/__init__.py +0 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +20 -0
- quasarr/providers/html_templates.py +241 -0
- quasarr/providers/imdb_metadata.py +142 -0
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +917 -0
- quasarr/providers/notifications.py +124 -0
- quasarr/providers/obfuscated.py +51 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +826 -0
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +118 -0
- quasarr/providers/web_server.py +49 -0
- quasarr/search/__init__.py +153 -0
- quasarr/search/sources/__init__.py +0 -0
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +203 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +214 -0
- quasarr/search/sources/fx.py +223 -0
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +197 -0
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/storage/__init__.py +0 -0
- quasarr/storage/config.py +163 -0
- quasarr/storage/setup.py +458 -0
- quasarr/storage/sqlite_database.py +80 -0
- quasarr-1.20.6.dist-info/METADATA +304 -0
- quasarr-1.20.6.dist-info/RECORD +72 -0
- quasarr-1.20.6.dist-info/WHEEL +5 -0
- quasarr-1.20.6.dist-info/entry_points.txt +2 -0
- quasarr-1.20.6.dist-info/licenses/LICENSE +21 -0
- quasarr-1.20.6.dist-info/top_level.txt +1 -0

quasarr/downloads/sources/by.py
@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import concurrent.futures
import re
import time
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup

from quasarr.providers.log import info, debug


def get_by_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
    by = shared_state.values["config"]("Hostnames").get("by")
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    mirror_lower = mirror.lower() if mirror else None
    links = []

    try:
        resp = requests.get(url, headers=headers, timeout=10)
        page_content = resp.text
        soup = BeautifulSoup(page_content, "html.parser")
        frames = [iframe.get("src") for iframe in soup.find_all("iframe") if iframe.get("src")]

        frame_urls = [src for src in frames if f'https://{by}' in src]
        if not frame_urls:
            debug(f"No iframe hosts found on {url} for {title}.")
            return []

        async_results = []

        def fetch(url):
            try:
                r = requests.get(url, headers=headers, timeout=10)
                return r.text, url
            except Exception:
                info(f"Error fetching iframe URL: {url}")
                return None, url

        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            future_to_url = {executor.submit(fetch, url): url for url in frame_urls}
            for future in concurrent.futures.as_completed(future_to_url):
                content, source = future.result()
                if content:
                    async_results.append((content, source))

        url_hosters = []
        for content, source in async_results:
            host_soup = BeautifulSoup(content, "html.parser")
            link = host_soup.find("a", href=re.compile(
                r"https?://(?:www\.)?(?:hide\.cx|filecrypt\.(?:cc|co|to))/container/"))

            # Fallback to the old format
            if not link:
                link = host_soup.find("a", href=re.compile(r"/go\.php\?"))

            if not link:
                continue

            href = link["href"]
            hostname = link.text.strip().replace(" ", "")
            hostname_lower = hostname.lower()

            if mirror_lower and mirror_lower not in hostname_lower:
                debug(f'Skipping link from "{hostname}" (not the desired mirror "{mirror}")!')
                continue

            url_hosters.append((href, hostname))

        def resolve_redirect(href_hostname):
            href, hostname = href_hostname
            try:
                r = requests.get(href, headers=headers, timeout=10, allow_redirects=True)
                if "/404.html" in r.url:
                    info(f"Link leads to 404 page for {hostname}: {r.url}")
                    return None
                time.sleep(1)
                return r.url
            except Exception as e:
                info(f"Error resolving link for {hostname}: {e}")
                return None

        for pair in url_hosters:
            resolved_url = resolve_redirect(pair)
            hostname = pair[1]

            if not hostname:
                hostname = urlparse(resolved_url).hostname

            if resolved_url and hostname and hostname.startswith(("ddownload", "rapidgator", "turbobit", "filecrypt")):
                if "rapidgator" in hostname:
                    links.insert(0, [resolved_url, hostname])
                else:
                    links.append([resolved_url, hostname])

    except Exception as e:
        info(f"Error loading BY download links: {e}")

    return links
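
The iframe bodies above are fetched in parallel rather than one by one. A minimal, self-contained sketch of the same ThreadPoolExecutor/as_completed pattern (the URL list is a placeholder, not taken from the module):

import concurrent.futures

import requests

def fetch(url):
    # Return (body, url) so each result can be matched to its source,
    # since as_completed yields futures out of submission order.
    try:
        return requests.get(url, timeout=10).text, url
    except Exception:
        return None, url

urls = ["https://example.org/a", "https://example.org/b"]  # placeholders
results = []
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    futures = {executor.submit(fetch, u): u for u in urls}
    for future in concurrent.futures.as_completed(futures):
        body, source = future.result()
        if body:
            results.append((body, source))

Capping max_workers at 5 bounds the number of simultaneous requests against the host, which matters more than raw speed when scraping.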

quasarr/downloads/sources/dd.py
@@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

from quasarr.providers.log import info, debug
from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session


def get_dd_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
    dd = shared_state.values["config"]("Hostnames").get("dd")

    dd_session = retrieve_and_validate_session(shared_state)
    if not dd_session:
        info(f"Could not retrieve valid session for {dd}")
        return []

    links = []

    qualities = [
        "disk-480p",
        "web-480p",
        "movie-480p-x265",
        "disk-1080p-x265",
        "web-1080p",
        "web-1080p-x265",
        "web-2160p-x265-hdr",
        "movie-1080p-x265",
        "movie-2160p-webdl-x265-hdr"
    ]

    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    try:
        release_list = []
        for page in range(0, 100, 20):
            # double quotes inside the f-string keep this valid on Python < 3.12
            url = f'https://{dd}/index/search/keyword/{title}/qualities/{",".join(qualities)}/from/{page}/search'

            releases_on_page = dd_session.get(url, headers=headers, timeout=10).json()
            if releases_on_page:
                release_list.extend(releases_on_page)

        for release in release_list:
            try:
                if release.get("fake"):
                    debug(f"Release {release.get('release')} marked as fake. Invalidating DD session...")
                    create_and_persist_session(shared_state)
                    return []
                elif release.get("release") == title:
                    filtered_links = []
                    for link in release["links"]:
                        if mirror and mirror not in link["hostname"]:
                            debug(f'Skipping link from "{link["hostname"]}" (not the desired mirror "{mirror}")!')
                            continue

                        if any(
                                existing_link["hostname"] == link["hostname"] and
                                existing_link["url"].endswith(".mkv") and
                                link["url"].endswith(".mkv")
                                for existing_link in filtered_links
                        ):
                            debug(f"Skipping duplicate `.mkv` link from {link['hostname']}")
                            continue  # Skip adding duplicate `.mkv` links from the same hostname
                        filtered_links.append(link)

                    links = [link["url"] for link in filtered_links]
                    break
            except Exception as e:
                info(f"Error parsing DD download: {e}")
                continue

    except Exception as e:
        info(f"Error loading DD download: {e}")

    return links
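
The search endpoint is paged in steps of 20, offsets 0 through 80, and every page's JSON array is concatenated before matching against the title. A hedged sketch of that accumulation loop; the endpoint path here is simplified from the module's full keyword/qualities URL and is only illustrative:

import requests

def collect_pages(base_url, step=20, limit=100, timeout=10):
    # Query offsets 0, 20, 40, ... and concatenate all results,
    # mirroring the pagination in get_dd_download_links.
    results = []
    for offset in range(0, limit, step):
        page = requests.get(f"{base_url}/from/{offset}/search", timeout=timeout).json()
        if page:
            results.extend(page)
    return results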

quasarr/downloads/sources/dt.py
@@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import re
import requests
from bs4 import BeautifulSoup
from quasarr.providers.log import info


def get_dt_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
    headers = {"User-Agent": shared_state.values["user_agent"]}
    session = requests.Session()

    try:
        resp = session.get(url, headers=headers, timeout=10)
        soup = BeautifulSoup(resp.text, "html.parser")

        article = soup.find("article")
        if not article:
            info(f"Could not find article block on DT page for {title}")
            return False
        body = article.find("div", class_="card-body")
        if not body:
            info(f"Could not find download section for {title}")
            return False

        # grab all <a href="…">
        anchors = body.find_all("a", href=True)

    except Exception as e:
        info(f"DT site has been updated. Grabbing download links for {title} not possible! ({e})")
        return False

    # first do the normal filtering
    filtered = []
    for a in anchors:
        href = a["href"].strip()

        if not href.lower().startswith(("http://", "https://")):
            continue
        lower = href.lower()
        if "imdb.com" in lower or "?ref=" in lower:
            continue
        if mirror and mirror not in href:
            continue

        filtered.append(href)

    # if filtering yielded nothing, fall back to regex
    if not filtered:
        text = body.get_text(separator="\n")
        urls = re.findall(r'https?://[^\s<>"\']+', text)
        # de-dupe preserving order
        seen = set()
        for u in urls:
            u = u.strip()
            if u not in seen:
                seen.add(u)
                # apply same filters
                low = u.lower()
                if low.startswith(("http://", "https://")) and "imdb.com" not in low and "?ref=" not in low:
                    if not mirror or mirror in u:
                        filtered.append(u)

    return filtered
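
When no usable anchors survive filtering, the module falls back to regexing the raw page text for anything URL-shaped, de-duplicating while preserving order. The same idea as a standalone helper (same regex and filters as above, just packaged for reuse):

import re

def extract_urls(text, mirror=None):
    seen = set()
    urls = []
    for u in re.findall(r'https?://[^\s<>"\']+', text):
        u = u.strip()
        low = u.lower()
        # Apply the same filters as the anchor-based path above.
        if u in seen or "imdb.com" in low or "?ref=" in low:
            continue
        if mirror and mirror not in u:
            continue
        seen.add(u)
        urls.append(u)
    return urls

print(extract_urls("see https://example.org/x and https://example.org/x again"))
# ['https://example.org/x']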

quasarr/downloads/sources/dw.py
@@ -0,0 +1,65 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import re

import requests
from bs4 import BeautifulSoup

from quasarr.providers.log import info, debug


def get_dw_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
    dw = shared_state.values["config"]("Hostnames").get("dw")
    ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"

    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    session = requests.Session()

    try:
        request = session.get(url, headers=headers, timeout=10)
        content = BeautifulSoup(request.text, "html.parser")
        download_buttons = content.find_all("button", {"class": "show_link"})
    except Exception:
        info(f"DW site has been updated. Grabbing download links for {title} not possible!")
        return False

    download_links = []
    try:
        for button in download_buttons:
            payload = f"action=show_link&link_id={button['value']}"
            headers = {
                'User-Agent': shared_state.values["user_agent"],
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
            }

            response = session.post(ajax_url, payload, headers=headers, timeout=10)
            if response.status_code != 200:
                info(f"DW site has been updated. Grabbing download links for {title} not possible!")
                continue
            else:
                response = response.json()
                link = response["data"].split(",")[0]

                if dw in link:
                    match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
                    if match:
                        link = (f'https://filecrypt.cc/Container/{match.group(1)}'
                                f'.html{match.group(2) if match.group(2) else ""}')

                hoster = button.next_sibling.img["src"].split("/")[-1].replace(".png", "")
                hoster = "1fichier" if hoster.startswith("fichier") else hoster  # align with expected mirror name
                if mirror and mirror.lower() not in hoster.lower():
                    debug(f'Skipping link from "{hoster}" (not the desired mirror "{mirror}")!')
                    continue

                download_links.append([link, hoster])
    except Exception:
        info(f"DW site has been updated. Parsing download links for {title} not possible!")

    return download_links
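
Each link is unlocked through a WordPress admin-ajax call: a form-encoded POST whose JSON response carries the link in its data field. A sketch of that exchange, assuming the same ajax_url and show_link action as the module above (helper name and signature are invented for illustration):

import requests

def show_link(session, ajax_url, link_id, user_agent, timeout=10):
    headers = {
        "User-Agent": user_agent,
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    }
    # requests sends a plain string body as-is, so the payload must be
    # pre-encoded exactly like the module's f-string payload.
    resp = session.post(ajax_url, f"action=show_link&link_id={link_id}",
                        headers=headers, timeout=timeout)
    resp.raise_for_status()
    return resp.json()["data"].split(",")[0]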

quasarr/downloads/sources/he.py
@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import re
from urllib.parse import urlparse, urljoin

import requests
from bs4 import BeautifulSoup

from quasarr.providers.log import info, debug

hostname = "he"


def get_he_download_links(shared_state, url, mirror, title):
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    session = requests.Session()

    try:
        resp = session.get(url, headers=headers, timeout=30)
        soup = BeautifulSoup(resp.text, 'html.parser')
    except Exception as e:
        info(f"{hostname}: could not fetch release for {title}: {e}")
        return False

    imdb_id = None
    try:
        imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
        if imdb_link:
            href = imdb_link['href'].strip()
            m = re.search(r"(tt\d{4,7})", href)
            if m:
                imdb_id = m.group(1)
            else:
                debug(f"{hostname}: imdb_id not found for title {title} in link href.")
        else:
            debug(f"{hostname}: imdb_id link href not found for title {title}.")
    except Exception:
        debug(f"{hostname}: failed to extract imdb_id for title {title}.")

    anchors = []
    for retries in range(10):
        form = soup.find('form', id=re.compile(r'content-protector-access-form'))
        if not form:
            return False

        action = form.get('action') or url
        action_url = urljoin(resp.url, action)

        payload = {}
        for inp in form.find_all('input'):
            name = inp.get('name')
            if not name:
                continue
            value = inp.get('value', '')
            payload[name] = value

        append_patt = re.compile(r"append\(\s*[\'\"](?P<key>[^\'\"]+)[\'\"]\s*,\s*[\'\"](?P<val>[^\'\"]+)[\'\"]\s*\)",
                                 re.IGNORECASE)

        for script in soup.find_all('script'):
            txt = script.string if script.string is not None else script.get_text()
            if not txt:
                continue
            for m in append_patt.finditer(txt):
                payload[m.group('key')] = m.group('val')

        post_headers = headers.copy()
        post_headers.update({'Referer': resp.url})
        try:
            resp = session.post(action_url, data=payload, headers=post_headers, timeout=30)
            soup = BeautifulSoup(resp.text, 'html.parser')
        except Exception as e:
            info(f"{hostname}: could not submit protector form for {title}: {e}")
            break

        unlocked = soup.select('.content-protector-access-form')
        if unlocked:
            for u in unlocked:
                anchors.extend(u.find_all('a', href=True))

        if anchors:
            break

    links = []
    for a in anchors:
        try:
            href = a['href'].strip()

            netloc = urlparse(href).netloc
            hoster = netloc.split(':')[0].lower()
            parts = hoster.split('.')
            if len(parts) >= 2:
                hoster = parts[-2]

            links.append([href, hoster])
        except Exception:
            debug(f"{hostname}: could not resolve download link hoster for {title}")
            continue

    if not links:
        info(f"No external download links found on {hostname} page for {title}")
        return False

    return {
        "links": links,
        "imdb_id": imdb_id,
    }
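
The protector form is replayed by collecting every named input into a payload and then recovering any extra fields that inline JavaScript would add via append(...). A self-contained toy example of that payload assembly (the HTML and field names are invented):

import re

from bs4 import BeautifulSoup

# Toy page standing in for the protector form: two named inputs plus a
# script that appends an extra field client-side (all values made up).
HTML = """
<form action="/unlock">
  <input name="token" value="abc"><input name="step" value="1">
</form>
<script>data.append('captcha', 'xyz');</script>
"""

soup = BeautifulSoup(HTML, "html.parser")
form = soup.find("form")

# Seed the payload from the form's named inputs, as the module does.
payload = {i["name"]: i.get("value", "") for i in form.find_all("input") if i.get("name")}

# Recover fields that inline JS would add via FormData.append(...),
# using the same append(...) regex idea as get_he_download_links.
append_patt = re.compile(r"append\(\s*['\"]([^'\"]+)['\"]\s*,\s*['\"]([^'\"]+)['\"]\s*\)")
for script in soup.find_all("script"):
    for key, val in append_patt.findall(script.get_text()):
        payload[key] = val

print(payload)  # {'token': 'abc', 'step': '1', 'captcha': 'xyz'}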

quasarr/downloads/sources/mb.py
@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import re

import requests
from bs4 import BeautifulSoup

from quasarr.providers.log import info, debug


def get_mb_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    try:
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception as e:
        info(f"Failed to fetch page for {title or url}: {e}")
        return False

    soup = BeautifulSoup(response.text, "html.parser")

    download_links = []

    pattern = re.compile(r'https?://(?:www\.)?filecrypt\.[^/]+/Container/', re.IGNORECASE)
    for a in soup.find_all('a', href=pattern):
        try:
            link = a['href']
            hoster = a.get_text(strip=True).lower()

            if mirror and mirror.lower() not in hoster:
                debug(f'Skipping link from "{hoster}" (not the desired mirror "{mirror}")!')
                continue

            download_links.append([link, hoster])
        except Exception as e:
            debug(f"Error parsing MB download links: {e}")

    if not download_links:
        info(f"No download links found for {title}. Site structure may have changed. - {url}")
        return False

    return download_links
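
BeautifulSoup accepts a compiled regex as an attribute filter, which is how only filecrypt container anchors are selected here. A toy demonstration with the same pattern (the HTML input is invented):

import re

from bs4 import BeautifulSoup

html = '<a href="https://filecrypt.cc/Container/ABC123.html">rapidgator</a>'  # invented
pattern = re.compile(r'https?://(?:www\.)?filecrypt\.[^/]+/Container/', re.IGNORECASE)

soup = BeautifulSoup(html, "html.parser")
for a in soup.find_all('a', href=pattern):
    # The anchor text doubles as the hoster label, as in the module.
    print(a['href'], a.get_text(strip=True).lower())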

quasarr/downloads/sources/nk.py
@@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import requests
from bs4 import BeautifulSoup

from quasarr.providers.log import info

hostname = "nk"


def get_nk_download_links(shared_state, url, mirror, title):
    host = shared_state.values["config"]("Hostnames").get(hostname)
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    session = requests.Session()

    try:
        resp = session.get(url, headers=headers, timeout=20)
        soup = BeautifulSoup(resp.text, 'html.parser')
    except Exception as e:
        info(f"{hostname}: could not fetch release page for {title}: {e}")
        return False

    anchors = soup.select('a.btn-orange')
    candidates = []
    for a in anchors:
        href = a.get('href', '').strip()
        hoster = href.split('/')[3].lower()
        if not href.lower().startswith(('http://', 'https://')):
            href = 'https://' + host + href

        try:
            href = requests.head(href, headers=headers, allow_redirects=True, timeout=20).url
        except Exception as e:
            info(f"{hostname}: could not resolve download link for {title}: {e}")
            continue

        if hoster == 'ddl.to':
            hoster = 'ddownload'

        candidates.append([href, hoster])

    if not candidates:
        info(f"No external download links found on {hostname} page for {title}")

    return candidates
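
Final hoster URLs are obtained by following redirects with a HEAD request, which walks the redirect chain without downloading any response body. The same trick as a small helper (name and signature invented for illustration):

import requests

def resolve_final_url(href, user_agent, timeout=20):
    # .url on the response holds wherever the redirect chain ended.
    headers = {"User-Agent": user_agent}
    return requests.head(href, headers=headers, allow_redirects=True, timeout=timeout).url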

quasarr/downloads/sources/nx.py
@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import re

import requests
from bs4 import BeautifulSoup

from quasarr.providers.log import info
from quasarr.providers.sessions.nx import retrieve_and_validate_session


def get_filer_folder_links_via_api(shared_state, url):
    try:
        headers = {
            'User-Agent': shared_state.values["user_agent"],
            'Referer': url
        }

        m = re.search(r"/folder/([A-Za-z0-9]+)", url)
        if not m:
            return url  # not a folder URL

        folder_hash = m.group(1)
        api_url = f"https://filer.net/api/folder/{folder_hash}"

        response = requests.get(api_url, headers=headers, timeout=10)
        if not response or response.status_code != 200:
            return url

        data = response.json()
        files = data.get("files", [])
        links = []

        # Build download URLs from their file hashes
        for f in files:
            file_hash = f.get("hash")
            if not file_hash:
                continue
            dl_url = f"https://filer.net/get/{file_hash}"
            links.append(dl_url)

        # Return extracted links or fallback
        return links if links else url

    except Exception:
        return url


def get_nx_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
    nx = shared_state.values["config"]("Hostnames").get("nx")

    if f"{nx}/release/" not in url:
        info("Link is not a Release link, could not proceed: " + url)
        return []  # nothing below can decrypt a non-release link

    nx_session = retrieve_and_validate_session(shared_state)
    if not nx_session:
        info(f"Could not retrieve valid session for {nx}")
        return []

    headers = {
        'User-Agent': shared_state.values["user_agent"],
        'Referer': url
    }

    json_data = {}

    url_segments = url.split('/')
    payload_url = '/'.join(url_segments[:-2]) + '/api/getLinks/' + url_segments[-1]

    payload = nx_session.post(payload_url,
                              headers=headers,
                              json=json_data,
                              timeout=10
                              )

    if payload.status_code == 200:
        try:
            payload = payload.json()
        except Exception:
            info("Invalid response decrypting " + str(title) + " URL: " + str(url))
            shared_state.values["database"]("sessions").delete("nx")
            return []

        if payload and any(key in payload for key in ("err", "error")):
            error_msg = payload.get("err") or payload.get("error")
            info(f"Error decrypting {title!r} URL: {url!r} - {error_msg}")
            shared_state.values["database"]("sessions").delete("nx")
            return []

        try:
            decrypted_url = payload['link'][0]['url']
            if decrypted_url:
                if "filer.net/folder/" in decrypted_url:
                    urls = get_filer_folder_links_via_api(shared_state, decrypted_url)
                else:
                    urls = [decrypted_url]
                return urls
        except Exception:
            pass

    info("Something went wrong decrypting " + str(title) + " URL: " + str(url))
    shared_state.values["database"]("sessions").delete("nx")
    return []
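
The getLinks endpoint is derived purely from the release URL's path segments: drop the last two ("release" and the slug), then re-append the slug under /api/getLinks/. Traced on a made-up hostname:

url = "https://nx.example/release/some-title"  # placeholder, not a real host
segments = url.split("/")
payload_url = "/".join(segments[:-2]) + "/api/getLinks/" + segments[-1]
print(payload_url)  # https://nx.example/api/getLinks/some-title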