quasarr-2.4.8-py3-none-any.whl → quasarr-2.4.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
- quasarr-2.4.9.dist-info/RECORD +81 -0
- quasarr-2.4.8.dist-info/RECORD +0 -81
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
quasarr/downloads/sources/dw.py
CHANGED
@@ -7,8 +7,8 @@ import re
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
-from quasarr.providers.log import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+from quasarr.providers.log import debug, info
 
 hostname = "dw"
 
@@ -24,7 +24,7 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
     ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"
 
     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     session = requests.Session()
@@ -35,7 +35,9 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
         content = BeautifulSoup(r.text, "html.parser")
         download_buttons = content.find_all("button", {"class": "show_link"})
     except Exception as e:
-        info(…)
+        info(
+            f"DW site has been updated. Grabbing download links for {title} not possible!"
+        )
         mark_hostname_issue(hostname, "download", str(e))
         return {"links": []}
 
@@ -44,8 +46,8 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
     for button in download_buttons:
         payload = f"action=show_link&link_id={button['value']}"
         headers = {
-            …
-            …
+            "User-Agent": shared_state.values["user_agent"],
+            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
         }
 
         r = session.post(ajax_url, payload, headers=headers, timeout=10)
@@ -55,20 +57,32 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
             link = response["data"].split(",")[0]
 
             if dw in link:
-                match = re.search(…)
+                match = re.search(
+                    r"https://" + dw + r"/azn/af\.php\?v=([A-Z0-9]+)(#.*)?", link
+                )
                 if match:
-                    link = (…)
-                    …
-                    …
-                    …
+                    link = (
+                        f"https://filecrypt.cc/Container/{match.group(1)}"
+                        f".html{match.group(2) if match.group(2) else ''}"
+                    )
+
+                    hoster = (
+                        button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
+                    )
+                    hoster = (
+                        f"1fichier" if hoster.startswith("fichier") else hoster
+                    )  # align with expected mirror name
             if mirror and mirror.lower() not in hoster.lower():
-                debug(…)
+                debug(
+                    f'Skipping link from "{hoster}" (not the desired mirror "{mirror}")!'
+                )
                 continue
 
             download_links.append([link, hoster])
     except Exception as e:
-        info(…)
+        info(
+            f"DW site has been updated. Parsing download links for {title} not possible!"
+        )
        mark_hostname_issue(hostname, "download", str(e))
 
    if download_links:
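The behavioral core of this change is easier to see in isolation: DW's internal `/azn/af.php` redirect is rewritten into a direct filecrypt container URL, preserving the mirror hint in the fragment. A minimal sketch of just that step, with an invented placeholder hostname (`re.escape` is added here as a small hardening; the diff concatenates the host literally):

```python
import re

dw = "dw.example"  # invented placeholder; the real hostname comes from Quasarr's config


def to_filecrypt(link):
    # DW's redirect looks like https://<host>/azn/af.php?v=ABC123#rapidgator
    match = re.search(
        r"https://" + re.escape(dw) + r"/azn/af\.php\?v=([A-Z0-9]+)(#.*)?", link
    )
    if not match:
        return None
    # Rebuild the direct container URL, keeping the optional #fragment
    return f"https://filecrypt.cc/Container/{match.group(1)}.html{match.group(2) or ''}"


print(to_filecrypt("https://dw.example/azn/af.php?v=ABC123#rapidgator"))
# https://filecrypt.cc/Container/ABC123.html#rapidgator
```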
quasarr/downloads/sources/he.py
CHANGED
@@ -3,13 +3,13 @@
 # Project by https://github.com/rix1337
 
 import re
-from urllib.parse import …
+from urllib.parse import urljoin, urlparse
 
 import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import mark_hostname_issue
-from quasarr.providers.log import …
+from quasarr.providers.log import debug, info
 
 hostname = "he"
 
@@ -22,7 +22,7 @@ def get_he_download_links(shared_state, url, mirror, title, password):
     """
 
     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     session = requests.Session()
@@ -30,17 +30,21 @@ def get_he_download_links(shared_state, url, mirror, title, password):
     try:
         r = session.get(url, headers=headers, timeout=10)
         r.raise_for_status()
-        soup = BeautifulSoup(r.text, …)
+        soup = BeautifulSoup(r.text, "html.parser")
     except Exception as e:
         info(f"{hostname}: could not fetch release for {title}: {e}")
-        mark_hostname_issue(…)
+        mark_hostname_issue(
+            hostname, "download", str(e) if "e" in dir() else "Download error"
+        )
         return {"links": [], "imdb_id": None}
 
     imdb_id = None
     try:
-        imdb_link = soup.find(…)
+        imdb_link = soup.find(
+            "a", href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE)
+        )
         if imdb_link:
-            href = imdb_link[…
+            href = imdb_link["href"].strip()
             m = re.search(r"(tt\d{4,7})", href)
             if m:
                 imdb_id = m.group(1)
@@ -53,46 +57,50 @@ def get_he_download_links(shared_state, url, mirror, title, password):
 
     anchors = []
     for retries in range(10):
-        form = soup.find(…)
+        form = soup.find("form", id=re.compile(r"content-protector-access-form"))
         if not form:
             return {"links": [], "imdb_id": None}
 
-        action = form.get(…)
+        action = form.get("action") or url
         action_url = urljoin(r.url, action)
 
         payload = {}
-        for inp in form.find_all(…):
-            name = inp.get(…)
+        for inp in form.find_all("input"):
+            name = inp.get("name")
             if not name:
                 continue
-            value = inp.get(…)
+            value = inp.get("value", "")
             payload[name] = value
 
-        append_patt = re.compile(…
-        …
+        append_patt = re.compile(
+            r"append\(\s*[\'\"](?P<key>[^\'\"]+)[\'\"]\s*,\s*[\'\"](?P<val>[^\'\"]+)[\'\"]\s*\)",
+            re.IGNORECASE,
+        )
 
-        for script in soup.find_all(…):
+        for script in soup.find_all("script"):
             txt = script.string if script.string is not None else script.get_text()
             if not txt:
                 continue
             for m in append_patt.finditer(txt):
-                payload[m.group(…
+                payload[m.group("key")] = m.group("val")
 
         post_headers = headers.copy()
-        post_headers.update({…
+        post_headers.update({"Referer": r.url})
         try:
             r = session.post(action_url, data=payload, headers=post_headers, timeout=10)
             r.raise_for_status()
-            soup = BeautifulSoup(r.text, …)
+            soup = BeautifulSoup(r.text, "html.parser")
         except Exception as e:
             info(f"{hostname}: could not submit protector form for {title}: {e}")
-            mark_hostname_issue(…)
+            mark_hostname_issue(
+                hostname, "download", str(e) if "e" in dir() else "Download error"
+            )
             break
 
-        unlocked = soup.select(…)
+        unlocked = soup.select(".content-protector-access-form")
         if unlocked:
             for u in unlocked:
-                anchors.extend(u.find_all(…))
+                anchors.extend(u.find_all("a", href=True))
 
         if anchors:
             break
@@ -100,11 +108,11 @@ def get_he_download_links(shared_state, url, mirror, title, password):
     links = []
     for a in anchors:
         try:
-            href = a[…
+            href = a["href"].strip()
 
             netloc = urlparse(href).netloc
-            hoster = netloc.split(…
-            parts = hoster.split(…
+            hoster = netloc.split(":")[0].lower()
+            parts = hoster.split(".")
             if len(parts) >= 2:
                 hoster = parts[-2]
 
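The interesting addition in `he.py` is `append_patt`, which scrapes `FormData.append('key', 'value')` calls out of inline JavaScript so the content-protector form can be replayed server-side with the same hidden fields. A self-contained sketch of that extraction, with a made-up script body and field names:

```python
import re

append_patt = re.compile(
    r"append\(\s*['\"](?P<key>[^'\"]+)['\"]\s*,\s*['\"](?P<val>[^'\"]+)['\"]\s*\)",
    re.IGNORECASE,
)

# Made-up example of what a protector page might embed in a <script> tag
script = "fd.append('token', 'abc123'); fd.append('unlock', '1');"

# Collect every key/value pair into the POST payload
payload = {m.group("key"): m.group("val") for m in append_patt.finditer(script)}
print(payload)  # {'token': 'abc123', 'unlock': '1'}
```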
quasarr/downloads/sources/mb.py
CHANGED
@@ -7,8 +7,8 @@ import re
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
-from quasarr.providers.log import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+from quasarr.providers.log import debug, info
 
 hostname = "mb"
 
@@ -21,7 +21,7 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
     """
 
     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
@@ -36,14 +36,18 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
 
     download_links = []
 
-    pattern = re.compile(…)
-    …
+    pattern = re.compile(
+        r"https?://(?:www\.)?filecrypt\.[^/]+/Container/", re.IGNORECASE
+    )
+    for a in soup.find_all("a", href=pattern):
         try:
-            link = a[…
+            link = a["href"]
             hoster = a.get_text(strip=True).lower()
 
             if mirror and mirror.lower() not in hoster.lower():
-                debug(…)
+                debug(
+                    f'Skipping link from "{hoster}" (not the desired mirror "{mirror}")!'
+                )
                 continue
 
             download_links.append([link, hoster])
@@ -51,8 +55,14 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
         debug(f"Error parsing MB download links: {e}")
 
     if not download_links:
-        info(…
-        …
+        info(
+            f"No download links found for {title}. Site structure may have changed. - {url}"
+        )
+        mark_hostname_issue(
+            hostname,
+            "download",
+            "No download links found - site structure may have changed",
+        )
         return {"links": []}
 
     clear_hostname_issue(hostname)
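Passing a compiled regex as the `href=` filter lets BeautifulSoup collect only the filecrypt container anchors in a single `find_all` pass. A quick sketch against inline sample HTML:

```python
import re

from bs4 import BeautifulSoup

html = """
<a href="https://filecrypt.cc/Container/ABC123.html">rapidgator</a>
<a href="https://example.com/elsewhere">unrelated</a>
"""

pattern = re.compile(r"https?://(?:www\.)?filecrypt\.[^/]+/Container/", re.IGNORECASE)
soup = BeautifulSoup(html, "html.parser")

# bs4 runs the regex against each anchor's href attribute
links = [
    [a["href"], a.get_text(strip=True).lower()]
    for a in soup.find_all("a", href=pattern)
]
print(links)  # [['https://filecrypt.cc/Container/ABC123.html', 'rapidgator']]
```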
quasarr/downloads/sources/nk.py
CHANGED
@@ -21,7 +21,7 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
 
     host = shared_state.values["config"]("Hostnames").get(hostname)
     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     session = requests.Session()
@@ -29,25 +29,27 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
     try:
         r = session.get(url, headers=headers, timeout=10)
         r.raise_for_status()
-        soup = BeautifulSoup(r.text, …)
+        soup = BeautifulSoup(r.text, "html.parser")
     except Exception as e:
         info(f"{hostname}: could not fetch release page for {title}: {e}")
-        mark_hostname_issue(…)
+        mark_hostname_issue(
+            hostname, "download", str(e) if "e" in dir() else "Download error"
+        )
         return {"links": []}
 
-    anchors = soup.select(…)
+    anchors = soup.select("a.btn-orange")
     candidates = []
     for a in anchors:
         mirror = a.text.strip().lower()
-        if mirror == …
-            mirror = …
+        if mirror == "ddl.to":
+            mirror = "ddownload"
 
         if mirror not in supported_mirrors:
             continue
 
-        href = a.get(…)
-        if not href.lower().startswith((…
-            href = …
+        href = a.get("href", "").strip()
+        if not href.lower().startswith(("http://", "https://")):
+            href = "https://" + host + href
 
         try:
             r = requests.head(href, headers=headers, allow_redirects=True, timeout=10)
@@ -55,7 +57,9 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
             href = r.url
         except Exception as e:
             info(f"{hostname}: could not resolve download link for {title}: {e}")
-            mark_hostname_issue(…)
+            mark_hostname_issue(
+                hostname, "download", str(e) if "e" in dir() else "Download error"
+            )
             continue
 
         candidates.append([href, mirror])
quasarr/downloads/sources/nx.py
CHANGED
@@ -18,9 +18,9 @@ def derive_mirror_from_url(url):
     """Extract hoster name from URL hostname."""
     try:
         hostname = urlparse(url).netloc.lower()
-        if hostname.startswith(…):
+        if hostname.startswith("www."):
             hostname = hostname[4:]
-        parts = hostname.split(…)
+        parts = hostname.split(".")
         if len(parts) >= 2:
             return parts[-2]
         return hostname
@@ -30,10 +30,7 @@ def derive_mirror_from_url(url):
 
 def get_filer_folder_links_via_api(shared_state, url):
     try:
-        headers = {
-            'User-Agent': shared_state.values["user_agent"],
-            'Referer': url
-        }
+        headers = {"User-Agent": shared_state.values["user_agent"], "Referer": url}
 
         m = re.search(r"/folder/([A-Za-z0-9]+)", url)
         if not m:
@@ -80,22 +77,15 @@ def get_nx_download_links(shared_state, url, mirror, title, password):
         mark_hostname_issue(hostname, "download", "Session error")
         return {"links": []}
 
-    headers = {
-        'User-Agent': shared_state.values["user_agent"],
-        'Referer': url
-    }
+    headers = {"User-Agent": shared_state.values["user_agent"], "Referer": url}
 
     json_data = {}
 
-    url_segments = url.split(…)
-    payload_url = …
+    url_segments = url.split("/")
+    payload_url = "/".join(url_segments[:-2]) + "/api/getLinks/" + url_segments[-1]
 
     try:
-        r = nx_session.post(payload_url,
-                            headers=headers,
-                            json=json_data,
-                            timeout=10
-                            )
+        r = nx_session.post(payload_url, headers=headers, json=json_data, timeout=10)
         r.raise_for_status()
 
         payload = r.json()
@@ -112,7 +102,7 @@ def get_nx_download_links(shared_state, url, mirror, title, password):
         return {"links": []}
 
     try:
-        decrypted_url = payload[…
+        decrypted_url = payload["link"][0]["url"]
         if decrypted_url:
             if "filer.net/folder/" in decrypted_url:
                 urls = get_filer_folder_links_via_api(shared_state, decrypted_url)
quasarr/downloads/sources/sf.py
CHANGED
@@ -9,14 +9,14 @@ import requests
 from bs4 import BeautifulSoup
 
 from quasarr.providers.hostname_issues import mark_hostname_issue
-from quasarr.providers.log import …
+from quasarr.providers.log import debug, info
 from quasarr.search.sources.sf import parse_mirrors
 
 hostname = "sf"
 
 
 def is_last_section_integer(url):
-    last_section = url.rstrip(…
+    last_section = url.rstrip("/").split("/")[-1]
     if last_section.isdigit() and len(last_section) <= 3:
         return int(last_section)
     return None
@@ -25,7 +25,9 @@ def is_last_section_integer(url):
 def resolve_sf_redirect(url, user_agent):
     """Follow redirects and return final URL or None if 404."""
     try:
-        r = requests.get(…)
+        r = requests.get(
+            url, allow_redirects=True, timeout=10, headers={"User-Agent": user_agent}
+        )
         r.raise_for_status()
         if r.history:
             for resp in r.history:
@@ -35,10 +37,14 @@ def resolve_sf_redirect(url, user_agent):
                     return None
             return r.url
         else:
-            info(…)
+            info(
+                f"SF blocked attempt to resolve {url}. Your IP may be banned. Try again later."
+            )
     except Exception as e:
         info(f"Error fetching redirected URL for {url}: {e}")
-        mark_hostname_issue(…)
+        mark_hostname_issue(
+            hostname, "download", str(e) if "e" in dir() else "Download error"
+        )
     return None
 
 
@@ -61,7 +67,7 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
 
     # Handle series page URLs - need to find the right release
     release_pattern = re.compile(
-        r…
+        r"""
         ^
         (?P<name>.+?)\.
         S(?P<season>\d+)
@@ -72,8 +78,8 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
         \..+?
         -(?P<group>\w+)
         $
-        …
-        re.IGNORECASE | re.VERBOSE
+        """,
+        re.IGNORECASE | re.VERBOSE,
     )
 
     release_match = release_pattern.match(title)
@@ -87,7 +93,7 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
     if not season:
         season = "ALL"
 
-    headers = {…}
+    headers = {"User-Agent": user_agent}
     r = requests.get(url, headers=headers, timeout=10)
     r.raise_for_status()
     series_page = r.text
@@ -103,16 +109,30 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
     debug(f"Found IMDb id: {imdb_id}")
 
     season_id = re.findall(r"initSeason\('(.+?)\',", series_page)[0]
-    epoch = str(datetime.now().timestamp()).replace(…
-    api_url = …
+    epoch = str(datetime.now().timestamp()).replace(".", "")[:-3]
+    api_url = (
+        "https://"
+        + sf
+        + "/api/v1/"
+        + season_id
+        + f"/season/{season}?lang=ALL&_="
+        + epoch
+    )
 
     r = requests.get(api_url, headers=headers, timeout=10)
     r.raise_for_status()
     try:
         data = r.json()["html"]
     except ValueError:
-        epoch = str(datetime.now().timestamp()).replace(…
-        api_url = …
+        epoch = str(datetime.now().timestamp()).replace(".", "")[:-3]
+        api_url = (
+            "https://"
+            + sf
+            + "/api/v1/"
+            + season_id
+            + f"/season/ALL?lang=ALL&_="
+            + epoch
+        )
         r = requests.get(api_url, headers=headers, timeout=10)
         r.raise_for_status()
         data = r.json()["html"]
@@ -126,8 +146,8 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
         name = details.find("small").text.strip()
 
         result_pattern = re.compile(
-            r…
-            re.IGNORECASE
+            r"^(?P<name>.+?)\.S(?P<season>\d+)(?:E\d+)?\..*?(?P<resolution>\d+p)\..+?-(?P<group>[\w/-]+)$",
+            re.IGNORECASE,
         )
         result_match = result_pattern.match(name)
 
@@ -136,12 +156,17 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
 
         result_parts = result_match.groupdict()
 
-        name_match = …
-        …
-        …
+        name_match = (
+            release_parts["name"].lower() == result_parts["name"].lower()
+        )
+        season_match = release_parts["season"] == result_parts["season"]
+        resolution_match = (
+            release_parts["resolution"].lower()
+            == result_parts["resolution"].lower()
+        )
 
-        result_groups = {g.lower() for g in result_parts[…
-        release_groups = {g.lower() for g in release_parts[…
+        result_groups = {g.lower() for g in result_parts["group"].split("/")}
+        release_groups = {g.lower() for g in release_parts["group"].split("/")}
         group_match = not result_groups.isdisjoint(release_groups)
 
         if name_match and season_match and resolution_match and group_match:
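The rewritten comparison tolerates multi-group releases (`GRP1/GRP2`) by intersecting lowercase group sets instead of comparing raw strings. A self-contained sketch that applies the compact result pattern from the diff to two invented titles:

```python
import re

pattern = re.compile(
    r"^(?P<name>.+?)\.S(?P<season>\d+)(?:E\d+)?\..*?(?P<resolution>\d+p)\..+?-(?P<group>[\w/-]+)$",
    re.IGNORECASE,
)

release = pattern.match("Some.Show.S02.German.1080p.WEB.x264-GRP1").groupdict()
result = pattern.match("Some.Show.S02.German.1080p.WEB.x264-GRP1/GRP2").groupdict()

name_match = release["name"].lower() == result["name"].lower()
season_match = release["season"] == result["season"]
resolution_match = release["resolution"].lower() == result["resolution"].lower()

# Slash-separated groups count as a match if the two sets overlap at all
group_match = not {g.lower() for g in release["group"].split("/")}.isdisjoint(
    {g.lower() for g in result["group"].split("/")}
)
print(name_match and season_match and resolution_match and group_match)  # True
```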
quasarr/downloads/sources/sj.py
CHANGED
quasarr/downloads/sources/sl.py
CHANGED
@@ -8,8 +8,8 @@ from urllib.parse import urlparse
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
-from quasarr.providers.log import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+from quasarr.providers.log import debug, info
 
 hostname = "sl"
 supported_mirrors = ["nitroflare", "ddownload"]
@@ -20,7 +20,7 @@ def derive_mirror_from_host(host):
     for m in supported_mirrors:
         if host.startswith(m + "."):
             return m
-    return host.split(…
+    return host.split(".")[0] if host else "unknown"
 
 
 def get_sl_download_links(shared_state, url, mirror, title, password):
@@ -41,7 +41,9 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
     entry = soup.find("div", class_="entry")
     if not entry:
         info(f"Could not find main content section for {title}")
-        mark_hostname_issue(…)
+        mark_hostname_issue(
+            hostname, "download", "Could not find main content section"
+        )
         return {"links": [], "imdb_id": None}
 
     imdb_id = None
@@ -66,7 +68,9 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
         anchors = entry.find_all("a", href=True)
 
     except Exception as e:
-        info(…)
+        info(
+            f"SL site has been updated. Grabbing download links for {title} not possible! ({e})"
+        )
         mark_hostname_issue(hostname, "download", str(e))
         return {"links": [], "imdb_id": None}
 
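The hardened fallback in `derive_mirror_from_host` now guards empty input explicitly. A quick sketch of the behavior:

```python
supported_mirrors = ["nitroflare", "ddownload"]


def derive_mirror_from_host(host):
    for m in supported_mirrors:
        if host.startswith(m + "."):
            return m
    # Fallback: first hostname label, or "unknown" for empty input
    return host.split(".")[0] if host else "unknown"


print(derive_mirror_from_host("ddownload.com"))   # ddownload
print(derive_mirror_from_host("rapidgator.net"))  # rapidgator
print(derive_mirror_from_host(""))                # unknown
```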