quasarr 2.1.5-py3-none-any.whl → 2.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of quasarr might be problematic.
- quasarr/__init__.py +38 -29
- quasarr/api/__init__.py +94 -23
- quasarr/api/captcha/__init__.py +0 -12
- quasarr/api/config/__init__.py +22 -11
- quasarr/api/packages/__init__.py +26 -34
- quasarr/api/statistics/__init__.py +15 -15
- quasarr/downloads/__init__.py +9 -1
- quasarr/downloads/packages/__init__.py +2 -2
- quasarr/downloads/sources/al.py +6 -0
- quasarr/downloads/sources/by.py +29 -20
- quasarr/downloads/sources/dd.py +9 -1
- quasarr/downloads/sources/dl.py +3 -0
- quasarr/downloads/sources/dt.py +16 -7
- quasarr/downloads/sources/dw.py +22 -17
- quasarr/downloads/sources/he.py +11 -6
- quasarr/downloads/sources/mb.py +9 -3
- quasarr/downloads/sources/nk.py +9 -3
- quasarr/downloads/sources/nx.py +21 -17
- quasarr/downloads/sources/sf.py +21 -13
- quasarr/downloads/sources/sl.py +10 -2
- quasarr/downloads/sources/wd.py +18 -9
- quasarr/downloads/sources/wx.py +7 -11
- quasarr/providers/auth.py +1 -1
- quasarr/providers/cloudflare.py +1 -1
- quasarr/providers/hostname_issues.py +63 -0
- quasarr/providers/html_images.py +1 -18
- quasarr/providers/html_templates.py +104 -12
- quasarr/providers/imdb_metadata.py +288 -75
- quasarr/providers/obfuscated.py +11 -11
- quasarr/providers/sessions/al.py +27 -11
- quasarr/providers/sessions/dd.py +12 -4
- quasarr/providers/sessions/dl.py +19 -11
- quasarr/providers/sessions/nx.py +12 -4
- quasarr/providers/version.py +1 -1
- quasarr/search/__init__.py +5 -0
- quasarr/search/sources/al.py +12 -1
- quasarr/search/sources/by.py +15 -4
- quasarr/search/sources/dd.py +22 -3
- quasarr/search/sources/dj.py +12 -1
- quasarr/search/sources/dl.py +12 -6
- quasarr/search/sources/dt.py +17 -4
- quasarr/search/sources/dw.py +15 -4
- quasarr/search/sources/fx.py +19 -6
- quasarr/search/sources/he.py +22 -3
- quasarr/search/sources/mb.py +15 -4
- quasarr/search/sources/nk.py +19 -3
- quasarr/search/sources/nx.py +15 -4
- quasarr/search/sources/sf.py +25 -8
- quasarr/search/sources/sj.py +14 -1
- quasarr/search/sources/sl.py +17 -2
- quasarr/search/sources/wd.py +15 -4
- quasarr/search/sources/wx.py +16 -18
- quasarr/storage/setup.py +150 -35
- {quasarr-2.1.5.dist-info → quasarr-2.3.0.dist-info}/METADATA +6 -3
- quasarr-2.3.0.dist-info/RECORD +82 -0
- {quasarr-2.1.5.dist-info → quasarr-2.3.0.dist-info}/WHEEL +1 -1
- quasarr-2.1.5.dist-info/RECORD +0 -81
- {quasarr-2.1.5.dist-info → quasarr-2.3.0.dist-info}/entry_points.txt +0 -0
- {quasarr-2.1.5.dist-info → quasarr-2.3.0.dist-info}/licenses/LICENSE +0 -0
- {quasarr-2.1.5.dist-info → quasarr-2.3.0.dist-info}/top_level.txt +0 -0
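
A pattern that recurs through nearly every source file listed above is the new quasarr/providers/hostname_issues.py module (+63 lines): the download and search sources now report failures through mark_hostname_issue(hostname, ...) and reset them through clear_hostname_issue(hostname). The module body itself is not shown in this diff; the sketch below is only a guess at what the call sites imply, and every name beyond the two imported functions (including the parameter names and the in-memory dict) is an assumption:

# Hypothetical sketch of quasarr/providers/hostname_issues.py, inferred
# solely from the call sites in the hunks below; the real module may differ.
from datetime import datetime
from threading import Lock

_issues = {}  # hostname -> details of the most recent issue
_lock = Lock()


def mark_hostname_issue(hostname, kind, message):
    """Record the latest issue (e.g. kind="download") seen for a hostname."""
    with _lock:
        _issues[hostname] = {
            "kind": kind,
            "message": message,
            "time": datetime.now().isoformat(),
        }


def clear_hostname_issue(hostname):
    """Forget a recorded issue once the hostname works again."""
    with _lock:
        _issues.pop(hostname, None)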
quasarr/downloads/sources/by.py
CHANGED
@@ -10,8 +10,11 @@ from urllib.parse import urlparse
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 
+hostname = "by"
+
 
 def get_by_download_links(shared_state, url, mirror, title, password):
     """
@@ -29,9 +32,9 @@ def get_by_download_links(shared_state, url, mirror, title, password):
     links = []
 
     try:
-        …
-        …
-        soup = BeautifulSoup(…
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, "html.parser")
         frames = [iframe.get("src") for iframe in soup.find_all("iframe") if iframe.get("src")]
 
         frame_urls = [src for src in frames if f'https://{by}' in src]
@@ -43,10 +46,12 @@ def get_by_download_links(shared_state, url, mirror, title, password):
 
         def fetch(url):
             try:
-                …
-                …
-                …
+                rq = requests.get(url, headers=headers, timeout=10)
+                rq.raise_for_status()
+                return rq.text, url
+            except Exception as e:
                 info(f"Error fetching iframe URL: {url}")
+                mark_hostname_issue(hostname, "download", str(e))
                 return None, url
 
         with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
@@ -70,43 +75,47 @@ def get_by_download_links(shared_state, url, mirror, title, password):
                 continue
 
             href = link["href"]
-            …
-            hostname_lower = …
+            link_hostname = link.text.strip().replace(" ", "")
+            hostname_lower = link_hostname.lower()
 
             if mirror_lower and mirror_lower not in hostname_lower:
-                debug(f'Skipping link from "{…
+                debug(f'Skipping link from "{link_hostname}" (not the desired mirror "{mirror}")!')
                 continue
 
-            url_hosters.append((href, …
+            url_hosters.append((href, link_hostname))
 
         def resolve_redirect(href_hostname):
             href, hostname = href_hostname
             try:
-                …
-                …
+                rq = requests.get(href, headers=headers, timeout=10, allow_redirects=True)
+                rq.raise_for_status()
+                if "/404.html" in rq.url:
                     info(f"Link leads to 404 page for {hostname}: {r.url}")
                     return None
                 time.sleep(1)
-                return …
+                return rq.url
             except Exception as e:
                 info(f"Error resolving link for {hostname}: {e}")
+                mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
                 return None
 
         for pair in url_hosters:
             resolved_url = resolve_redirect(pair)
-            …
+            link_hostname = pair[1]
 
-            if not …
-                …
+            if not link_hostname:
+                link_hostname = urlparse(resolved_url).hostname
 
-            if resolved_url and …
-                …
-                …
+            if resolved_url and link_hostname and link_hostname.startswith(
+                    ("ddownload", "rapidgator", "turbobit", "filecrypt")):
+                if "rapidgator" in link_hostname:
+                    links.insert(0, [resolved_url, link_hostname])
                 else:
-                    links.append([resolved_url, …
+                    links.append([resolved_url, link_hostname])
 
 
     except Exception as e:
         info(f"Error loading BY download links: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
 
     return {"links": links}
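
Beyond the hostname_issues instrumentation, the rewritten loop above changes result ordering: of the whitelisted hosters, rapidgator links are inserted at the front of the list. Rerunning just that branch in isolation (with fabricated URLs and hostnames) shows the effect:

# Isolated rerun of the new ordering branch from the hunk above;
# URLs and hostnames are fabricated examples.
links = []
for resolved_url, link_hostname in [
        ("https://ddownload.example/f/1", "ddownload.com"),
        ("https://rapidgator.example/f/2", "rapidgator.net")]:
    if link_hostname.startswith(("ddownload", "rapidgator", "turbobit", "filecrypt")):
        if "rapidgator" in link_hostname:
            links.insert(0, [resolved_url, link_hostname])
        else:
            links.append([resolved_url, link_hostname])

print(links)  # the rapidgator entry comes first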
quasarr/downloads/sources/dd.py
CHANGED
@@ -2,9 +2,12 @@
 # Quasarr
 # Project by https://github.com/rix1337
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session
 
+hostname = "dd"
+
 
 def get_dd_download_links(shared_state, url, mirror, title, password):
     """
@@ -18,6 +21,7 @@ def get_dd_download_links(shared_state, url, mirror, title, password):
     dd_session = retrieve_and_validate_session(shared_state)
     if not dd_session:
         info(f"Could not retrieve valid session for {dd}")
+        mark_hostname_issue(hostname, "download", "Session error")
         return {"links": []}
 
     links = []
@@ -43,7 +47,9 @@ def get_dd_download_links(shared_state, url, mirror, title, password):
         for page in range(0, 100, 20):
             api_url = f'https://{dd}/index/search/keyword/{title}/qualities/{",".join(qualities)}/from/{page}/search'
 
-            …
+            r = dd_session.get(api_url, headers=headers, timeout=10)
+            r.raise_for_status()
+            releases_on_page = r.json()
             if releases_on_page:
                 release_list.extend(releases_on_page)
 
@@ -75,9 +81,11 @@ def get_dd_download_links(shared_state, url, mirror, title, password):
                     break
             except Exception as e:
                 info(f"Error parsing DD download: {e}")
+                mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
                 continue
 
     except Exception as e:
         info(f"Error loading DD download: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
 
     return {"links": links}
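
The new request code pages through the DD search API in steps of 20 (pages 0, 20, 40, 60, 80). With placeholder values for the hostname, title and qualities, the generated URLs look like this:

# URL construction copied from the hunk above; dd, title and qualities
# are placeholder values, not real configuration.
dd = "dd.example"
title = "Some.Show.S01"
qualities = ["1080p", "720p"]

for page in range(0, 100, 20):
    api_url = f'https://{dd}/index/search/keyword/{title}/qualities/{",".join(qualities)}/from/{page}/search'
    print(api_url)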
quasarr/downloads/sources/dl.py
CHANGED
@@ -6,6 +6,7 @@ import re
 
 from bs4 import BeautifulSoup, NavigableString
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.providers.sessions.dl import retrieve_and_validate_session, fetch_via_requests_session, invalidate_session
 from quasarr.providers.utils import generate_status_url, check_links_online_status
@@ -313,6 +314,7 @@ def get_dl_download_links(shared_state, url, mirror, title, password):
     sess = retrieve_and_validate_session(shared_state)
     if not sess:
         info(f"Could not retrieve valid session for {host}")
+        mark_hostname_issue(hostname, "download", "Session error")
         return {"links": [], "password": ""}
 
     try:
@@ -376,5 +378,6 @@ def get_dl_download_links(shared_state, url, mirror, title, password):
 
     except Exception as e:
         info(f"Error extracting download links from {url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         invalidate_session(shared_state)
         return {"links": [], "password": ""}
quasarr/downloads/sources/dt.py
CHANGED
@@ -8,19 +8,22 @@ from urllib.parse import urlparse
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info
 
+hostname = "dt"
+
 
 def derive_mirror_from_url(url):
     """Extract hoster name from URL hostname."""
     try:
-        …
-        if …
-            …
-        parts = …
+        mirror_hostname = urlparse(url).netloc.lower()
+        if mirror_hostname.startswith('www.'):
+            mirror_hostname = mirror_hostname[4:]
+        parts = mirror_hostname.split('.')
         if len(parts) >= 2:
             return parts[-2]
-        return …
+        return mirror_hostname
     except:
         return "unknown"
 
@@ -36,23 +39,27 @@ def get_dt_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        …
-        …
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, "html.parser")
 
         article = soup.find("article")
         if not article:
             info(f"Could not find article block on DT page for {title}")
+            mark_hostname_issue(hostname, "download", "Could not find article block")
             return None
 
         body = article.find("div", class_="card-body")
         if not body:
             info(f"Could not find download section for {title}")
+            mark_hostname_issue(hostname, "download", "Could not find download section")
             return None
 
         anchors = body.find_all("a", href=True)
 
     except Exception as e:
         info(f"DT site has been updated. Grabbing download links for {title} not possible! ({e})")
+        mark_hostname_issue(hostname, "download", str(e))
         return None
 
     filtered = []
@@ -85,4 +92,6 @@ def get_dt_download_links(shared_state, url, mirror, title, password):
         mirror_name = derive_mirror_from_url(u)
         filtered.append([u, mirror_name])
 
+    if filtered:
+        clear_hostname_issue(hostname)
     return {"links": filtered} if filtered else None
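
The rebuilt derive_mirror_from_url lowercases the netloc, strips a leading "www.", and returns the second-level domain label, falling back to the full hostname or "unknown". Reassembled from the hunk above, it can be exercised standalone (the sample URL is made up):

# derive_mirror_from_url reassembled from the hunk above.
from urllib.parse import urlparse


def derive_mirror_from_url(url):
    """Extract hoster name from URL hostname."""
    try:
        mirror_hostname = urlparse(url).netloc.lower()
        if mirror_hostname.startswith('www.'):
            mirror_hostname = mirror_hostname[4:]
        parts = mirror_hostname.split('.')
        if len(parts) >= 2:
            return parts[-2]
        return mirror_hostname
    except:
        return "unknown"


print(derive_mirror_from_url("https://www.rapidgator.net/file/abc"))  # rapidgator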
quasarr/downloads/sources/dw.py
CHANGED
@@ -7,8 +7,11 @@ import re
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug
 
+hostname = "dw"
+
 
 def get_dw_download_links(shared_state, url, mirror, title, password):
     """
@@ -27,11 +30,13 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        …
-        …
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        content = BeautifulSoup(r.text, "html.parser")
         download_buttons = content.find_all("button", {"class": "show_link"})
-    except:
+    except Exception as e:
         info(f"DW site has been updated. Grabbing download links for {title} not possible!")
+        mark_hostname_issue(hostname, "download", str(e))
         return {"links": []}
 
     download_links = []
@@ -43,19 +48,17 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
                 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
             }
 
-            …
-            …
-            …
-            …
-            …
-            response = response.json()
-            link = response["data"].split(",")[0]
+            r = session.post(ajax_url, payload, headers=headers, timeout=10)
+            r.raise_for_status()
+
+            response = r.json()
+            link = response["data"].split(",")[0]
 
-            …
-            …
-            …
-            …
-            …
+            if dw in link:
+                match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
+                if match:
+                    link = (f'https://filecrypt.cc/Container/{match.group(1)}'
+                            f'.html{match.group(2) if match.group(2) else ""}')
 
             hoster = button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
             hoster = f"1fichier" if hoster.startswith("fichier") else hoster  # align with expected mirror name
@@ -64,8 +67,10 @@ def get_dw_download_links(shared_state, url, mirror, title, password):
                 continue
 
             download_links.append([link, hoster])
-    except:
+    except Exception as e:
         info(f"DW site has been updated. Parsing download links for {title} not possible!")
-        …
+        mark_hostname_issue(hostname, "download", str(e))
 
+    if download_links:
+        clear_hostname_issue(hostname)
     return {"links": download_links}
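
New in this version: links pointing back at DW's own azn/af.php endpoint are rewritten into their filecrypt.cc container form. The regex and rewrite below are copied from the hunk above; the DW hostname and container id are placeholders:

# Rewrite rule copied from the hunk above; dw and the container id
# are placeholder values.
import re

dw = "dw.example"
link = f"https://{dw}/azn/af.php?v=ABC123#mirror1"

match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
if match:
    link = (f'https://filecrypt.cc/Container/{match.group(1)}'
            f'.html{match.group(2) if match.group(2) else ""}')

print(link)  # https://filecrypt.cc/Container/ABC123.html#mirror1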
quasarr/downloads/sources/he.py
CHANGED
@@ -8,6 +8,7 @@ from urllib.parse import urlparse, urljoin
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 
 hostname = "he"
@@ -27,10 +28,12 @@ def get_he_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        …
-        …
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, 'html.parser')
     except Exception as e:
         info(f"{hostname}: could not fetch release for {title}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         return {"links": [], "imdb_id": None}
 
     imdb_id = None
@@ -55,7 +58,7 @@ def get_he_download_links(shared_state, url, mirror, title, password):
         return {"links": [], "imdb_id": None}
 
     action = form.get('action') or url
-    action_url = urljoin(…
+    action_url = urljoin(r.url, action)
 
     payload = {}
     for inp in form.find_all('input'):
@@ -76,12 +79,14 @@ def get_he_download_links(shared_state, url, mirror, title, password):
             payload[m.group('key')] = m.group('val')
 
         post_headers = headers.copy()
-        post_headers.update({'Referer': …
+        post_headers.update({'Referer': r.url})
         try:
-            …
-            …
+            r = session.post(action_url, data=payload, headers=post_headers, timeout=10)
+            r.raise_for_status()
+            soup = BeautifulSoup(r.text, 'html.parser')
        except Exception as e:
            info(f"{hostname}: could not submit protector form for {title}: {e}")
+            mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
            break
 
        unlocked = soup.select('.content-protector-access-form')
quasarr/downloads/sources/mb.py
CHANGED
@@ -7,8 +7,11 @@ import re
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug
 
+hostname = "mb"
+
 
 def get_mb_download_links(shared_state, url, mirror, title, password):
     """
@@ -22,13 +25,14 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
     }
 
     try:
-        …
-        …
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
     except Exception as e:
         info(f"Failed to fetch page for {title or url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e))
         return {"links": []}
 
-    soup = BeautifulSoup(…
+    soup = BeautifulSoup(r.text, "html.parser")
 
     download_links = []
 
@@ -48,6 +52,8 @@ def get_mb_download_links(shared_state, url, mirror, title, password):
 
     if not download_links:
         info(f"No download links found for {title}. Site structure may have changed. - {url}")
+        mark_hostname_issue(hostname, "download", "No download links found - site structure may have changed")
        return {"links": []}
 
+    clear_hostname_issue(hostname)
     return {"links": download_links}
quasarr/downloads/sources/nk.py
CHANGED
@@ -5,6 +5,7 @@
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info
 
 hostname = "nk"
@@ -26,10 +27,12 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
     session = requests.Session()
 
     try:
-        …
-        …
+        r = session.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.text, 'html.parser')
     except Exception as e:
         info(f"{hostname}: could not fetch release page for {title}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
         return {"links": []}
 
     anchors = soup.select('a.btn-orange')
@@ -47,9 +50,12 @@ def get_nk_download_links(shared_state, url, mirror, title, password):
             href = 'https://' + host + href
 
         try:
-            …
+            r = requests.head(href, headers=headers, allow_redirects=True, timeout=10)
+            r.raise_for_status()
+            href = r.url
         except Exception as e:
             info(f"{hostname}: could not resolve download link for {title}: {e}")
+            mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
             continue
 
         candidates.append([href, mirror])
quasarr/downloads/sources/nx.py
CHANGED
@@ -7,9 +7,12 @@ from urllib.parse import urlparse
 
 import requests
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info
 from quasarr.providers.sessions.nx import retrieve_and_validate_session
 
+hostname = "nx"
+
 
 def derive_mirror_from_url(url):
     """Extract hoster name from URL hostname."""
@@ -39,11 +42,10 @@ def get_filer_folder_links_via_api(shared_state, url):
     folder_hash = m.group(1)
     api_url = f"https://filer.net/api/folder/{folder_hash}"
 
-    …
-    …
-        return url
+    r = requests.get(api_url, headers=headers, timeout=10)
+    r.raise_for_status()
 
-    data = …
+    data = r.json()
     files = data.get("files", [])
     links = []
 
@@ -75,6 +77,7 @@ def get_nx_download_links(shared_state, url, mirror, title, password):
     nx_session = retrieve_and_validate_session(shared_state)
     if not nx_session:
         info(f"Could not retrieve valid session for {nx}")
+        mark_hostname_issue(hostname, "download", "Session error")
         return {"links": []}
 
     headers = {
@@ -87,23 +90,24 @@ def get_nx_download_links(shared_state, url, mirror, title, password):
     url_segments = url.split('/')
     payload_url = '/'.join(url_segments[:-2]) + '/api/getLinks/' + url_segments[-1]
 
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    try:
+        r = nx_session.post(payload_url,
+                            headers=headers,
+                            json=json_data,
+                            timeout=10
+                            )
+        r.raise_for_status()
+
+        payload = r.json()
+    except Exception as e:
+        info(f"Could not get NX Links: {e}")
+        mark_hostname_issue(hostname, "download", str(e))
+        return {"links": []}
 
     if payload and any(key in payload for key in ("err", "error")):
         error_msg = payload.get("err") or payload.get("error")
         info(f"Error decrypting {title!r} URL: {url!r} - {error_msg}")
+        mark_hostname_issue(hostname, "download", "Download error")
         shared_state.values["database"]("sessions").delete("nx")
         return {"links": []}
 
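
The now-guarded decryption call posts to an api/getLinks endpoint derived from the release URL by plain string surgery (a context line in the hunk above). With a fabricated input URL:

# payload_url derivation as shown in the hunk above; the input URL
# is a fabricated example.
url = "https://nx.example/release/some-release-slug/abc123"
url_segments = url.split('/')
payload_url = '/'.join(url_segments[:-2]) + '/api/getLinks/' + url_segments[-1]
print(payload_url)  # https://nx.example/release/api/getLinks/abc123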
quasarr/downloads/sources/sf.py
CHANGED
@@ -8,9 +8,12 @@ from datetime import datetime
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.hostname_issues import mark_hostname_issue
 from quasarr.providers.log import info, debug
 from quasarr.search.sources.sf import parse_mirrors
 
+hostname = "sf"
+
 
 def is_last_section_integer(url):
     last_section = url.rstrip('/').split('/')[-1]
@@ -22,19 +25,20 @@ def is_last_section_integer(url):
 def resolve_sf_redirect(url, user_agent):
     """Follow redirects and return final URL or None if 404."""
     try:
-        …
-        …
-        if …
-            for resp in …
-                debug(f"Redirected from {resp.url} to {…
-            if "/404.html" in …
-                info(f"SF link redirected to 404 page: {…
+        r = requests.get(url, allow_redirects=True, timeout=10, headers={'User-Agent': user_agent})
+        r.raise_for_status()
+        if r.history:
+            for resp in r.history:
+                debug(f"Redirected from {resp.url} to {r.url}")
+            if "/404.html" in r.url:
+                info(f"SF link redirected to 404 page: {r.url}")
                 return None
-            return …
+            return r.url
         else:
             info(f"SF blocked attempt to resolve {url}. Your IP may be banned. Try again later.")
     except Exception as e:
         info(f"Error fetching redirected URL for {url}: {e}")
+        mark_hostname_issue(hostname, "download", str(e) if "e" in dir() else "Download error")
     return None
 
 
@@ -84,7 +88,9 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
         season = "ALL"
 
     headers = {'User-Agent': user_agent}
-    …
+    r = requests.get(url, headers=headers, timeout=10)
+    r.raise_for_status()
+    series_page = r.text
     soup = BeautifulSoup(series_page, "html.parser")
 
     # Extract IMDb id if present
@@ -100,14 +106,16 @@ def get_sf_download_links(shared_state, url, mirror, title, password):
     epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
     api_url = 'https://' + sf + '/api/v1/' + season_id + f'/season/{season}?lang=ALL&_=' + epoch
 
-    …
+    r = requests.get(api_url, headers=headers, timeout=10)
+    r.raise_for_status()
     try:
-        data = …
+        data = r.json()["html"]
     except ValueError:
         epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
         api_url = 'https://' + sf + '/api/v1/' + season_id + f'/season/ALL?lang=ALL&_=' + epoch
-        …
-        …
+        r = requests.get(api_url, headers=headers, timeout=10)
+        r.raise_for_status()
+        data = r.json()["html"]
 
     content = BeautifulSoup(data, "html.parser")
     items = content.find_all("h3")
CHANGED
|
@@ -8,8 +8,10 @@ from urllib.parse import urlparse
|
|
|
8
8
|
import requests
|
|
9
9
|
from bs4 import BeautifulSoup
|
|
10
10
|
|
|
11
|
+
from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
|
|
11
12
|
from quasarr.providers.log import info, debug
|
|
12
13
|
|
|
14
|
+
hostname = "sl"
|
|
13
15
|
supported_mirrors = ["nitroflare", "ddownload"]
|
|
14
16
|
|
|
15
17
|
|
|
@@ -31,12 +33,15 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
|
|
|
31
33
|
session = requests.Session()
|
|
32
34
|
|
|
33
35
|
try:
|
|
34
|
-
|
|
35
|
-
|
|
36
|
+
r = session.get(url, headers=headers, timeout=10)
|
|
37
|
+
r.raise_for_status()
|
|
38
|
+
|
|
39
|
+
soup = BeautifulSoup(r.text, "html.parser")
|
|
36
40
|
|
|
37
41
|
entry = soup.find("div", class_="entry")
|
|
38
42
|
if not entry:
|
|
39
43
|
info(f"Could not find main content section for {title}")
|
|
44
|
+
mark_hostname_issue(hostname, "download", "Could not find main content section")
|
|
40
45
|
return {"links": [], "imdb_id": None}
|
|
41
46
|
|
|
42
47
|
imdb_id = None
|
|
@@ -62,6 +67,7 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
|
|
|
62
67
|
|
|
63
68
|
except Exception as e:
|
|
64
69
|
info(f"SL site has been updated. Grabbing download links for {title} not possible! ({e})")
|
|
70
|
+
mark_hostname_issue(hostname, "download", str(e))
|
|
65
71
|
return {"links": [], "imdb_id": None}
|
|
66
72
|
|
|
67
73
|
filtered = []
|
|
@@ -97,6 +103,8 @@ def get_sl_download_links(shared_state, url, mirror, title, password):
|
|
|
97
103
|
mirror_name = derive_mirror_from_host(host)
|
|
98
104
|
filtered.append([u, mirror_name])
|
|
99
105
|
|
|
106
|
+
if filtered:
|
|
107
|
+
clear_hostname_issue(hostname)
|
|
100
108
|
return {
|
|
101
109
|
"links": filtered,
|
|
102
110
|
"imdb_id": imdb_id,
|