quasarr 1.18.0-py3-none-any.whl → 1.20.0-py3-none-any.whl
This diff shows the content of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- quasarr/api/captcha/__init__.py +157 -16
- quasarr/downloads/__init__.py +9 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/nk.py +3 -7
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/providers/html_images.py +3 -0
- quasarr/providers/html_templates.py +1 -1
- quasarr/providers/obfuscated.py +18 -1
- quasarr/providers/version.py +1 -1
- quasarr/search/__init__.py +12 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/nk.py +6 -7
- quasarr/search/sources/sj.py +213 -0
- quasarr/storage/config.py +3 -0
- {quasarr-1.18.0.dist-info → quasarr-1.20.0.dist-info}/METADATA +1 -1
- {quasarr-1.18.0.dist-info → quasarr-1.20.0.dist-info}/RECORD +22 -16
- {quasarr-1.18.0.dist-info → quasarr-1.20.0.dist-info}/WHEEL +0 -0
- {quasarr-1.18.0.dist-info → quasarr-1.20.0.dist-info}/entry_points.txt +0 -0
- {quasarr-1.18.0.dist-info → quasarr-1.20.0.dist-info}/licenses/LICENSE +0 -0
- {quasarr-1.18.0.dist-info → quasarr-1.20.0.dist-info}/top_level.txt +0 -0
quasarr/search/__init__.py
CHANGED
@@ -9,13 +9,16 @@ from quasarr.providers.log import info, debug
 from quasarr.search.sources.al import al_feed, al_search
 from quasarr.search.sources.by import by_feed, by_search
 from quasarr.search.sources.dd import dd_search, dd_feed
+from quasarr.search.sources.dj import dj_search, dj_feed
 from quasarr.search.sources.dt import dt_feed, dt_search
 from quasarr.search.sources.dw import dw_feed, dw_search
 from quasarr.search.sources.fx import fx_feed, fx_search
+from quasarr.search.sources.he import he_feed, he_search
 from quasarr.search.sources.mb import mb_feed, mb_search
 from quasarr.search.sources.nk import nk_feed, nk_search
 from quasarr.search.sources.nx import nx_feed, nx_search
 from quasarr.search.sources.sf import sf_feed, sf_search
+from quasarr.search.sources.sj import sj_search, sj_feed
 from quasarr.search.sources.sl import sl_feed, sl_search
 from quasarr.search.sources.wd import wd_feed, wd_search
 
@@ -32,12 +35,15 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
     by = shared_state.values["config"]("Hostnames").get("by")
     dd = shared_state.values["config"]("Hostnames").get("dd")
     dt = shared_state.values["config"]("Hostnames").get("dt")
+    dj = shared_state.values["config"]("Hostnames").get("dj")
     dw = shared_state.values["config"]("Hostnames").get("dw")
     fx = shared_state.values["config"]("Hostnames").get("fx")
+    he = shared_state.values["config"]("Hostnames").get("he")
     mb = shared_state.values["config"]("Hostnames").get("mb")
     nk = shared_state.values["config"]("Hostnames").get("nk")
     nx = shared_state.values["config"]("Hostnames").get("nx")
     sf = shared_state.values["config"]("Hostnames").get("sf")
+    sj = shared_state.values["config"]("Hostnames").get("sj")
     sl = shared_state.values["config"]("Hostnames").get("sl")
     wd = shared_state.values["config"]("Hostnames").get("wd")
 
@@ -51,12 +57,15 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
         (by, by_search),
         (dd, dd_search),
         (dt, dt_search),
+        (dj, dj_search),
         (dw, dw_search),
         (fx, fx_search),
+        (he, he_search),
         (mb, mb_search),
         (nk, nk_search),
         (nx, nx_search),
         (sf, sf_search),
+        (sj, sj_search),
         (sl, sl_search),
         (wd, wd_search),
     ]
@@ -75,13 +84,16 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
         (al, al_feed),
         (by, by_feed),
         (dd, dd_feed),
+        (dj, dj_feed),
         (dt, dt_feed),
         (dw, dw_feed),
         (fx, fx_feed),
+        (he, he_feed),
         (mb, mb_feed),
         (nk, nk_feed),
         (nx, nx_feed),
         (sf, sf_feed),
+        (sj, sj_feed),
         (sl, sl_feed),
         (wd, wd_feed),
     ]
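All three new sources (dj, he, sj) are wired into get_search_results the same way: the hostname is read from the Hostnames config section and paired with the source's search or feed function. A minimal self-contained sketch of that (hostname, function) gating pattern follows; the plain dict, fake_search, and the if-host check are illustrative stand-ins for what the diff implies, not code from the package.

# Sketch of the (hostname, function) dispatch seen above. In Quasarr the
# hostnames come from shared_state.values["config"]("Hostnames"); a plain
# dict stands in for that config here, and "he" is deliberately left unset.
def fake_search(host, query):
    return [f"{host}: result for {query}"]

hostnames = {"dj": "dj.example", "he": None, "sj": "sj.example"}

functions = [
    (hostnames.get("dj"), fake_search),
    (hostnames.get("he"), fake_search),
    (hostnames.get("sj"), fake_search),
]

results = []
for host, func in functions:
    if host:  # sources without a configured hostname are skipped
        results.extend(func(host, "some query"))

print(results)  # only dj and sj contribute results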
quasarr/search/sources/dj.py
ADDED

@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import json
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timedelta
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "dj"
+
+
+def convert_to_rss_date(date_str):
+    try:
+        return datetime.fromisoformat(
+            date_str.replace("Z", "+00:00")
+        ).strftime("%a, %d %b %Y %H:%M:%S +0000")
+    except Exception:
+        return ""
+
+
+def dj_feed(shared_state, start_time, request_from, mirror=None):
+    releases = []
+
+    if "sonarr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+    password = sj_host
+
+    url = f"https://{sj_host}/api/releases/latest/0"
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+
+    try:
+        r = requests.get(url, headers=headers, timeout=10)
+        data = json.loads(r.content)
+    except Exception as e:
+        info(f"{hostname.upper()}: feed load error: {e}")
+        return releases
+
+    for release in data:
+        try:
+            title = release.get("name").rstrip(".")
+            if not title:
+                continue
+
+            published = convert_to_rss_date(release.get("createdAt"))
+            if not published:
+                continue
+
+            media = release.get("_media", {})
+            slug = media.get("slug")
+            if not slug:
+                continue
+
+            series_url = f"https://{sj_host}/serie/{slug}"
+
+            mb = 0
+            size = 0
+            imdb_id = None
+
+            payload = urlsafe_b64encode(
+                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+            ).decode("utf-8")
+
+            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname,
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": series_url
+                },
+                "type": "protected"
+            })
+
+        except Exception as e:
+            debug(f"{hostname.upper()}: feed parse error: {e}")
+            continue
+
+    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    return releases
+
+
+def dj_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+    releases = []
+
+    if "sonarr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+    password = sj_host
+
+    imdb_id = shared_state.is_imdb_id(search_string)
+    if not imdb_id:
+        return releases
+
+    localized_title = get_localized_title(shared_state, imdb_id, "de")
+    if not localized_title:
+        info(f"{hostname.upper()}: no localized title for IMDb {imdb_id}")
+        return releases
+
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+    search_url = f"https://{sj_host}/serie/search"
+    params = {"q": localized_title}
+
+    try:
+        r = requests.get(search_url, headers=headers, params=params, timeout=10)
+        soup = BeautifulSoup(r.content, "html.parser")
+        results = soup.find_all("a", href=re.compile(r"^/serie/"))
+    except Exception as e:
+        info(f"{hostname.upper()}: search load error: {e}")
+        return releases
+
+    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+    sanitized_search_string = shared_state.sanitize_string(localized_title)
+
+    for result in results:
+        try:
+            result_title = result.get_text(strip=True)
+
+            sanitized_title = shared_state.sanitize_string(result_title)
+
+            if not re.search(
+                    rf"\b{re.escape(sanitized_search_string)}\b",
+                    sanitized_title
+            ):
+                debug(
+                    f"Search string '{localized_title}' doesn't match '{result_title}'"
+                )
+                continue
+
+            debug(
+                f"Matched search string '{localized_title}' with result '{result_title}'"
+            )
+
+            series_url = f"https://{sj_host}{result['href']}"
+
+            r = requests.get(series_url, headers=headers, timeout=10)
+            media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
+            if not media_id_match:
+                debug(f"{hostname.upper()}: no media id for {result_title}")
+                continue
+
+            media_id = media_id_match.group(1)
+            api_url = f"https://{sj_host}/api/media/{media_id}/releases"
+
+            r = requests.get(api_url, headers=headers, timeout=10)
+            data = json.loads(r.content)
+
+            for season_block in data.values():
+                for item in season_block.get("items", []):
+                    title = item.get("name").rstrip(".")
+                    if not title:
+                        continue
+
+                    if not shared_state.is_valid_release(
+                            title,
+                            request_from,
+                            search_string,
+                            season,
+                            episode
+                    ):
+                        continue
+
+                    published = convert_to_rss_date(item.get("createdAt"))
+                    if not published:
+                        debug(f"{hostname.upper()}: no published date for {title}")
+                        published = one_hour_ago
+
+                    mb = 0
+                    size = 0
+
+                    payload = urlsafe_b64encode(
+                        f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+                    ).decode("utf-8")
+
+                    link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                    releases.append({
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": series_url
+                        },
+                        "type": "protected"
+                    })
+
+        except Exception as e:
+            debug(f"{hostname.upper()}: search parse error: {e}")
+            continue
+
+    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    return releases
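Both dj_feed and dj_search hand each release to Quasarr's internal /download/ endpoint as a urlsafe-base64 payload of six pipe-delimited fields (title|url|mirror|mb|password|imdb_id). A round-trip sketch of that encoding; the decoding half is illustrative and the sample values are made up, since the actual /download/ handler is not part of this diff.

from base64 import urlsafe_b64encode, urlsafe_b64decode

# Field order matches the f-string in dj.py above.
title = "Some.Show.S01E01.German.1080p.WEB.x264-GROUP"
series_url = "https://dj.example/serie/some-show"
mirror, mb, password, imdb_id = None, 0, "dj.example", None

payload = urlsafe_b64encode(
    f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
).decode("utf-8")

# Splitting on "|" recovers the fields. Note that None is serialized as the
# literal string "None", and a title containing "|" would shift the field
# boundaries, so a consumer has to handle both cases.
fields = urlsafe_b64decode(payload).decode("utf-8").split("|")
assert fields[0] == title and fields[-1] == "None"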
quasarr/search/sources/he.py
ADDED

@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timedelta
+from html import unescape
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "he"
+supported_mirrors = ["rapidgator", "nitroflare"]
+
+
+def parse_posted_ago(txt):
+    try:
+        m = re.search(r"(\d+)\s*(sec|min|hour|day|week|month|year)s?", txt, re.IGNORECASE)
+        if not m:
+            return ''
+        value = int(m.group(1))
+        unit = m.group(2).lower()
+        now = datetime.utcnow()
+        if unit.startswith('sec'):
+            delta = timedelta(seconds=value)
+        elif unit.startswith('min'):
+            delta = timedelta(minutes=value)
+        elif unit.startswith('hour'):
+            delta = timedelta(hours=value)
+        elif unit.startswith('day'):
+            delta = timedelta(days=value)
+        elif unit.startswith('week'):
+            delta = timedelta(weeks=value)
+        elif unit.startswith('month'):
+            delta = timedelta(days=30 * value)
+        else:
+            delta = timedelta(days=365 * value)
+        return (datetime.utcnow() - delta).strftime("%a, %d %b %Y %H:%M:%S +0000")
+    except Exception:
+        return ''
+
+
+def extract_size(text: str) -> dict:
+    match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
+    if match:
+        size = match.group(1).replace(',', '.')
+        unit = match.group(2)
+        return {"size": size, "sizeunit": unit}
+    return {"size": "0", "sizeunit": "MB"}
+
+
+def he_feed(*args, **kwargs):
+    return he_search(*args, **kwargs)
+
+
+def he_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if "radarr" in request_from.lower():
+        tag = "movies"
+    else:
+        tag = "tv-shows"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by {hostname}.')
+        return releases
+
+    source_search = ""
+    if search_string != "":
+        imdb_id = shared_state.is_imdb_id(search_string)
+        if imdb_id:
+            local_title = get_localized_title(shared_state, imdb_id, 'en')
+            if not local_title:
+                info(f"{hostname}: no title for IMDb {imdb_id}")
+                return releases
+            source_search = local_title
+        else:
+            return releases
+        source_search = unescape(source_search)
+    else:
+        imdb_id = None
+
+    url = f'https://{host}/tag/{tag}/'
+
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+    params = {"s": source_search}
+
+    try:
+        r = requests.get(url, headers=headers, params=params, timeout=10)
+        soup = BeautifulSoup(r.content, 'html.parser')
+        results = soup.find_all('div', class_='item')
+    except Exception as e:
+        info(f"{hostname}: search load error: {e}")
+        return releases
+
+    if not results:
+        return releases
+
+    for result in results:
+        try:
+            data = result.find('div', class_='data')
+            if not data:
+                continue
+
+            headline = data.find('h5')
+            if not headline:
+                continue
+
+            a = headline.find('a', href=True)
+            if not a:
+                continue
+
+            source = a['href'].strip()
+
+            head_title = a.get_text(strip=True)
+            if not head_title:
+                continue
+
+            head_split = head_title.split(" – ")
+            title = head_split[0].strip()
+
+            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                continue
+
+            size_item = extract_size(head_split[1].strip())
+            mb = shared_state.convert_to_mb(size_item)
+
+            size = mb * 1024 * 1024
+
+            published = None
+            p_meta = data.find('p', class_='meta')
+            if p_meta:
+                posted_span = None
+                for sp in p_meta.find_all('span'):
+                    txt = sp.get_text(' ', strip=True)
+                    if txt.lower().startswith('posted') or 'ago' in txt.lower():
+                        posted_span = txt
+                        break
+
+                if posted_span:
+                    published = parse_posted_ago(posted_span)
+
+            if published is None:
+                continue
+
+            release_imdb_id = None
+            try:
+                r = requests.get(source, headers=headers, timeout=10)
+                soup = BeautifulSoup(r.content, 'html.parser')
+                imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
+                if imdb_link:
+                    release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
+                    if imdb_id and release_imdb_id != imdb_id:
+                        debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
+                        continue
+                else:
+                    debug(f"{hostname}: imdb link not found for title {title}")
+            except Exception as e:
+                debug(f"{hostname}: failed to determine imdb_id for title {title}")
+                continue
+
+            password = None
+            payload = urlsafe_b64encode(
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
+            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname,
+                    "imdb_id": release_imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+        except Exception as e:
+            debug(f"{hostname}: error parsing search result: {e}")
+            continue
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    return releases
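The two helpers added in he.py are easiest to pin down by example: extract_size normalizes a human-readable size string into the dict shape that shared_state.convert_to_mb consumes, and parse_posted_ago turns relative ages like "Posted 3 hours ago" into an RFC 822 date computed against utcnow(). Expected behaviour, assuming the module above is importable; the sample inputs are made up.

from quasarr.search.sources.he import extract_size, parse_posted_ago

print(extract_size("4.7 GB"))  # {'size': '4.7', 'sizeunit': 'GB'}
print(extract_size("1,2 GB"))  # {'size': '1.2', 'sizeunit': 'GB'} (comma normalized)
print(extract_size("n/a"))     # {'size': '0', 'sizeunit': 'MB'} (fallback)

# Ages are matched case-insensitively; months and years are approximated
# as 30 and 365 days. Input without a recognizable age yields ''.
print(parse_posted_ago("Posted 3 hours ago"))  # RFC 822 date three hours back
print(parse_posted_ago("sometime"))            # ''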
quasarr/search/sources/nk.py
CHANGED
@@ -9,8 +9,8 @@ from datetime import datetime
 from html import unescape
 from urllib.parse import urljoin
 
-from bs4 import BeautifulSoup
 import requests
+from bs4 import BeautifulSoup
 
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug
@@ -65,7 +65,6 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
     if mirror and mirror not in supported_mirrors:
         debug(f'Mirror "{mirror}" not supported by {hostname}.')
         return releases
-
 
     source_search = ""
     if search_string != "":
@@ -94,7 +93,6 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
         info(f"{hostname}: search load error: {e}")
         return releases
 
-
     if not results:
         return releases
 
@@ -118,7 +116,7 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
             a = result.find('a', class_='release-details', href=True)
             if not a:
                 continue
-
+
             sub_title = result.find('span', class_='subtitle')
             if sub_title:
                 title = sub_title.get_text(strip=True)
@@ -163,9 +161,10 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
 
             published = convert_to_rss_date(date_text) if date_text else ""
 
-            payload = urlsafe_b64encode(
+            payload = urlsafe_b64encode(
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
-
+
             releases.append({
                 "details": {
                     "title": title,
@@ -186,4 +185,4 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
 
     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
-    return releases
+    return releases