quasarr-1.20.6-py3-none-any.whl
This diff shows the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of quasarr may be problematic.
- quasarr/__init__.py +460 -0
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +373 -0
- quasarr/api/captcha/__init__.py +1075 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +267 -0
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +467 -0
- quasarr/downloads/sources/__init__.py +0 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +65 -0
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +51 -0
- quasarr/downloads/sources/nx.py +105 -0
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/providers/__init__.py +0 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +20 -0
- quasarr/providers/html_templates.py +241 -0
- quasarr/providers/imdb_metadata.py +142 -0
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +917 -0
- quasarr/providers/notifications.py +124 -0
- quasarr/providers/obfuscated.py +51 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +826 -0
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +118 -0
- quasarr/providers/web_server.py +49 -0
- quasarr/search/__init__.py +153 -0
- quasarr/search/sources/__init__.py +0 -0
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +203 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +214 -0
- quasarr/search/sources/fx.py +223 -0
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +197 -0
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/storage/__init__.py +0 -0
- quasarr/storage/config.py +163 -0
- quasarr/storage/setup.py +458 -0
- quasarr/storage/sqlite_database.py +80 -0
- quasarr-1.20.6.dist-info/METADATA +304 -0
- quasarr-1.20.6.dist-info/RECORD +72 -0
- quasarr-1.20.6.dist-info/WHEEL +5 -0
- quasarr-1.20.6.dist-info/entry_points.txt +2 -0
- quasarr-1.20.6.dist-info/licenses/LICENSE +21 -0
- quasarr-1.20.6.dist-info/top_level.txt +1 -0
quasarr/search/sources/mb.py

@@ -0,0 +1,195 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import html
import re
import time
from base64 import urlsafe_b64encode
from datetime import datetime, timedelta
from urllib.parse import quote_plus

import requests
from bs4 import BeautifulSoup

from quasarr.providers.imdb_metadata import get_localized_title
from quasarr.providers.log import info, debug

hostname = "mb"
supported_mirrors = ["rapidgator", "ddownload"]
XXX_REGEX = re.compile(r"\.xxx\.", re.I)
RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)")

# map German month names to numbers
GERMAN_MONTHS = {
    'Januar': '01', 'Februar': '02', 'März': '03', 'April': '04', 'Mai': '05', 'Juni': '06',
    'Juli': '07', 'August': '08', 'September': '09', 'Oktober': '10', 'November': '11', 'Dezember': '12'
}


def convert_to_rss_date(date_str):
    parsed = datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
    return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")


def extract_size(text):
    m = re.match(r"(\d+(?:\.\d+)?)\s*([A-Za-z]+)", text)
    if not m:
        raise ValueError(f"Invalid size format: {text!r}")
    return {"size": m.group(1), "sizeunit": m.group(2)}
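For reference, the two helpers above behave as follows. A minimal illustration (not part of the diff; sample values are invented):

# --- illustrative example, not part of the wheel ---
print(convert_to_rss_date("17.03.2024 - 20:15"))   # 'Sun, 17 Mar 2024 20:15:00 +0000'
print(extract_size("4.7 GB"))                      # {'size': '4.7', 'sizeunit': 'GB'}
extract_size("n/a")                                # raises ValueError: Invalid size format: 'n/a'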
def _parse_posts(soup, shared_state, password, mirror_filter,
                 is_search=False, request_from=None, search_string=None,
                 season=None, episode=None):
    releases = []
    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')

    for post in soup.select("div.post"):
        try:
            # title & source
            h1 = post.find("h1")
            a = h1.find("a")
            source = a["href"].strip()
            title = a.get_text(strip=True)

            # parse date
            date_p = post.find("p", class_="date_x")
            date_txt = date_p.get_text(strip=True) if date_p else None
            published = one_hour_ago
            if date_txt:
                m_date = re.search(r'(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})', date_txt)
                if m_date:
                    day, mon_name, year, hm = m_date.groups()
                    mon = GERMAN_MONTHS.get(mon_name, '01')
                    dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
                    published = dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")

            if is_search:
                if not shared_state.is_valid_release(title,
                                                     request_from,
                                                     search_string,
                                                     season,
                                                     episode):
                    continue

                # drop .XXX. unless user explicitly searched xxx
                if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
                    continue
                # require resolution/codec
                if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
                    continue
                # require no spaces in title
                if " " in title:
                    continue

                # can't check for mirrors in search context
                if mirror_filter and mirror_filter not in supported_mirrors:
                    continue
            else:
                mirror_candidates = []
                for strong in post.find_all('strong', string=re.compile(r'^Download', re.I)):
                    link_tag = strong.find_next_sibling('a')
                    if link_tag and link_tag.get_text(strip=True):
                        host = link_tag.get_text(strip=True).split('.')[0].lower()
                        mirror_candidates.append(host)
                valid = [m for m in mirror_candidates if m in supported_mirrors]
                if not valid or (mirror_filter and mirror_filter not in valid):
                    continue

            # extract IMDb ID
            imdb_id = None
            for tag in post.find_all('a', href=True):
                m = IMDB_REGEX.search(tag['href'])
                if m:
                    imdb_id = m.group(1)
                    break

            # size extraction
            mb = size_bytes = 0
            size_match = re.search(r"Größe:\s*([\d\.]+)\s*([GMK]B)", post.get_text())
            if size_match:
                sz = {"size": size_match.group(1), "sizeunit": size_match.group(2)}
                mb = shared_state.convert_to_mb(sz)
                size_bytes = mb * 1024 * 1024

            payload = urlsafe_b64encode(
                f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
            ).decode()
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": imdb_id,
                    "link": link,
                    "mirror": mirror_filter,
                    "size": size_bytes,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })
        except Exception as e:
            debug(f"Error parsing {hostname.upper()} post: {e}")
            continue
    return releases
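The date handling in _parse_posts above is worth spelling out: the regex tolerates an optional weekday prefix, and GERMAN_MONTHS bridges the German month name into a purely numeric strptime format. A minimal sketch with an invented sample string:

# --- illustrative example, not part of the wheel ---
date_txt = "Sonntag, 17. März 2024 20:15"
m_date = re.search(r'(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})', date_txt)
day, mon_name, year, hm = m_date.groups()   # ('17', 'März', '2024', '20:15')
mon = GERMAN_MONTHS.get(mon_name, '01')     # '03'
dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
print(dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000"))  # 'Sun, 17 Mar 2024 20:15:00 +0000'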
def mb_feed(shared_state, start_time, request_from, mirror=None):
    mb = shared_state.values["config"]("Hostnames").get(hostname)

    if "arr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return []

    password = mb
    section = "neuerscheinungen" if "Radarr" in request_from else "serie"
    url = f"https://{mb}/category/{section}/"
    headers = {'User-Agent': shared_state.values["user_agent"]}
    try:
        html_doc = requests.get(url, headers=headers, timeout=10).content
        soup = BeautifulSoup(html_doc, "html.parser")
        releases = _parse_posts(soup, shared_state, password, mirror_filter=mirror)
    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")
        releases = []
    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases


def mb_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    mb = shared_state.values["config"]("Hostnames").get(hostname)

    if "arr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return []

    password = mb
    imdb_id = shared_state.is_imdb_id(search_string)
    if imdb_id:
        title = get_localized_title(shared_state, imdb_id, 'de')
        if not title:
            info(f"Could not extract title from IMDb-ID {imdb_id}")
            return []
        search_string = html.unescape(title)

    q = quote_plus(search_string)
    url = f"https://{mb}/?s={q}&id=20&post_type=post"
    headers = {'User-Agent': shared_state.values["user_agent"]}
    try:
        html_doc = requests.get(url, headers=headers, timeout=10).content
        soup = BeautifulSoup(html_doc, "html.parser")
        releases = _parse_posts(
            soup, shared_state, password, mirror_filter=mirror,
            is_search=True, request_from=request_from,
            search_string=search_string, season=season, episode=episode
        )
    except Exception as e:
        info(f"Error loading {hostname.upper()} search: {e}")
        releases = []
    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases
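All three search modules in this diff build their download links the same way: the release metadata is joined with pipes and urlsafe-base64-encoded into a payload query parameter. A sketch of the round trip — the decoding side is an assumption about what Quasarr's /download/ endpoint does; it is not shown in this diff:

# --- illustrative example, not part of the wheel ---
from base64 import urlsafe_b64encode, urlsafe_b64decode

fields = ("Some.Movie.2024.1080p.WEB.x264", "https://example.invalid/post",
          "rapidgator", "4700", "pw", "tt0000000")  # invented values
payload = urlsafe_b64encode("|".join(fields).encode()).decode()

# hypothetical decoder on the /download/ side:
title, source, mirror, mb, password, imdb_id = urlsafe_b64decode(payload).decode().split("|")

Note that this scheme relies on none of the fields containing a literal '|'.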
quasarr/search/sources/nk.py

@@ -0,0 +1,188 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import re
import time
from base64 import urlsafe_b64encode
from datetime import datetime
from html import unescape
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

from quasarr.providers.imdb_metadata import get_localized_title
from quasarr.providers.log import info, debug

hostname = "nk"
supported_mirrors = ["rapidgator", "ddownload"]


def convert_to_rss_date(date_str: str) -> str:
    date_str = date_str.strip()
    # Note: "%B" matches full month names in the current locale, so German
    # dates like "17. März 2024" only parse when a German locale is active.
    for fmt in ("%d. %B %Y / %H:%M", "%d.%m.%Y / %H:%M", "%d.%m.%Y - %H:%M", "%Y-%m-%d %H:%M"):
        try:
            dt = datetime.strptime(date_str, fmt)
            return dt.strftime("%a, %d %b %Y %H:%M:%S +0000")
        except Exception:
            continue
    return ""
def extract_size(text: str) -> dict:
    match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
    if match:
        size = match.group(1).replace(',', '.')
        unit = match.group(2)
        return {"size": size, "sizeunit": unit}
    return {"size": "0", "sizeunit": "MB"}


def get_release_field(res, label):
    for li in res.select('ul.release-infos li'):
        sp = li.find('span')
        if not sp:
            continue  # skip list items without a label span instead of aborting the scan
        if sp.get_text(strip=True).lower() == label.lower():
            txt = li.get_text(' ', strip=True)
            return txt[len(sp.get_text(strip=True)):].strip()
    return ''
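A minimal illustration of get_release_field above, against an invented HTML snippet shaped like the markup the parser expects:

# --- illustrative example, not part of the wheel ---
from bs4 import BeautifulSoup

sample = BeautifulSoup(
    '<div><ul class="release-infos">'
    '<li><span>Größe</span> 1,4 GB</li>'
    '</ul></div>', 'html.parser')
print(get_release_field(sample, 'Größe'))  # '1,4 GB'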
def nk_feed(*args, **kwargs):
    return nk_search(*args, **kwargs)


def nk_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
    releases = []
    host = shared_state.values["config"]("Hostnames").get(hostname)

    if "arr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by {hostname}.')
        return releases

    source_search = ""
    if search_string != "":
        imdb_id = shared_state.is_imdb_id(search_string)
        if imdb_id:
            local_title = get_localized_title(shared_state, imdb_id, 'de')
            if not local_title:
                info(f"{hostname}: no title for IMDb {imdb_id}")
                return releases
            source_search = local_title
        else:
            return releases
        source_search = unescape(source_search)
    else:
        imdb_id = None

    url = f'https://{host}/search'
    headers = {"User-Agent": shared_state.values["user_agent"]}
    data = {"search": source_search}

    try:
        r = requests.post(url, headers=headers, data=data, timeout=20)
        soup = BeautifulSoup(r.content, 'html.parser')
        results = soup.find_all('div', class_='article-right')
    except Exception as e:
        info(f"{hostname}: search load error: {e}")
        return releases

    if not results:
        return releases

    for result in results:
        try:
            imdb_a = result.select_one('a.imdb')
            if imdb_a and imdb_a.get('href'):
                try:
                    release_imdb_id = re.search(r'tt\d+', imdb_a['href']).group()
                    if imdb_id and release_imdb_id != imdb_id:
                        debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
                        continue
                except Exception:
                    debug(f"{hostname}: could not extract IMDb ID")
                    continue
            else:
                debug(f"{hostname}: could not extract IMDb ID")
                continue

            a = result.find('a', class_='release-details', href=True)
            if not a:
                continue

            sub_title = result.find('span', class_='subtitle')
            if sub_title:
                title = sub_title.get_text(strip=True)
            else:
                continue

            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
                continue

            source = urljoin(f'https://{host}', a['href'])

            mb = 0
            size_text = get_release_field(result, 'Größe')
            if size_text:
                size_item = extract_size(size_text)
                mb = shared_state.convert_to_mb(size_item)

            if season != "" and episode == "":
                mb = 0  # Size unknown for season packs

            size = mb * 1024 * 1024

            password = ''
            mirrors_p = result.find('p', class_='mirrors')
            if mirrors_p:
                strong = mirrors_p.find('strong')
                if strong and strong.get_text(strip=True).lower().startswith('passwort'):
                    nxt = strong.next_sibling
                    if nxt:
                        val = str(nxt).strip()
                        if val:
                            password = val.split()[0]

            date_text = ''
            p_meta = result.find('p', class_='meta')
            if p_meta:
                spans = p_meta.find_all('span')
                if len(spans) >= 2:
                    date_part = spans[0].get_text(strip=True)
                    time_part = spans[1].get_text(strip=True).replace('Uhr', '').strip()
                    date_text = f"{date_part} / {time_part}"

            published = convert_to_rss_date(date_text) if date_text else ""

            payload = urlsafe_b64encode(
                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": release_imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })
        except Exception as e:
            debug(f"{hostname}: error parsing search result: {e}")
            continue

    elapsed = time.time() - start_time
    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
    return releases
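For reference, how the nk helpers normalize sizes and dates (sample values invented; see the locale note in convert_to_rss_date):

# --- illustrative example, not part of the wheel ---
print(extract_size("1,4 GB"))                        # {'size': '1.4', 'sizeunit': 'GB'}
print(convert_to_rss_date("17.03.2024 / 20:15"))     # 'Sun, 17 Mar 2024 20:15:00 +0000'
print(convert_to_rss_date("17. März 2024 / 20:15"))  # '' under the default C locale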
quasarr/search/sources/nx.py

@@ -0,0 +1,197 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import html
import time
from base64 import urlsafe_b64encode

import requests

from quasarr.providers.imdb_metadata import get_localized_title
from quasarr.providers.log import info, debug

hostname = "nx"
supported_mirrors = ["filer"]
def nx_feed(shared_state, start_time, request_from, mirror=None):
    releases = []
    nx = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = nx

    if "lazylibrarian" in request_from.lower():
        category = "ebook"
    elif "radarr" in request_from.lower():
        category = "movie"
    else:
        category = "episode"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
              ' Skipping search!')
        return releases

    url = f'https://{nx}/api/frontend/releases/category/{category}/tag/all/1/51?sort=date'
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    try:
        # headers must be passed by keyword; passed positionally it would land in
        # requests.get()'s "params" argument and be sent as a query string instead
        response = requests.get(url, headers=headers, timeout=10)
        feed = response.json()
    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")
        return releases

    items = feed['result']['list']
    for item in items:
        try:
            title = item['name']

            if title:
                try:
                    if 'lazylibrarian' in request_from.lower():
                        # lazylibrarian can only detect specific date formats / issue numbering for magazines
                        title = shared_state.normalize_magazine_title(title)

                    source = f"https://{nx}/release/{item['slug']}"
                    imdb_id = item.get('_media', {}).get('imdbid', None)
                    mb = shared_state.convert_to_mb(item)
                    payload = urlsafe_b64encode(
                        f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
                    link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                except Exception:
                    continue

                try:
                    size = mb * 1024 * 1024
                except Exception:
                    continue

                try:
                    published = item['publishat']
                except Exception:
                    continue

                releases.append({
                    "details": {
                        "title": title,
                        "hostname": hostname.lower(),
                        "imdb_id": imdb_id,
                        "link": link,
                        "mirror": mirror,
                        "size": size,
                        "date": published,
                        "source": source
                    },
                    "type": "protected"
                })

        except Exception as e:
            info(f"Error parsing {hostname.upper()} feed: {e}")

    elapsed_time = time.time() - start_time
    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

    return releases
def nx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    releases = []
    nx = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = nx

    if "lazylibrarian" in request_from.lower():
        valid_type = "ebook"
    elif "radarr" in request_from.lower():
        valid_type = "movie"
    else:
        valid_type = "episode"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
              ' Skipping search!')
        return releases

    imdb_id = shared_state.is_imdb_id(search_string)
    if imdb_id:
        search_string = get_localized_title(shared_state, imdb_id, 'de')
        if not search_string:
            info(f"Could not extract title from IMDb-ID {imdb_id}")
            return releases
        search_string = html.unescape(search_string)

    url = f'https://{nx}/api/frontend/search/{search_string}'
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    try:
        # headers must be passed by keyword here as well (see nx_feed)
        response = requests.get(url, headers=headers, timeout=10)
        feed = response.json()
    except Exception as e:
        info(f"Error loading {hostname.upper()} search: {e}")
        return releases

    items = feed['result']['releases']
    for item in items:
        try:
            if item['type'] == valid_type:
                title = item['name']
                if title:
                    if not shared_state.is_valid_release(title,
                                                         request_from,
                                                         search_string,
                                                         season,
                                                         episode):
                        continue

                    if 'lazylibrarian' in request_from.lower():
                        # lazylibrarian can only detect specific date formats / issue numbering for magazines
                        title = shared_state.normalize_magazine_title(title)

                    try:
                        source = f"https://{nx}/release/{item['slug']}"
                        # resolve the IMDb ID per item so one release's ID does not
                        # leak into the releases that follow it
                        release_imdb_id = imdb_id or item.get('_media', {}).get('imdbid', None)

                        mb = shared_state.convert_to_mb(item)
                        payload = urlsafe_b64encode(
                            f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")
                        ).decode("utf-8")
                        link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                    except Exception:
                        continue

                    try:
                        size = mb * 1024 * 1024
                    except Exception:
                        continue

                    try:
                        published = item['publishat']
                    except Exception:
                        published = ""

                    releases.append({
                        "details": {
                            "title": title,
                            "hostname": hostname.lower(),
                            "imdb_id": release_imdb_id,
                            "link": link,
                            "mirror": mirror,
                            "size": size,
                            "date": published,
                            "source": source
                        },
                        "type": "protected"
                    })

        except Exception as e:
            info(f"Error parsing {hostname.upper()} search: {e}")

    elapsed_time = time.time() - start_time
    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

    return releases
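All three modules delegate size normalization to shared_state.convert_to_mb, which this diff shows being called both with {'size': ..., 'sizeunit': ...} mappings (mb.py, nk.py) and with raw API items (nx.py). Its implementation lives in quasarr/providers/shared_state.py and is not part of the excerpt above; a hypothetical stand-in matching the behavior the callers appear to assume:

# --- illustrative sketch, not the actual implementation ---
def convert_to_mb_sketch(item) -> int:
    # assumes the mapping exposes "size" and "sizeunit" keys
    units = {"B": 1 / (1024 * 1024), "KB": 1 / 1024, "MB": 1, "GB": 1024, "TB": 1024 * 1024}
    size = float(str(item["size"]).replace(",", "."))
    return int(size * units[item["sizeunit"].upper()])

assert convert_to_mb_sketch({"size": "4.7", "sizeunit": "GB"}) == 4812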