quasarr-1.20.6-py3-none-any.whl
This diff shows the content of a publicly released package version as published to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
Potentially problematic release.
- quasarr/__init__.py +460 -0
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +373 -0
- quasarr/api/captcha/__init__.py +1075 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +267 -0
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +467 -0
- quasarr/downloads/sources/__init__.py +0 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +65 -0
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +51 -0
- quasarr/downloads/sources/nx.py +105 -0
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/providers/__init__.py +0 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +20 -0
- quasarr/providers/html_templates.py +241 -0
- quasarr/providers/imdb_metadata.py +142 -0
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +917 -0
- quasarr/providers/notifications.py +124 -0
- quasarr/providers/obfuscated.py +51 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +826 -0
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +118 -0
- quasarr/providers/web_server.py +49 -0
- quasarr/search/__init__.py +153 -0
- quasarr/search/sources/__init__.py +0 -0
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +203 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +214 -0
- quasarr/search/sources/fx.py +223 -0
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +197 -0
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/storage/__init__.py +0 -0
- quasarr/storage/config.py +163 -0
- quasarr/storage/setup.py +458 -0
- quasarr/storage/sqlite_database.py +80 -0
- quasarr-1.20.6.dist-info/METADATA +304 -0
- quasarr-1.20.6.dist-info/RECORD +72 -0
- quasarr-1.20.6.dist-info/WHEEL +5 -0
- quasarr-1.20.6.dist-info/entry_points.txt +2 -0
- quasarr-1.20.6.dist-info/licenses/LICENSE +21 -0
- quasarr-1.20.6.dist-info/top_level.txt +1 -0
quasarr/search/sources/sl.py
@@ -0,0 +1,246 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import datetime
import html
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import xml.etree.ElementTree as ET
from base64 import urlsafe_b64encode
from urllib.parse import quote_plus

import requests
from bs4 import BeautifulSoup

from quasarr.providers.imdb_metadata import get_localized_title
from quasarr.providers.log import info, debug

hostname = "sl"
supported_mirrors = ["nitroflare", "ddownload"]  # ignoring captcha-protected multiup/mirrorace for now


def extract_size(text):
    match = re.match(r"([\d\.]+)\s*([KMGT]B)", text, re.IGNORECASE)
    if match:
        size = match.group(1)
        unit = match.group(2).upper()
        return {"size": size, "sizeunit": unit}
    else:
        raise ValueError(f"Invalid size format: {text}")


def parse_pubdate_to_iso(pubdate_str):
    """
    Parse an RFC-822 pubDate from RSS into an ISO8601 string with timezone.
    """
    dt = datetime.datetime.strptime(pubdate_str, '%a, %d %b %Y %H:%M:%S %z')
    return dt.isoformat()


def sl_feed(shared_state, start_time, request_from, mirror=None):
    releases = []

    sl = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = sl

    if "lazylibrarian" in request_from.lower():
        feed_type = "ebooks"
    elif "radarr" in request_from.lower():
        feed_type = "movies"
    else:
        feed_type = "tv-shows"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
        return releases

    url = f'https://{sl}/{feed_type}/feed/'
    headers = {'User-Agent': shared_state.values['user_agent']}

    try:
        xml_text = requests.get(url, headers=headers, timeout=10).text
        root = ET.fromstring(xml_text)

        for item in root.find('channel').findall('item'):
            try:
                title = item.findtext('title').strip()
                if 'lazylibrarian' in request_from.lower():
                    # lazylibrarian can only detect specific date formats / issue numbering for magazines
                    title = shared_state.normalize_magazine_title(title)

                source = item.findtext('link').strip()

                desc = item.findtext('description') or ''

                size_match = re.search(r"Size:\s*([\d\.]+\s*(?:GB|MB|KB|TB))", desc, re.IGNORECASE)
                if not size_match:
                    debug(f"Size not found in RSS item: {title}")
                    continue
                size_info = size_match.group(1).strip()
                size_item = extract_size(size_info)
                mb = shared_state.convert_to_mb(size_item)
                size = mb * 1024 * 1024

                pubdate = item.findtext('pubDate').strip()
                published = parse_pubdate_to_iso(pubdate)

                m = re.search(r"https?://www\.imdb\.com/title/(tt\d+)", desc)
                imdb_id = m.group(1) if m else None

                payload = urlsafe_b64encode(
                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
                ).decode("utf-8")
                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                releases.append({
                    "details": {
                        "title": title,
                        "hostname": hostname.lower(),
                        "imdb_id": imdb_id,
                        "link": link,
                        "mirror": mirror,
                        "size": size,
                        "date": published,
                        "source": source
                    },
                    "type": "protected"
                })

            except Exception as e:
                info(f"Error parsing {hostname.upper()} feed item: {e}")
                continue

    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")

    elapsed = time.time() - start_time
    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
    return releases


def sl_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    releases = []
    sl = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = sl

    if "lazylibrarian" in request_from.lower():
        feed_type = "ebooks"
    elif "radarr" in request_from.lower():
        feed_type = "movies"
    else:
        feed_type = "tv-shows"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
        return releases

    try:
        imdb_id = shared_state.is_imdb_id(search_string)
        if imdb_id:
            search_string = get_localized_title(shared_state, imdb_id, 'en') or ''
            search_string = html.unescape(search_string)
            if not search_string:
                info(f"Could not extract title from IMDb-ID {imdb_id}")
                return releases

        # Build the list of URLs to search. For tv-shows also search the "foreign" section.
        q = quote_plus(search_string)
        urls = [f'https://{sl}/{feed_type}/?s={q}']
        if feed_type == "tv-shows":
            urls.append(f'https://{sl}/foreign/?s={q}')

        headers = {"User-Agent": shared_state.values['user_agent']}

        # Fetch pages in parallel (so we don't double the slow site latency)
        def fetch(url):
            try:
                debug(f"Fetching {url} ({hostname})")
                r = requests.get(url, headers=headers, timeout=10)
                r.raise_for_status()
                return r.text
            except Exception as e:
                info(f"Error fetching {hostname} url {url}: {e}")
                return ''

        html_texts = []
        with ThreadPoolExecutor(max_workers=len(urls)) as tpe:
            futures = {tpe.submit(fetch, u): u for u in urls}
            for future in as_completed(futures):
                try:
                    html_texts.append(future.result())
                except Exception as e:
                    info(f"Error fetching {hostname} search page: {e}")

        # Parse each result and collect unique releases (dedupe by source link)
        seen_sources = set()
        for html_text in html_texts:
            if not html_text:
                continue
            try:
                soup = BeautifulSoup(html_text, 'html.parser')
                posts = soup.find_all('div', class_=lambda c: c and c.startswith('post-'))

                for post in posts:
                    try:
                        a = post.find('h1').find('a')
                        title = a.get_text(strip=True)

                        if not shared_state.is_valid_release(title,
                                                             request_from,
                                                             search_string,
                                                             season,
                                                             episode):
                            continue

                        if 'lazylibrarian' in request_from.lower():
                            title = shared_state.normalize_magazine_title(title)

                        source = a['href']
                        # dedupe
                        if source in seen_sources:
                            continue
                        seen_sources.add(source)

                        # Published date
                        time_tag = post.find('span', {'class': 'localtime'})
                        published = None
                        if time_tag and time_tag.has_attr('data-lttime'):
                            published = time_tag['data-lttime']
                        published = published or datetime.datetime.utcnow().isoformat() + '+00:00'

                        size = 0
                        imdb_id = None

                        payload = urlsafe_b64encode(
                            f"{title}|{source}|{mirror}|0|{password}|{imdb_id}".encode('utf-8')
                        ).decode('utf-8')
                        link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                        releases.append({
                            "details": {
                                "title": title,
                                "hostname": hostname.lower(),
                                "imdb_id": imdb_id,
                                "link": link,
                                "mirror": mirror,
                                "size": size,
                                "date": published,
                                "source": source
                            },
                            "type": "protected"
                        })
                    except Exception as e:
                        info(f"Error parsing {hostname.upper()} search item: {e}")
                        continue
            except Exception as e:
                info(f"Error parsing {hostname.upper()} search HTML: {e}")
                continue

    except Exception as e:
        info(f"Error loading {hostname.upper()} search page: {e}")

    elapsed = time.time() - start_time
    debug(f"Search time: {elapsed:.2f}s ({hostname})")
    return releases
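Both functions hand the *arr applications a /download link whose query string is a base64url blob. As orientation, a minimal sketch of how that payload round-trips; decode_payload is a hypothetical helper (the real decoding lives in Quasarr's download endpoint, which is not part of this hunk), and the field order is taken verbatim from the f-string in sl_feed:

# Hypothetical round-trip of the payload built in sl_feed/sl_search above.
# Field order (title|source|mirror|mb|password|imdb_id) matches the f-string;
# note that None values arrive as the literal string "None" after formatting.
from base64 import urlsafe_b64encode, urlsafe_b64decode


def encode_payload(title, source, mirror, mb, password, imdb_id):
    raw = f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
    return urlsafe_b64encode(raw.encode("utf-8")).decode("utf-8")


def decode_payload(payload):
    # A title containing "|" would break this split; the source above does not guard against that.
    fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
    title, source, mirror, mb, password, imdb_id = fields
    return {"title": title, "source": source, "mirror": mirror,
            "mb": mb, "password": password, "imdb_id": imdb_id}


payload = encode_payload("Some.Show.S01E01.1080p.WEB.x264-GRP",
                         "https://example.org/some-post", None, 1024,
                         "example.org", "tt0000000")
print(decode_payload(payload)["title"])  # Some.Show.S01E01.1080p.WEB.x264-GRP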
quasarr/search/sources/wd.py
@@ -0,0 +1,208 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import html
import re
import time
from base64 import urlsafe_b64encode
from datetime import datetime, timedelta
from urllib.parse import quote, quote_plus

import requests
from bs4 import BeautifulSoup

from quasarr.providers.imdb_metadata import get_localized_title
from quasarr.providers.log import info, debug

hostname = "wd"
supported_mirrors = ["rapidgator", "ddownload", "katfile", "fikper", "turbobit"]

# regex to detect porn-tag .XXX. (case-insensitive, dots included)
XXX_REGEX = re.compile(r"\.xxx\.", re.I)
# regex to detect video resolution
RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
# regex to detect video codec tags
CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)


def convert_to_rss_date(date_str):
    """
    date_str comes in as "02.05.2025 - 09:04"
    Return RFC-822 style date with +0000 timezone.
    """
    parsed = datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
    return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")


def extract_size(text):
    """
    e.g. "8 GB" → {"size": "8", "sizeunit": "GB"}
    """
    match = re.match(r"(\d+(?:\.\d+)?)\s*([A-Za-z]+)", text)
    if not match:
        raise ValueError(f"Invalid size format: {text!r}")
    return {"size": match.group(1), "sizeunit": match.group(2)}


def _parse_rows(
        soup,
        shared_state,
        url_base,
        password,
        mirror_filter,
        request_from=None,
        search_string=None,
        season=None,
        episode=None
):
    """
    Walk the <table> rows, extract one release per row.
    Only include rows with at least one supported mirror.
    If mirror_filter provided, only include rows where mirror_filter is present.

    Context detection:
    - feed when search_string is None
    - search when search_string is a str

    Porn-filtering:
    - feed: always drop .XXX.
    - search: drop .XXX. unless 'xxx' in search_string (case-insensitive)

    If in search context, also filter out non-video releases (ebooks, games).
    """
    releases = []
    is_search = search_string is not None

    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')

    for tr in soup.select("table.table tbody tr.lh-sm"):
        try:
            a = tr.find("a", class_="upload-link")
            raw_href = a["href"]
            href = quote(raw_href, safe="/?:=&")
            source = f"https://{url_base}{href}"

            preview_div = a.find("div", class_="preview-text")
            date_txt = preview_div.get_text(strip=True) if preview_div else None
            if preview_div:
                preview_div.extract()

            title = a.get_text(strip=True)

            # search context contains non-video releases (ebooks, games, etc.)
            if is_search:
                if not shared_state.is_valid_release(title,
                                                     request_from,
                                                     search_string,
                                                     season,
                                                     episode):
                    continue

                if 'lazylibrarian' in request_from.lower():
                    # lazylibrarian can only detect specific date formats / issue numbering for magazines
                    title = shared_state.normalize_magazine_title(title)
                else:
                    # drop .XXX. unless user explicitly searched xxx
                    if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
                        continue
                    # require resolution/codec
                    if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
                        continue
                    # require no spaces in title
                    if " " in title:
                        continue

            hoster_names = tr.find("span", class_="button-warezkorb")["data-hoster-names"]
            mirrors = [m.strip().lower() for m in hoster_names.split(",")]
            valid = [m for m in mirrors if m in supported_mirrors]
            if not valid or (mirror_filter and mirror_filter not in valid):
                continue

            size_txt = tr.find("span", class_="element-size").get_text(strip=True)
            sz = extract_size(size_txt)
            mb = shared_state.convert_to_mb(sz)
            size_bytes = mb * 1024 * 1024

            imdb_id = None
            published = convert_to_rss_date(date_txt) if date_txt else one_hour_ago

            payload = urlsafe_b64encode(
                f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
            ).decode()
            download_link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": imdb_id,
                    "link": download_link,
                    "mirror": mirror_filter,
                    "size": size_bytes,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })
        except Exception as e:
            debug(f"Error parsing {hostname.upper()} row: {e}")
            continue
    return releases


def wd_feed(shared_state, start_time, request_from, mirror=None):
    wd = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = wd

    if "lazylibrarian" in request_from.lower():
        feed_type = "Ebooks"
    elif "radarr" in request_from.lower():
        feed_type = "Movies"
    else:
        feed_type = "Serien"

    url = f"https://{wd}/{feed_type}"
    headers = {'User-Agent': shared_state.values["user_agent"]}
    try:
        response = requests.get(url, headers=headers, timeout=10).content
        soup = BeautifulSoup(response, "html.parser")
        releases = _parse_rows(soup, shared_state, wd, password, mirror)
    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")
        releases = []
    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases


def wd_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    releases = []
    wd = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = wd

    imdb_id = shared_state.is_imdb_id(search_string)
    if imdb_id:
        search_string = get_localized_title(shared_state, imdb_id, 'de')
        if not search_string:
            info(f"Could not extract title from IMDb-ID {imdb_id}")
            return releases
        search_string = html.unescape(search_string)

    q = quote_plus(search_string)
    url = f"https://{wd}/search?q={q}"
    headers = {'User-Agent': shared_state.values["user_agent"]}

    try:
        response = requests.get(url, headers=headers, timeout=10).content
        soup = BeautifulSoup(response, "html.parser")
        releases = _parse_rows(
            soup, shared_state, wd, password, mirror,
            request_from=request_from,
            search_string=search_string,
            season=season, episode=episode
        )
    except Exception as e:
        info(f"Error loading {hostname.upper()} search: {e}")
        releases = []
    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases
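The search-context gate in _parse_rows is the interesting part: for non-lazylibrarian searches a title must survive three checks. Note that despite the docstring's "feed: always drop .XXX.", the filters in this hunk only run when is_search is true. A standalone sketch of that gate, reusing the module's regexes (illustrative only, not part of the diff):

# Standalone check mirroring the search-context filters in _parse_rows above.
import re

XXX_REGEX = re.compile(r"\.xxx\.", re.I)
RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)


def passes_search_filters(title, search_string):
    # drop .XXX. unless the user explicitly searched for xxx
    if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
        return False
    # require a resolution or codec tag, i.e. an actual video release
    if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
        return False
    # scene-style release names contain no spaces
    return " " not in title


print(passes_search_filters("Some.Movie.2024.1080p.WEB.x264-GRP", "some movie"))  # True
print(passes_search_filters("Some Ebook Title (2024)", "some ebook"))             # False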
quasarr/storage/__init__.py
File without changes
quasarr/storage/config.py
@@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
# Quasarr
# Project by https://github.com/rix1337

import base64
import configparser
import re
import string

from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util.Padding import pad

from quasarr.providers import shared_state
from quasarr.storage.sqlite_database import DataBase


class Config(object):
    _DEFAULT_CONFIG = {
        'API': [
            ("key", "secret", ""),
        ],
        'JDownloader': [
            ("user", "secret", ""),
            ("password", "secret", ""),
            ("device", "str", ""),
        ],
        'Hostnames': [
            ("al", "secret", ""),
            ("by", "secret", ""),
            ("dd", "secret", ""),
            ("dj", "secret", ""),
            ("dt", "secret", ""),
            ("dw", "secret", ""),
            ("fx", "secret", ""),
            ("he", "secret", ""),
            ("mb", "secret", ""),
            ("nk", "secret", ""),
            ("nx", "secret", ""),
            ("sf", "secret", ""),
            ("sj", "secret", ""),
            ("sl", "secret", ""),
            ("wd", "secret", "")
        ],
        'FlareSolverr': [
            ("url", "str", ""),
        ],
        'AL': [
            ("user", "secret", ""),
            ("password", "secret", "")
        ],
        'DD': [
            ("user", "secret", ""),
            ("password", "secret", "")
        ],
        'NX': [
            ("user", "secret", ""),
            ("password", "secret", "")
        ]
    }
    __config__ = []

    def __init__(self, section):
        self._configfile = shared_state.values["configfile"]
        self._section = section
        self._config = configparser.RawConfigParser()
        try:
            self._config.read(self._configfile)
            self._config.has_section(
                self._section) or self._set_default_config(self._section)
            self.__config__ = self._read_config(self._section)
        except configparser.DuplicateSectionError:
            print('Duplicate Section in Config File')
            raise
        except Exception as e:
            print(f'Unknown error while reading config file: {e}')
            raise

    def _set_default_config(self, section):
        self._config.add_section(section)
        for (key, key_type, value) in self._DEFAULT_CONFIG[section]:
            self._config.set(section, key, value)
        with open(self._configfile, 'w') as configfile:
            self._config.write(configfile)

    def _get_encryption_params(self):
        crypt_key = DataBase('secrets').retrieve("key")
        crypt_iv = DataBase('secrets').retrieve("iv")
        if crypt_iv and crypt_key:
            return base64.b64decode(crypt_key), base64.b64decode(crypt_iv)
        else:
            crypt_key = get_random_bytes(32)
            crypt_iv = get_random_bytes(16)
            DataBase('secrets').update_store("key", base64.b64encode(crypt_key).decode())
            DataBase('secrets').update_store("iv", base64.b64encode(crypt_iv).decode())
            return crypt_key, crypt_iv

    def _set_to_config(self, section, key, value):
        default_value_type = [param[1] for param in self._DEFAULT_CONFIG[section] if param[0] == key]
        if default_value_type and default_value_type[0] == 'secret' and len(value):
            crypt_key, crypt_iv = self._get_encryption_params()
            cipher = AES.new(crypt_key, AES.MODE_CBC, crypt_iv)
            value = base64.b64encode(cipher.encrypt(pad(value.encode(), AES.block_size)))
            value = 'secret|' + value.decode()
        self._config.set(section, key, value)
        with open(self._configfile, 'w') as configfile:
            self._config.write(configfile)

    def _read_config(self, section):
        return [(key, '', self._config.get(section, key)) for key in self._config.options(section)]

    def _get_from_config(self, scope, key):
        res = [param[2] for param in scope if param[0] == key]
        if not res:
            res = [param[2]
                   for param in self._DEFAULT_CONFIG[self._section] if param[0] == key]
        if [param for param in self._DEFAULT_CONFIG[self._section] if param[0] == key and param[1] == 'secret']:
            value = res[0].strip('\'"')
            if value.startswith("secret|"):
                crypt_key, crypt_iv = self._get_encryption_params()
                cipher = AES.new(crypt_key, AES.MODE_CBC, crypt_iv)
                decrypted_payload = cipher.decrypt(base64.b64decode(value[7:])).decode("utf-8").strip()
                final_payload = "".join(filter(lambda c: c in string.printable, decrypted_payload))
                return final_payload
            else:  ## Loaded value is not encrypted, return as is
                if len(value) > 0:
                    self.save(key, value)
                return value
        elif [param for param in self._DEFAULT_CONFIG[self._section] if param[0] == key and param[1] == 'bool']:
            return True if len(res) and res[0].strip('\'"').lower() == 'true' else False
        else:
            return res[0].strip('\'"') if len(res) > 0 else False

    def save(self, key, value):
        self._set_to_config(self._section, key, value)
        return

    def get(self, key):
        return self._get_from_config(self.__config__, key)


def get_clean_hostnames(shared_state):
    hostnames = Config('Hostnames')
    set_hostnames = {}

    def clean_up_hostname(host, strg, hostnames):
        if strg and '/' in strg:
            strg = strg.replace('https://', '').replace('http://', '')
            strg = re.findall(r'([a-z-.]*\.[a-z]*)', strg)[0]
            hostnames.save(host, strg)
        if strg and re.match(r'.*[A-Z].*', strg):
            hostnames.save(host, strg.lower())
        if strg:
            print(f'Using "{strg}" as hostname for "{host}"')
        return strg

    for name in shared_state.values["sites"]:
        name = name.lower()
        hostname = clean_up_hostname(name, hostnames.get(name), hostnames)
        if hostname:
            set_hostnames[name] = hostname

    return set_hostnames
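Config stores any "secret"-typed value as the literal prefix 'secret|' plus base64-encoded AES-256-CBC ciphertext, with key and IV persisted once via DataBase('secrets'). A minimal standalone sketch of that round-trip, under two stated deviations: key and IV are generated inline here instead of going through DataBase, and decryption uses unpad where _get_from_config strips non-printable characters instead:

# Minimal sketch of the 'secret|' scheme used by Config above (standalone;
# key/iv generated inline here rather than persisted via DataBase('secrets')).
import base64

from Cryptodome.Cipher import AES
from Cryptodome.Random import get_random_bytes
from Cryptodome.Util.Padding import pad, unpad

crypt_key = get_random_bytes(32)  # AES-256 key
crypt_iv = get_random_bytes(16)   # one static IV, reused for every value


def encrypt_value(value):
    cipher = AES.new(crypt_key, AES.MODE_CBC, crypt_iv)
    encrypted = base64.b64encode(cipher.encrypt(pad(value.encode(), AES.block_size)))
    return 'secret|' + encrypted.decode()


def decrypt_value(stored):
    cipher = AES.new(crypt_key, AES.MODE_CBC, crypt_iv)
    return unpad(cipher.decrypt(base64.b64decode(stored[7:])), AES.block_size).decode("utf-8")


stored = encrypt_value("example.org")
print(stored.startswith("secret|"), decrypt_value(stored))  # True example.org

Because the same key and IV are reused for every stored secret, identical plaintexts produce identical ciphertexts; CBC only hides repetition when the IV varies per message.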