quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of quasarr might be problematic. Click here for more details.
- quasarr/__init__.py +316 -42
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +387 -0
- quasarr/api/captcha/__init__.py +1189 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +319 -256
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +476 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dl.py +199 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +14 -7
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +54 -0
- quasarr/downloads/sources/nx.py +42 -83
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/downloads/sources/wx.py +127 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +22 -0
- quasarr/providers/html_templates.py +211 -104
- quasarr/providers/imdb_metadata.py +108 -3
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +201 -40
- quasarr/providers/notifications.py +99 -11
- quasarr/providers/obfuscated.py +65 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/dl.py +175 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +656 -79
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +60 -1
- quasarr/providers/web_server.py +1 -1
- quasarr/search/__init__.py +144 -15
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +204 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dl.py +354 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +94 -67
- quasarr/search/sources/fx.py +89 -33
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +75 -21
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/search/sources/wx.py +337 -0
- quasarr/storage/config.py +39 -10
- quasarr/storage/setup.py +269 -97
- quasarr/storage/sqlite_database.py +6 -1
- quasarr-1.23.0.dist-info/METADATA +306 -0
- quasarr-1.23.0.dist-info/RECORD +77 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
- quasarr/arr/__init__.py +0 -423
- quasarr/captcha_solver/__init__.py +0 -284
- quasarr-0.1.6.dist-info/METADATA +0 -81
- quasarr-0.1.6.dist-info/RECORD +0 -31
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Quasarr
|
|
3
|
+
# Project by https://github.com/rix1337
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
import time
|
|
7
|
+
from base64 import urlsafe_b64encode
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from html import unescape
|
|
10
|
+
from urllib.parse import urljoin
|
|
11
|
+
|
|
12
|
+
import requests
|
|
13
|
+
from bs4 import BeautifulSoup
|
|
14
|
+
|
|
15
|
+
from quasarr.providers.imdb_metadata import get_localized_title
|
|
16
|
+
from quasarr.providers.log import info, debug
|
|
17
|
+
|
|
18
|
+
# Short identifier of this indexer; used as the Hostnames config key and in logs.
hostname = "nk"
# Hoster mirrors this source can deliver download links for.
supported_mirrors = ["rapidgator", "ddownload"]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def convert_to_rss_date(date_str: str) -> str:
    """Convert a site-formatted date string into an RFC-822 style RSS date.

    Several input layouts are tried in order; the first that parses wins.
    Returns an empty string when no known layout matches.
    """
    candidate = date_str.strip()
    known_layouts = (
        "%d. %B %Y / %H:%M",
        "%d.%m.%Y / %H:%M",
        "%d.%m.%Y - %H:%M",
        "%Y-%m-%d %H:%M",
    )
    for layout in known_layouts:
        try:
            parsed = datetime.strptime(candidate, layout)
        except Exception:
            continue
        return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")
    return ""
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def extract_size(text: str) -> dict:
    """Parse the first "<number> <unit>" occurrence out of *text*.

    Decimal commas are normalized to dots. Falls back to 0 MB when
    nothing parseable is found.
    """
    found = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
    if not found:
        return {"size": "0", "sizeunit": "MB"}
    number, unit = found.groups()
    return {"size": number.replace(',', '.'), "sizeunit": unit}
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def get_release_field(res, label):
    """Return the value of the release-info list entry labelled *label*.

    *res* is a parsed result element (BeautifulSoup tag); each entry in
    'ul.release-infos li' is expected to carry a <span> holding the label,
    followed by the value text. Matching is case-insensitive. Returns ''
    when no entry matches.
    """
    for li in res.select('ul.release-infos li'):
        sp = li.find('span')
        if not sp:
            # Fix: skip label-less entries instead of aborting the whole
            # scan with an early return (original bug).
            continue
        sp_text = sp.get_text(strip=True)
        if sp_text.lower() == label.lower():
            txt = li.get_text(' ', strip=True)
            # The value is whatever follows the label text.
            return txt[len(sp_text):].strip()
    return ''
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def nk_feed(*args, **kwargs):
    # The site has no dedicated feed endpoint; an empty search doubles as one.
    result = nk_search(*args, **kwargs)
    return result
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def nk_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
    """Search the NK site for releases (empty *search_string* = feed mode).

    Only IMDb-ID searches are supported: the ID is resolved to a localized
    (German) title which is POSTed to the site's search form. Results are
    filtered against the requested IMDb ID, validated, and returned as the
    list of release dicts the Quasarr download API expects.

    :param shared_state: project-wide state object (config, helpers).
    :param start_time: timestamp used for the elapsed-time debug log.
    :param request_from: user agent of the requesting *arr application.
    :param search_string: IMDb ID to search for, or "" for the feed.
    :param mirror: optional mirror restriction; must be in supported_mirrors.
    :param season: optional season filter.
    :param episode: optional episode filter.
    :return: list of release dicts (possibly empty).
    """
    releases = []
    host = shared_state.values["config"]("Hostnames").get(hostname)

    # Only Radarr/Sonarr-style requests are supported by this source.
    if "arr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by {hostname}.')
        return releases

    source_search = ""
    if search_string != "":
        imdb_id = shared_state.is_imdb_id(search_string)
        if imdb_id:
            # Resolve the IMDb ID to the German title the site indexes by.
            local_title = get_localized_title(shared_state, imdb_id, 'de')
            if not local_title:
                info(f"{hostname}: no title for IMDb {imdb_id}")
                return releases
            source_search = local_title
        else:
            # Plain-text searches are not supported.
            return releases
        source_search = unescape(source_search)
    else:
        imdb_id = None

    url = f'https://{host}/search'
    headers = {"User-Agent": shared_state.values["user_agent"]}
    data = {"search": source_search}

    try:
        r = requests.post(url, headers=headers, data=data, timeout=20)
        soup = BeautifulSoup(r.content, 'html.parser')
        results = soup.find_all('div', class_='article-right')
    except Exception as e:
        info(f"{hostname}: search load error: {e}")
        return releases

    if not results:
        return releases

    for result in results:
        try:
            # Every usable result must carry an IMDb link; when searching by
            # ID the linked ID must match the requested one.
            imdb_a = result.select_one('a.imdb')
            if not (imdb_a and imdb_a.get('href')):
                debug(f"{hostname}: could not extract IMDb ID")
                continue
            try:
                release_imdb_id = re.search(r'tt\d+', imdb_a['href']).group()
            except Exception:
                debug(f"{hostname}: could not extract IMDb ID")
                continue
            if imdb_id and release_imdb_id != imdb_id:
                debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
                continue

            a = result.find('a', class_='release-details', href=True)
            if not a:
                continue

            sub_title = result.find('span', class_='subtitle')
            if not sub_title:
                continue
            title = sub_title.get_text(strip=True)

            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
                continue

            source = urljoin(f'https://{host}', a['href'])

            mb = 0
            size_text = get_release_field(result, 'Größe')
            if size_text:
                mb = shared_state.convert_to_mb(extract_size(size_text))

            # Season pack (season requested, no specific episode): the listed
            # size is per-episode, so report size as unknown. Fix: the original
            # compared season/episode against "" although the defaults are
            # None, so the check misfired for None values.
            if season and not episode:
                mb = 0

            size = mb * 1024 * 1024

            # Password is rendered as "<strong>Passwort:</strong> value" in
            # the mirrors paragraph, if present.
            password = ''
            mirrors_p = result.find('p', class_='mirrors')
            if mirrors_p:
                strong = mirrors_p.find('strong')
                if strong and strong.get_text(strip=True).lower().startswith('passwort'):
                    nxt = strong.next_sibling
                    if nxt:
                        val = str(nxt).strip()
                        if val:
                            password = val.split()[0]

            # Publication date/time come from the first two spans of the meta
            # paragraph ("<date>" and "<time> Uhr").
            date_text = ''
            p_meta = result.find('p', class_='meta')
            if p_meta:
                spans = p_meta.find_all('span')
                if len(spans) >= 2:
                    date_part = spans[0].get_text(strip=True)
                    time_part = spans[1].get_text(strip=True).replace('Uhr', '').strip()
                    date_text = f"{date_part} / {time_part}"

            published = convert_to_rss_date(date_text) if date_text else ""

            payload = urlsafe_b64encode(
                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": release_imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })
        except Exception as e:
            # One broken result must not abort the rest of the page.
            info(f"{hostname}: error parsing search result: {e}")
            continue

    elapsed = time.time() - start_time
    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
    return releases
|
quasarr/search/sources/nx.py
CHANGED
|
@@ -3,44 +3,64 @@
|
|
|
3
3
|
# Project by https://github.com/rix1337
|
|
4
4
|
|
|
5
5
|
import html
|
|
6
|
+
import time
|
|
6
7
|
from base64 import urlsafe_b64encode
|
|
7
8
|
|
|
8
9
|
import requests
|
|
9
10
|
|
|
10
11
|
from quasarr.providers.imdb_metadata import get_localized_title
|
|
12
|
+
from quasarr.providers.log import info, debug
|
|
11
13
|
|
|
14
|
+
hostname = "nx"
|
|
15
|
+
supported_mirrors = ["filer"]
|
|
12
16
|
|
|
13
|
-
|
|
17
|
+
|
|
18
|
+
def nx_feed(shared_state, start_time, request_from, mirror=None):
|
|
14
19
|
releases = []
|
|
15
|
-
nx = shared_state.values["config"]("Hostnames").get(
|
|
20
|
+
nx = shared_state.values["config"]("Hostnames").get(hostname.lower())
|
|
16
21
|
password = nx
|
|
17
22
|
|
|
18
|
-
if "
|
|
23
|
+
if "lazylibrarian" in request_from.lower():
|
|
24
|
+
category = "ebook"
|
|
25
|
+
elif "radarr" in request_from.lower():
|
|
19
26
|
category = "movie"
|
|
20
27
|
else:
|
|
21
28
|
category = "episode"
|
|
22
29
|
|
|
30
|
+
if mirror and mirror not in supported_mirrors:
|
|
31
|
+
debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
|
|
32
|
+
' Skipping search!')
|
|
33
|
+
return releases
|
|
34
|
+
|
|
23
35
|
url = f'https://{nx}/api/frontend/releases/category/{category}/tag/all/1/51?sort=date'
|
|
24
36
|
headers = {
|
|
25
37
|
'User-Agent': shared_state.values["user_agent"],
|
|
26
38
|
}
|
|
27
39
|
|
|
28
40
|
try:
|
|
29
|
-
response = requests.get(url, headers)
|
|
41
|
+
response = requests.get(url, headers, timeout=10)
|
|
30
42
|
feed = response.json()
|
|
31
43
|
except Exception as e:
|
|
32
|
-
|
|
44
|
+
info(f"Error loading {hostname.upper()} feed: {e}")
|
|
33
45
|
return releases
|
|
34
46
|
|
|
35
47
|
items = feed['result']['list']
|
|
36
48
|
for item in items:
|
|
37
49
|
try:
|
|
38
50
|
title = item['name']
|
|
51
|
+
|
|
39
52
|
if title:
|
|
40
53
|
try:
|
|
54
|
+
if 'lazylibrarian' in request_from.lower():
|
|
55
|
+
# lazylibrarian can only detect specific date formats / issue numbering for magazines
|
|
56
|
+
title = shared_state.normalize_magazine_title(title)
|
|
57
|
+
|
|
41
58
|
source = f"https://{nx}/release/{item['slug']}"
|
|
59
|
+
imdb_id = item.get('_media', {}).get('imdbid', None)
|
|
42
60
|
mb = shared_state.convert_to_mb(item)
|
|
43
|
-
payload = urlsafe_b64encode(
|
|
61
|
+
payload = urlsafe_b64encode(
|
|
62
|
+
f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode(
|
|
63
|
+
"utf-8")
|
|
44
64
|
link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
|
|
45
65
|
except:
|
|
46
66
|
continue
|
|
@@ -57,8 +77,11 @@ def nx_feed(shared_state, request_from):
|
|
|
57
77
|
|
|
58
78
|
releases.append({
|
|
59
79
|
"details": {
|
|
60
|
-
"title":
|
|
80
|
+
"title": title,
|
|
81
|
+
"hostname": hostname.lower(),
|
|
82
|
+
"imdb_id": imdb_id,
|
|
61
83
|
"link": link,
|
|
84
|
+
"mirror": mirror,
|
|
62
85
|
"size": size,
|
|
63
86
|
"date": published,
|
|
64
87
|
"source": source
|
|
@@ -67,38 +90,49 @@ def nx_feed(shared_state, request_from):
|
|
|
67
90
|
})
|
|
68
91
|
|
|
69
92
|
except Exception as e:
|
|
70
|
-
|
|
93
|
+
info(f"Error parsing {hostname.upper()} feed: {e}")
|
|
94
|
+
|
|
95
|
+
elapsed_time = time.time() - start_time
|
|
96
|
+
debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
|
|
71
97
|
|
|
72
98
|
return releases
|
|
73
99
|
|
|
74
100
|
|
|
75
|
-
def nx_search(shared_state, request_from,
|
|
101
|
+
def nx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
|
|
76
102
|
releases = []
|
|
77
|
-
nx = shared_state.values["config"]("Hostnames").get(
|
|
103
|
+
nx = shared_state.values["config"]("Hostnames").get(hostname.lower())
|
|
78
104
|
password = nx
|
|
79
105
|
|
|
80
|
-
if "
|
|
106
|
+
if "lazylibrarian" in request_from.lower():
|
|
107
|
+
valid_type = "ebook"
|
|
108
|
+
elif "radarr" in request_from.lower():
|
|
81
109
|
valid_type = "movie"
|
|
82
110
|
else:
|
|
83
111
|
valid_type = "episode"
|
|
84
112
|
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
113
|
+
if mirror and mirror not in supported_mirrors:
|
|
114
|
+
debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
|
|
115
|
+
' Skipping search!')
|
|
88
116
|
return releases
|
|
89
117
|
|
|
90
|
-
|
|
118
|
+
imdb_id = shared_state.is_imdb_id(search_string)
|
|
119
|
+
if imdb_id:
|
|
120
|
+
search_string = get_localized_title(shared_state, imdb_id, 'de')
|
|
121
|
+
if not search_string:
|
|
122
|
+
info(f"Could not extract title from IMDb-ID {imdb_id}")
|
|
123
|
+
return releases
|
|
124
|
+
search_string = html.unescape(search_string)
|
|
91
125
|
|
|
92
|
-
url = f'https://{nx}/api/frontend/search/{
|
|
126
|
+
url = f'https://{nx}/api/frontend/search/{search_string}'
|
|
93
127
|
headers = {
|
|
94
128
|
'User-Agent': shared_state.values["user_agent"],
|
|
95
129
|
}
|
|
96
130
|
|
|
97
131
|
try:
|
|
98
|
-
response = requests.get(url, headers)
|
|
132
|
+
response = requests.get(url, headers, timeout=10)
|
|
99
133
|
feed = response.json()
|
|
100
134
|
except Exception as e:
|
|
101
|
-
|
|
135
|
+
info(f"Error loading {hostname.upper()} search: {e}")
|
|
102
136
|
return releases
|
|
103
137
|
|
|
104
138
|
items = feed['result']['releases']
|
|
@@ -107,10 +141,24 @@ def nx_search(shared_state, request_from, imdb_id):
|
|
|
107
141
|
if item['type'] == valid_type:
|
|
108
142
|
title = item['name']
|
|
109
143
|
if title:
|
|
144
|
+
if not shared_state.is_valid_release(title,
|
|
145
|
+
request_from,
|
|
146
|
+
search_string,
|
|
147
|
+
season,
|
|
148
|
+
episode):
|
|
149
|
+
continue
|
|
150
|
+
|
|
151
|
+
if 'lazylibrarian' in request_from.lower():
|
|
152
|
+
# lazylibrarian can only detect specific date formats / issue numbering for magazines
|
|
153
|
+
title = shared_state.normalize_magazine_title(title)
|
|
154
|
+
|
|
110
155
|
try:
|
|
111
156
|
source = f"https://{nx}/release/{item['slug']}"
|
|
157
|
+
if not imdb_id:
|
|
158
|
+
imdb_id = item.get('_media', {}).get('imdbid', None)
|
|
159
|
+
|
|
112
160
|
mb = shared_state.convert_to_mb(item)
|
|
113
|
-
payload = urlsafe_b64encode(f"{title}|{source}|{mb}|{password}".
|
|
161
|
+
payload = urlsafe_b64encode(f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".
|
|
114
162
|
encode("utf-8")).decode("utf-8")
|
|
115
163
|
link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
|
|
116
164
|
except:
|
|
@@ -128,8 +176,11 @@ def nx_search(shared_state, request_from, imdb_id):
|
|
|
128
176
|
|
|
129
177
|
releases.append({
|
|
130
178
|
"details": {
|
|
131
|
-
"title":
|
|
179
|
+
"title": title,
|
|
180
|
+
"hostname": hostname.lower(),
|
|
181
|
+
"imdb_id": imdb_id,
|
|
132
182
|
"link": link,
|
|
183
|
+
"mirror": mirror,
|
|
133
184
|
"size": size,
|
|
134
185
|
"date": published,
|
|
135
186
|
"source": source
|
|
@@ -138,6 +189,9 @@ def nx_search(shared_state, request_from, imdb_id):
|
|
|
138
189
|
})
|
|
139
190
|
|
|
140
191
|
except Exception as e:
|
|
141
|
-
|
|
192
|
+
info(f"Error parsing {hostname.upper()} search: {e}")
|
|
193
|
+
|
|
194
|
+
elapsed_time = time.time() - start_time
|
|
195
|
+
debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
|
|
142
196
|
|
|
143
197
|
return releases
|