quasarr 2.4.8__py3-none-any.whl → 2.4.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of quasarr might be problematic.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/METADATA +4 -3
- quasarr-2.4.10.dist-info/RECORD +81 -0
- quasarr-2.4.8.dist-info/RECORD +0 -81
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/WHEEL +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/wd.py
CHANGED
```diff
@@ -12,9 +12,9 @@ from urllib.parse import quote, quote_plus
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import …
+from quasarr.providers.log import debug, info
 
 hostname = "wd"
 supported_mirrors = ["rapidgator", "ddownload", "katfile", "fikper", "turbobit"]
@@ -47,15 +47,15 @@ def extract_size(text):
 
 
 def _parse_rows(
-    … (9 removed parameter lines truncated in the source render)
+    soup,
+    shared_state,
+    url_base,
+    password,
+    mirror_filter,
+    request_from=None,
+    search_string=None,
+    season=None,
+    episode=None,
 ):
     """
     Walk the <table> rows, extract one release per row.
@@ -75,7 +75,7 @@ def _parse_rows(
     releases = []
     is_search = search_string is not None
 
-    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime(…
+    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
 
     for tr in soup.select("table.table tbody tr.lh-sm"):
         try:
```
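The one-hour cutoff is kept as a `"%Y-%m-%d %H:%M:%S"` string rather than a `datetime`. That works because the format is zero-padded and ordered from most to least significant field, so lexicographic comparison matches chronological order. A minimal sketch (the `published` value is a hypothetical row timestamp, not taken from the diff):

```python
from datetime import datetime, timedelta

# Zero-padded, most-significant-first: string order equals time order.
one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")

published = "2025-01-01 12:00:00"  # hypothetical timestamp scraped from a row
is_recent = published > one_hour_ago  # plain string comparison is safe here
print(is_recent)
```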
```diff
@@ -93,28 +93,30 @@ def _parse_rows(
 
             # search context contains non-video releases (ebooks, games, etc.)
             if is_search:
-                if not shared_state.is_valid_release(…
-                    …
-                    …
-                    season,
-                    episode):
+                if not shared_state.is_valid_release(
+                    title, request_from, search_string, season, episode
+                ):
                     continue
 
-                if …
+                if "lazylibrarian" in request_from.lower():
                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
                     title = shared_state.normalize_magazine_title(title)
                 else:
                     # drop .XXX. unless user explicitly searched xxx
-                    if XXX_REGEX.search(title) and …
+                    if XXX_REGEX.search(title) and "xxx" not in search_string.lower():
                         continue
                     # require resolution/codec
-                    if not (…
+                    if not (
+                        RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)
+                    ):
                         continue
                     # require no spaces in title
                     if " " in title:
                         continue
 
-            hoster_names = tr.find("span", class_="button-warezkorb")[…
+            hoster_names = tr.find("span", class_="button-warezkorb")[
+                "data-hoster-names"
+            ]
             mirrors = [m.strip().lower() for m in hoster_names.split(",")]
             valid = [m for m in mirrors if m in supported_mirrors]
             if not valid or (mirror_filter and mirror_filter not in valid):
```
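The mirror gate reads the span's comma-separated `data-hoster-names` attribute, lowercases each name, intersects the result with `supported_mirrors`, and skips the row unless at least one supported mirror remains and the requested mirror (if any) is among them. The same predicate as a standalone sketch, with made-up attribute values:

```python
supported_mirrors = ["rapidgator", "ddownload", "katfile", "fikper", "turbobit"]

def row_passes(hoster_names: str, mirror_filter: str | None) -> bool:
    # hoster_names stands in for the span's data-hoster-names attribute
    mirrors = [m.strip().lower() for m in hoster_names.split(",")]
    valid = [m for m in mirrors if m in supported_mirrors]
    # rows are kept by: not (not valid or (mirror_filter and mirror_filter not in valid))
    return bool(valid) and (not mirror_filter or mirror_filter in valid)

assert row_passes("Rapidgator, Katfile", None)
assert row_passes("Rapidgator, Katfile", "katfile")
assert not row_passes("Rapidgator, Katfile", "fikper")
assert not row_passes("someunknownhost", None)
```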
```diff
@@ -131,21 +133,25 @@ def _parse_rows(
             payload = urlsafe_b64encode(
                 f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}|{hostname}".encode()
             ).decode()
-            download_link = …
-            … (14 more removed lines truncated in the source render)
+            download_link = (
+                f"{shared_state.values['internal_address']}/download/?payload={payload}"
+            )
+
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": download_link,
+                        "mirror": mirror_filter,
+                        "size": size_bytes,
+                        "date": published,
+                        "source": source,
+                    },
+                    "type": "protected",
+                }
+            )
         except Exception as e:
             debug(f"Error parsing {hostname.upper()} row: {e}")
             continue
```
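Each release's download link carries all of its metadata as a urlsafe-base64, pipe-delimited payload. A round-trip sketch under that assumption; the field values and the internal address are hypothetical, and the decode half mirrors what a `/download/` handler would need to do:

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

# Hypothetical values following the payload layout in the diff:
# title|source|mirror|size_mb|password|imdb_id|hostname
fields = ["Some.Show.S01E01.1080p.WEB.x264-GRP", "https://example.org/post/123",
          "rapidgator", "4096", "example-password", "tt1234567", "wd"]

payload = urlsafe_b64encode("|".join(fields).encode()).decode()
link = f"http://127.0.0.1:9696/download/?payload={payload}"  # hypothetical internal_address

# Decoding recovers the fields; a literal "|" inside any field would break the split.
title, source, mirror, mb, password, imdb_id, host = (
    urlsafe_b64decode(payload.encode()).decode().split("|")
)
assert title == fields[0] and host == "wd"
```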
```diff
@@ -164,7 +170,7 @@ def wd_feed(shared_state, start_time, request_from, mirror=None):
         feed_type = "Serien"
 
     url = f"https://{wd}/{feed_type}"
-    headers = {…
+    headers = {"User-Agent": shared_state.values["user_agent"]}
     try:
         r = requests.get(url, headers=headers, timeout=10)
         r.raise_for_status()
```
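Both `wd_feed` and `wd_search` fetch pages with the shared user agent, a 10-second timeout, and `raise_for_status()`, so HTTP 4xx/5xx responses funnel into the same `except` path as network errors. A minimal sketch of the pattern, with a placeholder host and user agent:

```python
import requests
from bs4 import BeautifulSoup

url = "https://example.org/Serien"  # placeholder for f"https://{wd}/{feed_type}"
headers = {"User-Agent": "Mozilla/5.0"}  # placeholder for shared_state.values["user_agent"]

try:
    r = requests.get(url, headers=headers, timeout=10)
    r.raise_for_status()  # HTTP errors join timeouts in the except branch
    soup = BeautifulSoup(r.content, "html.parser")
except Exception as e:
    print(f"Error loading feed: {e}")
```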
```diff
@@ -172,7 +178,9 @@ def wd_feed(shared_state, start_time, request_from, mirror=None):
         releases = _parse_rows(soup, shared_state, wd, password, mirror)
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
         releases = []
     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
 
```
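The guard `str(e) if "e" in dir() else "Error occurred"` deserves a second look: inside an `except Exception as e:` block the name `e` is always a bound local, so `"e" in dir()` is always true there and the fallback branch is unreachable; a bare `str(e)` would behave identically. A small check:

```python
def message_for(exc: Exception) -> str:
    try:
        raise exc
    except Exception as e:
        # Within the handler, "e" is a bound local, so dir() always contains
        # it and the "Error occurred" fallback can never be selected.
        return str(e) if "e" in dir() else "Error occurred"

assert message_for(ValueError("boom")) == "boom"
```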
```diff
@@ -181,14 +189,22 @@ def wd_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def wd_search(…
+def wd_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     wd = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = wd
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if imdb_id:
-        search_string = get_localized_title(shared_state, imdb_id,…
+        search_string = get_localized_title(shared_state, imdb_id, "de")
         if not search_string:
             info(f"Could not extract title from IMDb-ID {imdb_id}")
             return releases
```
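`wd_search` first checks whether the incoming search string is already an IMDb-ID and, if so, swaps it for the German (`"de"`) localized title before querying the site. The real check lives in `shared_state.is_imdb_id`; a hypothetical stand-in to illustrate the shape of input it accepts:

```python
import re

# Hypothetical pattern; the actual validation is shared_state.is_imdb_id().
IMDB_ID_REGEX = re.compile(r"^tt\d{7,8}$")

def looks_like_imdb_id(search_string: str) -> str | None:
    m = IMDB_ID_REGEX.match(search_string.strip())
    return m.group(0) if m else None

assert looks_like_imdb_id("tt0133093") == "tt0133093"
assert looks_like_imdb_id("The Matrix") is None
```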
```diff
@@ -196,21 +212,28 @@ def wd_search(shared_state, start_time, request_from, search_string, mirror=None
 
     q = quote_plus(search_string)
     url = f"https://{wd}/search?q={q}"
-    headers = {…
+    headers = {"User-Agent": shared_state.values["user_agent"]}
 
     try:
         r = requests.get(url, headers=headers, timeout=10)
         r.raise_for_status()
         soup = BeautifulSoup(r.content, "html.parser")
         releases = _parse_rows(
-            soup,…
+            soup,
+            shared_state,
+            wd,
+            password,
+            mirror,
             request_from=request_from,
             search_string=search_string,
-            season=season,…
+            season=season,
+            episode=episode,
         )
     except Exception as e:
         info(f"Error loading {hostname.upper()} search: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         releases = []
     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
 
```