quasarr 2.4.7__py3-none-any.whl → 2.4.9__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
- quasarr-2.4.9.dist-info/RECORD +81 -0
- quasarr-2.4.7.dist-info/RECORD +0 -81
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/dt.py
CHANGED

```diff
@@ -7,15 +7,15 @@ import html
 import re
 import time
 from base64 import urlsafe_b64encode
-from datetime import …
+from datetime import timedelta, timezone
 from urllib.parse import quote_plus
 
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import …
+from quasarr.providers.log import debug, info
 
 hostname = "dt"
 supported_mirrors = ["rapidgator", "nitroflare", "ddownload"]
@@ -32,19 +32,19 @@ def extract_size(text):
 
 
 def parse_published_datetime(article):
-    date_box = article.find(…
-    mon = date_box.find(…
-    day = date_box.find(…
-    year = date_box.find(…
-    month_num = datetime.datetime.strptime(mon, …
+    date_box = article.find("div", class_="mr-2 shadow-sm1 text-center")
+    mon = date_box.find("small").text.strip()
+    day = date_box.find("h4").text.strip()
+    year = date_box.find("h6").text.strip()
+    month_num = datetime.datetime.strptime(mon, "%b").month
 
-    time_icon = article.select_one(…
+    time_icon = article.select_one("i.fa-clock-o")
     if time_icon:
         # its parent <span> contains e.g. "19:12"
         raw = time_icon.parent.get_text(strip=True)
-        m = re.search(r…
+        m = re.search(r"(\d{1,2}:\d{2})", raw)
         if m:
-            hh, mm = map(int, m.group(1).split(…
+            hh, mm = map(int, m.group(1).split(":"))
         else:
             hh, mm = 0, 0
     else:
```
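The rewritten `parse_published_datetime` reads the month, day, and year out of a small date box and the clock time from the `<span>` wrapping a clock icon. Below is a runnable sketch of that extraction against sample markup; the markup and the final `datetime` assembly are assumptions (only the selectors come from the hunk above), and the function's tail, which presumably uses the newly imported `timedelta` and `timezone`, is not visible in this diff.

```python
import datetime
import re

from bs4 import BeautifulSoup

# Hypothetical article markup, shaped only to match the selectors in the diff.
html = """
<article>
  <div class="mr-2 shadow-sm1 text-center">
    <small>Dec</small><h4>24</h4><h6>2024</h6>
  </div>
  <span><i class="fa fa-clock-o"></i> 19:12</span>
</article>
"""
article = BeautifulSoup(html, "html.parser")

date_box = article.find("div", class_="mr-2 shadow-sm1 text-center")
mon = date_box.find("small").text.strip()                # "Dec"
day = date_box.find("h4").text.strip()                   # "24"
year = date_box.find("h6").text.strip()                  # "2024"
month_num = datetime.datetime.strptime(mon, "%b").month  # 12

time_icon = article.select_one("i.fa-clock-o")
raw = time_icon.parent.get_text(strip=True)              # "19:12"
m = re.search(r"(\d{1,2}:\d{2})", raw)
hh, mm = map(int, m.group(1).split(":")) if m else (0, 0)

print(datetime.datetime(int(year), month_num, int(day), hh, mm))
# -> 2024-12-24 19:12:00
```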
```diff
@@ -69,39 +69,50 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
         feed_type = "media/tv-show/"
 
     if mirror and mirror not in supported_mirrors:
-        debug(…
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!'
+        )
         return releases
 
-    url = f…
-    headers = {…
+    url = f"https://{dt}/{feed_type}"
+    headers = {"User-Agent": shared_state.values["user_agent"]}
 
     try:
         r = requests.get(url, headers=headers, timeout=30)
         r.raise_for_status()
         feed = BeautifulSoup(r.content, "html.parser")
 
-        for article in feed.find_all(…
+        for article in feed.find_all("article"):
             try:
-                link_tag = article.select_one(…
+                link_tag = article.select_one("h4.font-weight-bold a")
                 if not link_tag:
-                    debug(…
+                    debug(
+                        f"Link tag not found in article: {article} at {hostname.upper()}"
+                    )
                     continue
 
-                source = link_tag[…
+                source = link_tag["href"]
                 title_raw = link_tag.text.strip()
-                title = …
-…
+                title = (
+                    title_raw.replace(" - ", "-")
+                    .replace(" ", ".")
+                    .replace("(", "")
+                    .replace(")", "")
+                )
+
+                if "lazylibrarian" in request_from.lower():
                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
                     title = shared_state.normalize_magazine_title(title)
 
                 try:
-                    imdb_id = re.search(r…
+                    imdb_id = re.search(r"tt\d+", str(article)).group()
                 except:
                     imdb_id = None
 
-                body_text = article.find(…
-                size_match = re.search(…
+                body_text = article.find("div", class_="card-body").get_text(" ")
+                size_match = re.search(
+                    r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE
+                )
                 if not size_match:
                     debug(f"Size not found in article: {article} at {hostname.upper()}")
                     continue
@@ -113,32 +124,40 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
                 published = parse_published_datetime(article)
 
                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(…
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed: {e}")
-                mark_hostname_issue(…
+                mark_hostname_issue(
+                    hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+                )
                 continue
 
-            releases.append(…
-…
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname.lower(),
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": source,
+                    },
+                    "type": "protected",
+                }
+            )
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
 
     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
@@ -148,7 +167,15 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def dt_search(…
+def dt_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = dt
@@ -161,13 +188,15 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
         cat_id = "64"
 
     if mirror and mirror not in supported_mirrors:
-        debug(…
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!'
+        )
         return releases
 
     try:
         imdb_id = shared_state.is_imdb_id(search_string)
         if imdb_id:
-            search_string = get_localized_title(shared_state, imdb_id, …
+            search_string = get_localized_title(shared_state, imdb_id, "en")
             if not search_string:
                 info(f"Could not extract title from IMDb-ID {imdb_id}")
                 return releases
@@ -207,21 +236,19 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
                     continue
                 source = link_tag["href"]
                 title_raw = link_tag.text.strip()
-                title = (…
-…
-                if not shared_state.is_valid_release(…
-…
-                        season,
-                        episode):
+                title = (
+                    title_raw.replace(" - ", "-")
+                    .replace(" ", ".")
+                    .replace("(", "")
+                    .replace(")", "")
+                )
+
+                if not shared_state.is_valid_release(
+                    title, request_from, search_string, season, episode
+                ):
                     continue
 
-                if …
+                if "lazylibrarian" in request_from.lower():
                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
                     title = shared_state.normalize_magazine_title(title)
@@ -231,7 +258,9 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
                 imdb_id = None
 
                 body_text = article.find("div", class_="card-body").get_text(" ")
-                m = re.search(…
+                m = re.search(
+                    r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE
+                )
                 if not m:
                     debug(f"Size not found in search-article: {title_raw}")
                     continue
@@ -242,33 +271,40 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
                 published = parse_published_datetime(article)
 
                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"…
-…
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode(
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} search item: {e}")
-                mark_hostname_issue(…
+                mark_hostname_issue(
+                    hostname, "search", str(e) if "e" in dir() else "Error occurred"
+                )
                 continue
 
-            releases.append(…
-…
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname.lower(),
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": source,
+                    },
+                    "type": "protected",
+                }
+            )
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} search page: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
 
     elapsed = time.time() - start_time
     debug(f"Search time: {elapsed:.2f}s ({hostname})")
```
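Both `dt_feed` and `dt_search` flatten the release metadata into a pipe-delimited string and URL-safe-Base64 it into the `/download` payload; note that in 2.4.9 the `dt_search` payload still omits the trailing `hostname` field that `dt_feed` appends. A minimal sketch of the round-trip, assuming no field contains a literal `|`; `decode_payload` and the sample values are illustrative, not code from the package:

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

# Field order used by dt_feed in this diff; values here are placeholders.
fields = ["Show.S01E01.1080p.WEB", "https://dt.example/post/1",
          "rapidgator", "2048", "dt.example", "tt0000000", "dt"]

payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")
link = f"http://127.0.0.1:8080/download/?payload={payload}"  # internal_address is config-dependent


def decode_payload(payload: str) -> list[str]:
    """Hypothetical inverse of the encoding above."""
    return urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")


assert decode_payload(payload) == fields
```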
quasarr/search/sources/dw.py
CHANGED

```diff
@@ -10,26 +10,50 @@ from base64 import urlsafe_b64encode
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
-from quasarr.providers.log import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+from quasarr.providers.log import debug, info
 
 hostname = "dw"
 supported_mirrors = ["1fichier", "rapidgator", "ddownload", "katfile"]
 
 
 def convert_to_rss_date(date_str):
-    german_months = […
-…
+    german_months = [
+        "Januar",
+        "Februar",
+        "März",
+        "April",
+        "Mai",
+        "Juni",
+        "Juli",
+        "August",
+        "September",
+        "Oktober",
+        "November",
+        "Dezember",
+    ]
+    english_months = [
+        "January",
+        "February",
+        "March",
+        "April",
+        "May",
+        "June",
+        "July",
+        "August",
+        "September",
+        "October",
+        "November",
+        "December",
+    ]
 
     for german, english in zip(german_months, english_months):
         if german in date_str:
            date_str = date_str.replace(german, english)
            break
 
-    parsed_date = datetime.datetime.strptime(date_str, …
-    rss_date = parsed_date.strftime(…
+    parsed_date = datetime.datetime.strptime(date_str, "%d. %B %Y / %H:%M")
+    rss_date = parsed_date.strftime("%a, %d %b %Y %H:%M:%S %z")
 
     return rss_date
 
```
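`convert_to_rss_date`, now fully expanded above, swaps the German month name for its English equivalent so `strptime` can parse the date under the default C locale, then re-emits it in RFC 822 style for the *arr-facing feed. A condensed, runnable sketch of the same logic (the mapping dict replaces the two parallel lists purely for brevity):

```python
import datetime

# German month names mapped to the English names strptime's C locale expects.
MONTHS = {
    "Januar": "January", "Februar": "February", "März": "March",
    "April": "April", "Mai": "May", "Juni": "June", "Juli": "July",
    "August": "August", "September": "September", "Oktober": "October",
    "November": "November", "Dezember": "December",
}


def convert_to_rss_date(date_str: str) -> str:
    for german, english in MONTHS.items():
        if german in date_str:
            date_str = date_str.replace(german, english)
            break
    parsed = datetime.datetime.strptime(date_str, "%d. %B %Y / %H:%M")
    # %z renders as an empty string here because the parsed datetime is naive.
    return parsed.strftime("%a, %d %b %Y %H:%M:%S %z")


print(convert_to_rss_date("24. Dezember 2024 / 19:12"))
# -> "Tue, 24 Dec 2024 19:12:00 " (trailing space from the empty %z)
```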
```diff
@@ -59,7 +83,9 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
     password = dw
 
     if not "arr" in request_from.lower():
-        debug(…
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     if "Radarr" in request_from:
@@ -68,20 +94,22 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
         feed_type = "videos/serien/"
 
     if mirror and mirror not in supported_mirrors:
-        debug(…
-…
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+            " Skipping search!"
+        )
         return releases
 
-    url = f…
+    url = f"https://{dw}/{feed_type}"
     headers = {
-…
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
         r = requests.get(url, headers=headers, timeout=30)
         r.raise_for_status()
         feed = BeautifulSoup(r.content, "html.parser")
-        articles = feed.find_all(…
+        articles = feed.find_all("h4")
 
         for article in articles:
             try:
@@ -89,7 +117,7 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
                 title = article.a.text.strip()
 
                 try:
-                    imdb_id = re.search(r…
+                    imdb_id = re.search(r"tt\d+", str(article)).group()
                 except:
                     imdb_id = None
 
@@ -97,33 +125,44 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
                 size_item = extract_size(size_info)
                 mb = shared_state.convert_to_mb(size_item)
                 size = mb * 1024 * 1024
-                date = article.parent.parent.find(…
+                date = article.parent.parent.find(
+                    "span", {"class": "date updated"}
+                ).text.strip()
                 published = convert_to_rss_date(date)
                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(…
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                        "utf-8"
+                    )
+                ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed: {e}")
-                mark_hostname_issue(…
+                mark_hostname_issue(
+                    hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+                )
                 continue
 
-            releases.append(…
-…
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname.lower(),
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": source,
+                    },
+                    "type": "protected",
+                }
+            )
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
 
     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
@@ -133,13 +172,23 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def dw_search(…
+def dw_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = dw
 
     if not "arr" in request_from.lower():
-        debug(…
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     if "Radarr" in request_from:
@@ -148,23 +197,27 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
         search_type = "videocategory=serien"
 
     if mirror and mirror not in ["1fichier", "rapidgator", "ddownload", "katfile"]:
-        debug(…
+        debug(
+            f'Mirror "{mirror}" not not supported by {hostname.upper()}. Skipping search!'
+        )
         return releases
 
-    url = f…
+    url = f"https://{dw}/?s={search_string}&{search_type}"
     headers = {
-…
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
         r = requests.get(url, headers=headers, timeout=10)
         r.raise_for_status()
         search = BeautifulSoup(r.content, "html.parser")
-        results = search.find_all(…
+        results = search.find_all("h4")
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} search feed: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     imdb_id = shared_state.is_imdb_id(search_string)
@@ -174,16 +227,14 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
         try:
             title = result.a.text.strip()
 
-            if not shared_state.is_valid_release(…
-…
-                    season,
-                    episode):
+            if not shared_state.is_valid_release(
+                title, request_from, search_string, season, episode
+            ):
                 continue
 
             if not imdb_id:
                 try:
-                    imdb_id = re.search(r…
+                    imdb_id = re.search(r"tt\d+", str(result)).group()
                 except:
                     imdb_id = None
 
@@ -192,29 +243,38 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
             size_item = extract_size(size_info)
             mb = shared_state.convert_to_mb(size_item)
             size = mb * 1024 * 1024
-            date = result.parent.parent.find(…
+            date = result.parent.parent.find(
+                "span", {"class": "date updated"}
+            ).text.strip()
             published = convert_to_rss_date(date)
             payload = urlsafe_b64encode(
-                f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(…
+                f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                    "utf-8"
+                )
+            ).decode("utf-8")
             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
         except Exception as e:
             info(f"Error parsing {hostname.upper()} search: {e}")
-            mark_hostname_issue(…
+            mark_hostname_issue(
+                hostname, "search", str(e) if "e" in dir() else "Error occurred"
+            )
             continue
 
-        releases.append(…
-…
+        releases.append(
+            {
+                "details": {
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source,
+                },
+                "type": "protected",
+            }
+        )
 
     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
```
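One pattern recurs throughout both files: `str(e) if "e" in dir() else "Error occurred"`. A bare `dir()` call returns the names bound in the current local scope, so the expression asks whether the exception variable still exists before stringifying it. Inside an `except Exception as e:` block the name is always bound (Python only unbinds `e` once the block exits), so the fallback string mainly guards against future refactoring. A minimal demonstration:

```python
def parse_item():
    try:
        raise ValueError("boom")
    except Exception as e:
        # dir() with no arguments lists local names, so "e" is present here
        # and str(e) is what gets reported.
        return str(e) if "e" in dir() else "Error occurred"


print(parse_item())   # -> boom
print("e" in dir())   # -> False at module scope, where the name was never bound
```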