quasarr-2.4.8-py3-none-any.whl → quasarr-2.4.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of quasarr has been flagged as potentially problematic.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/METADATA +4 -3
- quasarr-2.4.10.dist-info/RECORD +81 -0
- quasarr-2.4.8.dist-info/RECORD +0 -81
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/WHEEL +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/wx.py
CHANGED
@@ -10,14 +10,15 @@ from base64 import urlsafe_b64encode
 from datetime import datetime
 
 import requests
-from bs4 import BeautifulSoup
-from bs4 import XMLParsedAsHTMLWarning
+from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning
 
-from quasarr.providers.hostname_issues import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import …
+from quasarr.providers.log import debug, info
 
-warnings.filterwarnings(…
+warnings.filterwarnings(
+    "ignore", category=XMLParsedAsHTMLWarning
+)  # we dont want to use lxml
 
 hostname = "wx"
 supported_mirrors = []
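The merged import and the module-level filter silence BeautifulSoup's warning about feeding an XML document to an HTML parser. A minimal sketch of the behavior being suppressed (the feed string is invented):

import warnings
from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning

# Parsing XML (such as an RSS/Atom feed) with "html.parser" raises
# XMLParsedAsHTMLWarning; filtering it lets the feed be handled without
# an XML parser like lxml.
warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)

feed = '<?xml version="1.0"?><feed><entry><title>Example</title></entry></feed>'
soup = BeautifulSoup(feed, "html.parser")
print([e.find("title").get_text() for e in soup.find_all("entry")])  # ['Example']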
@@ -31,23 +32,25 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if "lazylibrarian" in request_from.lower():
-        debug(…
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
-    rss_url = f…
+    rss_url = f"https://{host}/rss"
     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
         r = requests.get(rss_url, headers=headers, timeout=10)
         r.raise_for_status()
 
-        soup = BeautifulSoup(r.content, …
-        items = soup.find_all(…
+        soup = BeautifulSoup(r.content, "html.parser")
+        items = soup.find_all("entry")
 
         if not items:
-            items = soup.find_all(…
+            items = soup.find_all("item")
 
         if not items:
             info(f"{hostname.upper()}: No entries found in RSS feed")
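The feed parser probes Atom-style <entry> elements first and falls back to classic RSS <item> tags. The same probing in isolation, with a made-up RSS document:

from bs4 import BeautifulSoup

doc = "<rss><channel><item><title>X</title></item></channel></rss>"
soup = BeautifulSoup(doc, "html.parser")

# Atom feeds wrap releases in <entry>, classic RSS in <item>; try both.
items = soup.find_all("entry")
if not items:
    items = soup.find_all("item")
print(len(items))  # 1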
@@ -57,7 +60,7 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
 
         for item in items:
             try:
-                title_tag = item.find(…
+                title_tag = item.find("title")
                 if not title_tag:
                     continue
 
@@ -66,14 +69,14 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
                     continue
 
                 title = html.unescape(title)
-                title = title.replace(…
-                title = title.replace(…
+                title = title.replace("]]>", "").replace("<![CDATA[", "")
+                title = title.replace(" ", ".")
 
-                link_tag = item.find(…
-                if link_tag and link_tag.has_attr(…
-                    source = link_tag[…
+                link_tag = item.find("link", rel="alternate")
+                if link_tag and link_tag.has_attr("href"):
+                    source = link_tag["href"]
                 else:
-                    link_tag = item.find(…
+                    link_tag = item.find("link")
                     if not link_tag:
                         continue
                     source = link_tag.get_text(strip=True)
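Feed titles arrive HTML-escaped and sometimes CDATA-wrapped; the cleanup unescapes, strips the CDATA markers, and converts spaces to scene-style dots. A round trip on an invented sample:

import html

raw = "&lt;![CDATA[Some Release 1080p]]&gt;"
title = html.unescape(raw)  # '<![CDATA[Some Release 1080p]]>'
title = title.replace("]]>", "").replace("<![CDATA[", "")
title = title.replace(" ", ".")
print(title)  # 'Some.Release.1080p'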
@@ -81,7 +84,7 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
                 if not source:
                     continue
 
-                pub_date = item.find(…
+                pub_date = item.find("updated") or item.find("pubDate")
                 if pub_date:
                     published = pub_date.get_text(strip=True)
                 else:
@@ -94,23 +97,27 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
                 password = host.upper()
 
                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(…
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                releases.append(…
-                …
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": source,
+                        },
+                        "type": "protected",
+                    }
+                )
 
             except Exception as e:
                 debug(f"{hostname.upper()}: error parsing RSS entry: {e}")
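Both the feed and the search path pack release metadata into one urlsafe-base64 payload of pipe-separated fields (title, source, mirror, size, password, IMDb id, hostname). A round-trip sketch; the values and the decoding side are illustrative, not Quasarr's actual consumer:

from base64 import urlsafe_b64decode, urlsafe_b64encode

fields = ("Some.Release.1080p", "https://example.invalid/detail/abc",
          "None", "4096", "EXAMPLE", "tt0000000", "wx")
payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")

# Decoding reverses the steps; note that a title containing "|" would break this.
title, source, mirror, mb, password, imdb_id, host_id = (
    urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
)
assert (title, host_id) == ("Some.Release.1080p", "wx")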
@@ -118,7 +125,9 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     elapsed_time = time.time() - start_time
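The new error reporting uses str(e) if "e" in dir() else "Error occurred". Inside an except ... as e: block the name is always bound, so within that block the fallback never fires; a quick demonstration of why:

def demo():
    try:
        raise ValueError("boom")
    except ValueError as e:
        # dir() with no arguments lists names in the local scope; "e" is
        # bound for the duration of the except block.
        return "e" in dir(), str(e)

print(demo())  # (True, 'boom')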
@@ -129,7 +138,15 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def wx_search(…
+def wx_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     """
     Search using internal API.
     Deduplicates results by fulltitle - each unique release appears only once.
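The explicit season and episode parameters are threaded through to shared_state.is_valid_release() in the hunks below. That helper's internals are not part of this diff; a hypothetical minimal stand-in, only to illustrate the role of the two parameters:

import re

def is_valid_release(title, season=None, episode=None):
    # Hypothetical stand-in: accept a title only if it carries the
    # requested SxxEyy markers.
    if season is not None and not re.search(rf"S{int(season):02d}", title, re.I):
        return False
    if episode is not None and not re.search(rf"E{int(episode):02d}", title, re.I):
        return False
    return True

print(is_valid_release("Show.Name.S01E02.German.1080p", season=1, episode=2))  # True
print(is_valid_release("Show.Name.S02E01.German.1080p", season=1))             # False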
@@ -138,48 +155,52 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if "lazylibrarian" in request_from.lower():
-        debug(…
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if imdb_id:
         debug(f"{hostname.upper()}: Received IMDb ID: {imdb_id}")
-        title = get_localized_title(shared_state, imdb_id, …
+        title = get_localized_title(shared_state, imdb_id, "de")
         if not title:
             debug(f"{hostname.upper()}: no title for IMDb {imdb_id}")
             return releases
-        debug(…
+        debug(
+            f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'"
+        )
         search_string = html.unescape(title)
     else:
         debug(f"{hostname.upper()}: Using search string directly: '{search_string}'")
 
-    api_url = f…
+    api_url = f"https://api.{host}/start/search"
 
     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
+        "Accept": "application/json, text/plain, */*",
+        "Referer": f"https://{host}/search",
     }
 
     params = {
-        …
+        "__LOAD_P": "",
+        "per_page": 50,
+        "q": search_string,
+        "selectedTypes": "",
+        "selectedGenres": "",
+        "types": "movie,series,anime",
+        "genres": "",
+        "years": "",
+        "ratings": "",
+        "page": 1,
+        "sortBy": "latest",
+        "sortOrder": "desc",
     }
 
     if "sonarr" in request_from.lower():
-        params[…
+        params["types"] = "series,anime"
     elif "radarr" in request_from.lower():
-        params[…
+        params["types"] = "movie"
 
     debug(f"{hostname.upper()}: Searching: '{search_string}'")
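Reassembled from the hunk above: the search call against the internal API. A PreparedRequest shows the final URL without touching the network; the host value is a placeholder:

import requests

host = "example.invalid"
params = {
    "q": "some title",
    "types": "movie,series,anime",
    "page": 1,
    "per_page": 50,
    "sortBy": "latest",
    "sortOrder": "desc",
}
request_from = "Sonarr/4.0"
if "sonarr" in request_from.lower():
    params["types"] = "series,anime"  # TV-only callers narrow the type filter

req = requests.Request("GET", f"https://api.{host}/start/search", params=params).prepare()
print(req.url)  # https://api.example.invalid/start/search?q=some+title&types=series%2Canime&...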
@@ -189,12 +210,12 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
 
         data = r.json()
 
-        if …
-            items = data[…
-        elif …
-            items = data[…
-        elif …
-            items = data[…
+        if "items" in data and "data" in data["items"]:
+            items = data["items"]["data"]
+        elif "data" in data:
+            items = data["data"]
+        elif "results" in data:
+            items = data["results"]
         else:
             items = data if isinstance(data, list) else []
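The API envelope evidently varies between {"items": {"data": [...]}}, {"data": [...]}, {"results": [...]}, and a bare list. A standalone restatement of the probing order:

def extract_items(data):
    # Mirrors the fallback chain in the diff: nested items.data first,
    # then data, then results, else treat a bare list as the result set.
    if isinstance(data, dict):
        if "items" in data and "data" in data["items"]:
            return data["items"]["data"]
        if "data" in data:
            return data["data"]
        if "results" in data:
            return data["results"]
        return []
    return data if isinstance(data, list) else []

print(extract_items({"items": {"data": [1, 2]}}))  # [1, 2]
print(extract_items({"results": [3]}))             # [3]
print(extract_items("unexpected"))                 # []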
@@ -205,124 +226,164 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
 
         for item in items:
             try:
-                uid = item.get(…
+                uid = item.get("uid")
                 if not uid:
                     debug(f"{hostname.upper()}: Item has no UID, skipping")
                     continue
 
                 debug(f"{hostname.upper()}: Fetching details for UID: {uid}")
 
-                detail_url = f…
+                detail_url = f"https://api.{host}/start/d/{uid}"
                 detail_r = requests.get(detail_url, headers=headers, timeout=10)
                 detail_r.raise_for_status()
 
                 detail_data = detail_r.json()
 
-                if …
-                    detail_item = detail_data[…
+                if "item" in detail_data:
+                    detail_item = detail_data["item"]
                 else:
                     detail_item = detail_data
 
                 item_imdb_id = imdb_id
                 if not item_imdb_id:
-                    item_imdb_id = detail_item.get(…
-                    …
+                    item_imdb_id = detail_item.get("imdb_id") or detail_item.get(
+                        "imdbid"
+                    )
+                    if not item_imdb_id and "options" in detail_item:
+                        item_imdb_id = detail_item["options"].get("imdb_id")
 
                 source = f"https://{host}/detail/{uid}"
 
-                main_title = …
+                main_title = (
+                    detail_item.get("fulltitle")
+                    or detail_item.get("title")
+                    or detail_item.get("name")
+                )
                 if main_title:
                     title = html.unescape(main_title)
-                    title = title.replace(…
+                    title = title.replace(" ", ".")
 
-                    if shared_state.is_valid_release(…
+                    if shared_state.is_valid_release(
+                        title, request_from, search_string, season, episode
+                    ):
                         # Skip if we've already seen this exact title
                         if title in seen_titles:
-                            debug(…
+                            debug(
+                                f"{hostname.upper()}: Skipping duplicate main title: {title}"
+                            )
                         else:
                             seen_titles.add(title)
-                            published = detail_item.get(…
+                            published = detail_item.get(
+                                "updated_at"
+                            ) or detail_item.get("created_at")
                             if not published:
-                                published = datetime.now().strftime(…
+                                published = datetime.now().strftime(
+                                    "%a, %d %b %Y %H:%M:%S +0000"
+                                )
                             password = f"www.{host}"
 
                             payload = urlsafe_b64encode(
                                 f"{title}|{source}|{mirror}|0|{password}|{item_imdb_id or ''}|{hostname}".encode(
-                                    "utf-8"…
+                                    "utf-8"
+                                )
                             ).decode("utf-8")
                             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                            releases.append(…
-                            …
+                            releases.append(
+                                {
+                                    "details": {
+                                        "title": title,
+                                        "hostname": hostname,
+                                        "imdb_id": item_imdb_id,
+                                        "link": link,
+                                        "mirror": mirror,
+                                        "size": 0,
+                                        "date": published,
+                                        "source": source,
+                                    },
+                                    "type": "protected",
+                                }
+                            )
 
-                …
+                if "releases" in detail_item and isinstance(
+                    detail_item["releases"], list
+                ):
+                    debug(
+                        f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}"
+                    )
 
+                    for release in detail_item["releases"]:
                         try:
-                            release_title = release.get(…
+                            release_title = release.get("fulltitle")
                             if not release_title:
                                 continue
 
                             release_title = html.unescape(release_title)
-                            release_title = release_title.replace(…
-
-                            if not shared_state.is_valid_release(…
-                            …
+                            release_title = release_title.replace(" ", ".")
+
+                            if not shared_state.is_valid_release(
+                                release_title,
+                                request_from,
+                                search_string,
+                                season,
+                                episode,
+                            ):
+                                debug(
+                                    f"{hostname.upper()}: ✗ Release filtered out: {release_title}"
+                                )
                                 continue
 
                             # Skip if we've already seen this exact title (deduplication)
                             if release_title in seen_titles:
-                                debug(…
+                                debug(
+                                    f"{hostname.upper()}: Skipping duplicate release: {release_title}"
+                                )
                                 continue
 
                             seen_titles.add(release_title)
 
-                            release_uid = release.get(…
+                            release_uid = release.get("uid")
                             if release_uid:
-                                release_source = …
+                                release_source = (
+                                    f"https://{host}/detail/{uid}?release={release_uid}"
+                                )
                             else:
                                 release_source = source
 
-                            release_published = …
-                            …
+                            release_published = (
+                                release.get("updated_at")
+                                or release.get("created_at")
+                                or detail_item.get("updated_at")
+                            )
                             if not release_published:
-                                release_published = datetime.now().strftime(…
+                                release_published = datetime.now().strftime(
+                                    "%a, %d %b %Y %H:%M:%S +0000"
+                                )
+                            release_size = release.get("size", 0)
                             password = f"www.{host}"
 
                             payload = urlsafe_b64encode(
                                 f"{release_title}|{release_source}|{mirror}|{release_size}|{password}|{item_imdb_id or ''}|{hostname}".encode(
-                                    "utf-8"…
+                                    "utf-8"
+                                )
                             ).decode("utf-8")
                             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                            releases.append(…
-                            …
+                            releases.append(
+                                {
+                                    "details": {
+                                        "title": release_title,
+                                        "hostname": hostname,
+                                        "imdb_id": item_imdb_id,
+                                        "link": link,
+                                        "mirror": mirror,
+                                        "size": release_size,
+                                        "date": release_published,
+                                        "source": release_source,
+                                    },
+                                    "type": "protected",
+                                }
+                            )
 
                         except Exception as e:
                             debug(f"{hostname.upper()}: Error parsing release: {e}")
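Main titles and per-release titles share one seen_titles set, so every normalized title is emitted at most once per search. The pattern in isolation:

seen_titles = set()
releases = []
for title in ["A.Release.1080p", "A.Release.1080p", "B.Release.720p"]:
    if title in seen_titles:
        continue  # already emitted under this exact title
    seen_titles.add(title)
    releases.append(title)
print(releases)  # ['A.Release.1080p', 'B.Release.720p']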
@@ -335,11 +396,15 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                 debug(f"{hostname.upper()}: {traceback.format_exc()}")
                 continue
 
-        debug(…
+        debug(
+            f"{hostname.upper()}: Returning {len(releases)} total releases (deduplicated)"
+        )
 
     except Exception as e:
         info(f"Error in {hostname.upper()} search: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
 
         debug(f"{hostname.upper()}: {traceback.format_exc()}")
     return releases