quasarr 2.4.8__py3-none-any.whl → 2.4.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of quasarr might be problematic.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/METADATA +4 -3
- quasarr-2.4.10.dist-info/RECORD +81 -0
- quasarr-2.4.8.dist-info/RECORD +0 -81
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/WHEEL +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/sj.py
CHANGED
@@ -11,18 +11,18 @@ from datetime import datetime, timedelta
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import
+from quasarr.providers.log import debug, info
 
 hostname = "sj"
 
 
 def convert_to_rss_date(date_str):
     try:
-        return datetime.fromisoformat(
-
-        )
+        return datetime.fromisoformat(date_str.replace("Z", "+00:00")).strftime(
+            "%a, %d %b %Y %H:%M:%S +0000"
+        )
     except Exception:
         return ""
 
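The reformatted convert_to_rss_date helper turns an ISO-8601 feed timestamp into the RFC-822 date format RSS consumers expect. A minimal standalone sketch of that conversion (the sample timestamp is assumed; %a/%b output is locale-dependent):

from datetime import datetime

iso = "2024-05-01T12:00:00Z"  # assumed sample feed timestamp
# the replace("Z", "+00:00") shim keeps fromisoformat working on
# Python versions that cannot parse a trailing "Z"
rfc822 = datetime.fromisoformat(iso.replace("Z", "+00:00")).strftime(
    "%a, %d %b %Y %H:%M:%S +0000"
)
print(rfc822)  # Wed, 01 May 2024 12:00:00 +0000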
@@ -31,7 +31,9 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
     releases = []
 
     if "sonarr" not in request_from.lower():
-        debug(
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -46,7 +48,9 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
         data = json.loads(r.content)
     except Exception as e:
         info(f"{hostname.upper()}: feed load error: {e}")
-        mark_hostname_issue(
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     for release in data:
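One detail both files repeat: the error argument str(e) if "e" in dir() else "Error occurred". Inside an except Exception as e: block the name e is always bound, so the dir() guard can never take its fallback branch; plain str(e) behaves identically. A runnable sketch of the simpler equivalent (parse_feed and report are hypothetical stand-ins, not functions from this package):

import json

def parse_feed(raw, report):
    # hypothetical stand-in for the feed-loading pattern above
    try:
        return json.loads(raw)
    except Exception as e:
        # "e" is guaranteed to be bound inside this block,
        # so no '"e" in dir()' guard is needed
        report("feed", str(e))
        return None

parse_feed("not json", lambda scope, msg: print(scope, msg))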
@@ -71,24 +75,30 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
             imdb_id = None
 
             payload = urlsafe_b64encode(
-                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                    "utf-8"
+                )
             ).decode("utf-8")
 
-            link =
-
-
-
-
-
-            "
-
-
-
-
-
-
-
-
+            link = (
+                f"{shared_state.values['internal_address']}/download/?payload={payload}"
+            )
+
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": series_url,
+                    },
+                    "type": "protected",
+                }
+            )
 
         except Exception as e:
             debug(f"{hostname.upper()}: feed parse error: {e}")
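The payload wired into each release link is a pipe-separated field list, URL-safe base64 encoded so it survives a query string. A round-trip sketch (field values are assumed samples):

from base64 import urlsafe_b64encode, urlsafe_b64decode

fields = ["Some.Show.S01E01.1080p.WEB.x264", "https://example.org/serie/x",
          "None", "1024", "password", "None", "sj"]  # assumed sample values
payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")
restored = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
assert restored == fields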
@@ -101,11 +111,21 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def sj_search(
+def sj_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
 
     if "sonarr" not in request_from.lower():
-        debug(
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -131,10 +151,12 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
         results = soup.find_all("a", href=re.compile(r"^/serie/"))
     except Exception as e:
         info(f"{hostname.upper()}: search load error: {e}")
-        mark_hostname_issue(
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
-    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime(
+    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
     sanitized_search_string = shared_state.sanitize_string(localized_title)
 
     for result in results:
@@ -144,8 +166,7 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
         sanitized_title = shared_state.sanitize_string(result_title)
 
         if not re.search(
-
-            sanitized_title
+            rf"\b{re.escape(sanitized_search_string)}\b", sanitized_title
         ):
             debug(
                 f"Search string '{localized_title}' doesn't match '{result_title}'"
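The rewritten match anchors the sanitized search string on word boundaries and passes it through re.escape first, so regex metacharacters in a title cannot break the pattern. A small illustration (sample strings assumed):

import re

needle = "erin carter"  # assumed sample sanitized search string
pattern = rf"\b{re.escape(needle)}\b"
print(bool(re.search(pattern, "who is erin carter s01e01")))  # True
print(bool(re.search(pattern, "erin carters onward")))        # False: no word boundary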
@@ -179,11 +200,7 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
             continue
 
         if not shared_state.is_valid_release(
-
-            request_from,
-            search_string,
-            season,
-            episode
+            title, request_from, search_string, season, episode
         ):
             continue
 
@@ -196,24 +213,28 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
             size = 0
 
             payload = urlsafe_b64encode(
-                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                    "utf-8"
+                )
             ).decode("utf-8")
 
             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-            releases.append(
-
-            "
-
-
-
-
-
-
-
-
-
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": series_url,
+                    },
+                    "type": "protected",
+                }
+            )
 
         except Exception as e:
             debug(f"{hostname.upper()}: search parse error: {e}")
quasarr/search/sources/sl.py
CHANGED
@@ -14,12 +14,15 @@ from urllib.parse import quote_plus
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import
+from quasarr.providers.log import debug, info
 
 hostname = "sl"
-supported_mirrors = [
+supported_mirrors = [
+    "nitroflare",
+    "ddownload",
+]  # ignoring captcha-protected multiup/mirrorace for now
 
 
 def extract_size(text):
@@ -36,7 +39,7 @@ def parse_pubdate_to_iso(pubdate_str):
     """
     Parse an RFC-822 pubDate from RSS into an ISO8601 string with timezone.
     """
-    dt = datetime.datetime.strptime(pubdate_str,
+    dt = datetime.datetime.strptime(pubdate_str, "%a, %d %b %Y %H:%M:%S %z")
     return dt.isoformat()
 
 
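This helper runs in the opposite direction of sj.py's convert_to_rss_date: RFC-822 pubDate in, ISO-8601 out. A quick check of the completed format string (the sample date is assumed):

import datetime

pub = "Mon, 05 Aug 2024 10:30:00 +0000"  # assumed sample pubDate
dt = datetime.datetime.strptime(pub, "%a, %d %b %Y %H:%M:%S %z")
print(dt.isoformat())  # 2024-08-05T10:30:00+00:00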
@@ -54,29 +57,33 @@ def sl_feed(shared_state, start_time, request_from, mirror=None):
     feed_type = "tv-shows"
 
     if mirror and mirror not in supported_mirrors:
-        debug(
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!'
+        )
         return releases
 
-    url = f
-    headers = {
+    url = f"https://{sl}/{feed_type}/feed/"
+    headers = {"User-Agent": shared_state.values["user_agent"]}
 
     try:
         r = requests.get(url, headers=headers, timeout=30)
         r.raise_for_status()
         root = ET.fromstring(r.text)
 
-        for item in root.find(
+        for item in root.find("channel").findall("item"):
             try:
-                title = item.findtext(
-                if
+                title = item.findtext("title").strip()
+                if "lazylibrarian" in request_from.lower():
                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
                     title = shared_state.normalize_magazine_title(title)
 
-                source = item.findtext(
+                source = item.findtext("link").strip()
 
-                desc = item.findtext(
+                desc = item.findtext("description") or ""
 
-                size_match = re.search(
+                size_match = re.search(
+                    r"Size:\s*([\d\.]+\s*(?:GB|MB|KB|TB))", desc, re.IGNORECASE
+                )
                 if not size_match:
                     debug(f"Size not found in RSS item: {title}")
                     continue
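The release size is scraped from the item description with the now fully visible case-insensitive regex. A small demonstration (the description text is an assumed sample):

import re

desc = "Release notes ... Size: 1.4 GB | Host: nitroflare"  # assumed sample
m = re.search(r"Size:\s*([\d\.]+\s*(?:GB|MB|KB|TB))", desc, re.IGNORECASE)
print(m.group(1))  # 1.4 GB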
@@ -85,39 +92,47 @@ def sl_feed(shared_state, start_time, request_from, mirror=None):
                 mb = shared_state.convert_to_mb(size_item)
                 size = mb * 1024 * 1024
 
-                pubdate = item.findtext(
+                pubdate = item.findtext("pubDate").strip()
                 published = parse_pubdate_to_iso(pubdate)
 
                 m = re.search(r"https?://www\.imdb\.com/title/(tt\d+)", desc)
                 imdb_id = m.group(1) if m else None
 
                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                releases.append(
-
-            "
-
-
-
-
-
-
-
-
-
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname.lower(),
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": source,
+                        },
+                        "type": "protected",
+                    }
+                )
 
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed item: {e}")
-                mark_hostname_issue(
+                mark_hostname_issue(
+                    hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+                )
                 continue
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
 
     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
@@ -127,7 +142,15 @@ def sl_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def sl_search(
+def sl_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     sl = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = sl
@@ -140,13 +163,15 @@ def sl_search(shared_state, start_time, request_from, search_string, mirror=None
     feed_type = "tv-shows"
 
     if mirror and mirror not in supported_mirrors:
-        debug(
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!'
+        )
         return releases
 
     try:
        imdb_id = shared_state.is_imdb_id(search_string)
        if imdb_id:
-            search_string = get_localized_title(shared_state, imdb_id,
+            search_string = get_localized_title(shared_state, imdb_id, "en") or ""
            search_string = html.unescape(search_string)
            if not search_string:
                info(f"Could not extract title from IMDb-ID {imdb_id}")
@@ -154,11 +179,11 @@ def sl_search(shared_state, start_time, request_from, search_string, mirror=None
 
     # Build the list of URLs to search. For tv-shows also search the "foreign" section.
     q = quote_plus(search_string)
-    urls = [f
+    urls = [f"https://{sl}/{feed_type}/?s={q}"]
     if feed_type == "tv-shows":
-        urls.append(f
+        urls.append(f"https://{sl}/foreign/?s={q}")
 
-    headers = {"User-Agent": shared_state.values[
+    headers = {"User-Agent": shared_state.values["user_agent"]}
 
     # Fetch pages in parallel (so we don't double the slow site latency)
     def fetch(url):
@@ -169,8 +194,10 @@ def sl_search(shared_state, start_time, request_from, search_string, mirror=None
             return r.text
         except Exception as e:
             info(f"Error fetching {hostname} url {url}: {e}")
-            mark_hostname_issue(
-
+            mark_hostname_issue(
+                hostname, "search", str(e) if "e" in dir() else "Error occurred"
+            )
+            return ""
 
     html_texts = []
     with ThreadPoolExecutor(max_workers=len(urls)) as tpe:
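Both search URLs are fetched concurrently so the site's latency is paid only once. A self-contained sketch of the same submit/as_completed pattern (fetch and the URLs are assumed stand-ins for the real requests.get calls):

from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch(url):
    # stand-in for the requests.get(...) call in the diff
    return f"<html for {url}>"

urls = ["https://example.org/tv-shows/?s=x", "https://example.org/foreign/?s=x"]
html_texts = []
with ThreadPoolExecutor(max_workers=len(urls)) as tpe:
    futures = [tpe.submit(fetch, u) for u in urls]
    for future in as_completed(futures):
        try:
            html_texts.append(future.result())
        except Exception as e:
            print(f"fetch failed: {e}")
print(len(html_texts))  # 2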
@@ -180,7 +207,9 @@ def sl_search(shared_state, start_time, request_from, search_string, mirror=None
                 html_texts.append(future.result())
             except Exception as e:
                 info(f"Error fetching {hostname} search page: {e}")
-                mark_hostname_issue(
+                mark_hostname_issue(
+                    hostname, "search", str(e) if "e" in dir() else "Error occurred"
+                )
 
     # Parse each result and collect unique releases (dedupe by source link)
     seen_sources = set()
@@ -188,70 +217,85 @@ def sl_search(shared_state, start_time, request_from, search_string, mirror=None
         if not html_text:
             continue
         try:
-            soup = BeautifulSoup(html_text,
-            posts = soup.find_all(
+            soup = BeautifulSoup(html_text, "html.parser")
+            posts = soup.find_all(
+                "div", class_=lambda c: c and c.startswith("post-")
+            )
 
             for post in posts:
                 try:
-                    a = post.find(
+                    a = post.find("h1").find("a")
                     title = a.get_text(strip=True)
 
-                    if not shared_state.is_valid_release(
-
-
-                        season,
-                        episode):
+                    if not shared_state.is_valid_release(
+                        title, request_from, search_string, season, episode
+                    ):
                         continue
 
-                    if
+                    if "lazylibrarian" in request_from.lower():
                         title = shared_state.normalize_magazine_title(title)
 
-                    source = a[
+                    source = a["href"]
                     # dedupe
                     if source in seen_sources:
                         continue
                     seen_sources.add(source)
 
                     # Published date
-                    time_tag = post.find(
+                    time_tag = post.find("span", {"class": "localtime"})
                     published = None
-                    if time_tag and time_tag.has_attr(
-                        published = time_tag[
-                    published =
+                    if time_tag and time_tag.has_attr("data-lttime"):
+                        published = time_tag["data-lttime"]
+                    published = (
+                        published
+                        or datetime.datetime.utcnow().isoformat() + "+00:00"
+                    )
 
                     size = 0
                     imdb_id = None
 
                     payload = urlsafe_b64encode(
-                        f"{title}|{source}|{mirror}|0|{password}|{imdb_id}|{hostname}".encode(
-
+                        f"{title}|{source}|{mirror}|0|{password}|{imdb_id}|{hostname}".encode(
+                            "utf-8"
+                        )
+                    ).decode("utf-8")
                     link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                    releases.append(
-
-            "
-
-
-
-
-
-
-
-
-
+                    releases.append(
+                        {
+                            "details": {
+                                "title": title,
+                                "hostname": hostname.lower(),
+                                "imdb_id": imdb_id,
+                                "link": link,
+                                "mirror": mirror,
+                                "size": size,
+                                "date": published,
+                                "source": source,
+                            },
+                            "type": "protected",
+                        }
+                    )
                 except Exception as e:
                     info(f"Error parsing {hostname.upper()} search item: {e}")
-                    mark_hostname_issue(
+                    mark_hostname_issue(
+                        hostname,
+                        "search",
+                        str(e) if "e" in dir() else "Error occurred",
+                    )
                     continue
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} search HTML: {e}")
-                mark_hostname_issue(
+                mark_hostname_issue(
+                    hostname, "search", str(e) if "e" in dir() else "Error occurred"
+                )
                 continue
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} search page: {e}")
-        mark_hostname_issue(
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
 
     elapsed = time.time() - start_time
     debug(f"Search time: {elapsed:.2f}s ({hostname})")
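The search HTML is parsed by matching div containers whose class begins with "post-", taking the release link from the first h1 and the publish time from a span.localtime tag. A minimal sketch against assumed markup of that shape:

from bs4 import BeautifulSoup

html_text = '''
<div class="post-123">
  <h1><a href="/release/x">Some.Release.1080p</a></h1>
  <span class="localtime" data-lttime="2024-08-05T10:30:00+00:00"></span>
</div>
'''  # assumed sample markup
soup = BeautifulSoup(html_text, "html.parser")
for post in soup.find_all("div", class_=lambda c: c and c.startswith("post-")):
    a = post.find("h1").find("a")
    time_tag = post.find("span", {"class": "localtime"})
    print(a.get_text(strip=True), a["href"], time_tag["data-lttime"])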