quasarr 2.4.8__py3-none-any.whl → 2.4.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/METADATA +4 -3
- quasarr-2.4.10.dist-info/RECORD +81 -0
- quasarr-2.4.8.dist-info/RECORD +0 -81
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/WHEEL +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/sf.py
CHANGED
@@ -10,9 +10,9 @@ from datetime import datetime, timedelta

 import requests

-from quasarr.providers.hostname_issues import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import …
+from quasarr.providers.log import debug, info

 hostname = "sf"
 supported_mirrors = ["1fichier", "ddownload", "katfile", "rapidgator", "turbobit"]
@@ -20,8 +20,8 @@ supported_mirrors = ["1fichier", "ddownload", "katfile", "rapidgator", "turbobit"]
 from bs4 import BeautifulSoup

 check = lambda s: s.replace(
-    …
-    …
+    "".join(chr((ord(c) - 97 - 7) % 26 + 97) for c in "ylhr"),
+    "".join(chr((ord(c) - 97 - 7) % 26 + 97) for c in "hu"),
 )


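Note on the rewritten check lambda: chr((ord(c) - 97 - 7) % 26 + 97) rotates a lowercase letter seven places back in the alphabet, so the two replace() arguments are computed rather than spelled out. A minimal sketch, verifiable by hand:

    def unshift(s):
        # rotate each lowercase letter back by 7 (a Caesar shift)
        return "".join(chr((ord(c) - 97 - 7) % 26 + 97) for c in s)

    print(unshift("ylhr"))  # "reak"
    print(unshift("hu"))    # "an"

So check(s) amounts to s.replace("reak", "an"), with both literals kept out of the source text.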
@@ -37,19 +37,19 @@ def parse_mirrors(base_url, entry):
     mirrors = {}
     try:
         host_map = {
-            …
-            …
-            …
-            …
-            …
+            "1F": "1fichier",
+            "DD": "ddownload",
+            "KA": "katfile",
+            "RG": "rapidgator",
+            "TB": "turbobit",
         }

-        h3 = entry.select_one(…
-        name = h3.get_text(separator=…
+        h3 = entry.select_one("h3")
+        name = h3.get_text(separator=" ", strip=True) if h3 else ""

         season = {}
-        for a in entry.select(…
-            if a.find_parent(…
+        for a in entry.select("a.dlb.row"):
+            if a.find_parent("div.list.simple"):
                 continue
             host = a.get_text(strip=True)
             if len(host) > 2:  # episode hosts are 2 chars
@@ -58,41 +58,39 @@ def parse_mirrors(base_url, entry):
         # fallback: if mirrors are falsely missing a mirror title, return first season link as "filecrypt"
         if not season:
             fallback = next(
-                (…
-                …
+                (
+                    a
+                    for a in entry.select("a.dlb.row")
+                    if not a.find_parent("div.list.simple")
+                ),
+                None,
             )
             if fallback:
-                season[…
+                season["filecrypt"] = f"{base_url}{fallback['href']}"

         episodes = []
-        for ep_row in entry.select(…
-            if …
+        for ep_row in entry.select("div.list.simple > div.row"):
+            if "head" in ep_row.get("class", []):
                 continue

-            divs = ep_row.find_all(…
-            number = int(divs[0].get_text(strip=True).rstrip(…
+            divs = ep_row.find_all("div", recursive=False)
+            number = int(divs[0].get_text(strip=True).rstrip("."))
             title = divs[1].get_text(strip=True)

             ep_links = {}
-            for a in ep_row.select(…
+            for a in ep_row.select("div.row > a.dlb.row"):
                 host = a.get_text(strip=True)
                 full_host = host_map.get(host, host)
                 ep_links[full_host] = f"{base_url}{a['href']}"

-            episodes.append({
-                'number': number,
-                'title': title,
-                'links': ep_links
-            })
+            episodes.append({"number": number, "title": title, "links": ep_links})

-        mirrors = {
-            'name': name,
-            'season': season,
-            'episodes': episodes
-        }
+        mirrors = {"name": name, "season": season, "episodes": episodes}
     except Exception as e:
         info(f"Error parsing mirrors: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )

     return mirrors

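For orientation, the dict parse_mirrors() builds in this hunk has the following shape (a sketch assembled from the added lines; domain and hrefs below are made up):

    mirrors = {
        "name": "Show.Name.S01.German.1080p.WEB.x264-GROUP",  # text of the entry's <h3>
        "season": {  # season-pack links keyed by full mirror name
            "rapidgator": "https://sf.example/external/abc",
            "filecrypt": "https://sf.example/external/def",  # fallback key when mirror titles are missing
        },
        "episodes": [  # one entry per episode row
            {"number": 1, "title": "Pilot", "links": {"rapidgator": "https://sf.example/external/ep1"}},
        ],
    }

On a parse error the function now also calls mark_hostname_issue() before returning whatever was collected.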
@@ -103,16 +101,20 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
     password = check(sf)

     if not "sonarr" in request_from.lower():
-        debug(…
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases

     if mirror and mirror not in supported_mirrors:
-        debug(…
-        …
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+            " Skipping search!"
+        )
         return releases

     headers = {
-        …
+        "User-Agent": shared_state.values["user_agent"],
     }

     date = datetime.now()
@@ -120,15 +122,19 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):

     while days_to_cover > 0:
         days_to_cover -= 1
-        formatted_date = date.strftime(…
+        formatted_date = date.strftime("%Y-%m-%d")
         date -= timedelta(days=1)

         try:
-            r = requests.get(…
+            r = requests.get(
+                f"https://{sf}/updates/{formatted_date}#list", headers, timeout=30
+            )
             r.raise_for_status()
         except Exception as e:
             info(f"Error loading {hostname.upper()} feed: {e} for {formatted_date}")
-            mark_hostname_issue(…
+            mark_hostname_issue(
+                hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+            )
             return releases

         content = BeautifulSoup(r.text, "html.parser")
@@ -146,8 +152,10 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
                 imdb_id = None  # imdb info is missing here

                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(…
-                    …
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                        "utf-8"
+                    )
+                ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
             except:
                 continue
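The payload built here is a pipe-delimited string, URL-safe base64 encoded so it can travel as a query parameter. A sketch of the round trip (field values are made up; decoding assumes no field contains a literal "|"):

    from base64 import urlsafe_b64encode, urlsafe_b64decode

    fields = ["Some.Title.S01", "https://sf.example/abc", "rapidgator", "1024", "pw", "None", "sf"]
    payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")

    # the /download/ endpoint would presumably invert it like this:
    title, source, mirror, mb, password, imdb_id, hostname = (
        urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
    )
    assert title == "Some.Title.S01" and hostname == "sf"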
@@ -163,23 +171,27 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
             except:
                 continue

-            releases.append(…
-            …
-            "…
-            …
-            …
-            …
-            …
-            …
-            …
-            …
-            …
-            …
-            …
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname.lower(),
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": source,
+                    },
+                    "type": "protected",
+                }
+            )

         except Exception as e:
             info(f"Error parsing {hostname.upper()} feed: {e}")
-            mark_hostname_issue(…
+            mark_hostname_issue(
+                hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+            )

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
@@ -199,32 +211,44 @@ def extract_size(text):
     raise ValueError(f"Invalid size format: {text}")


-def sf_search(…
+def sf_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     sf = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = check(sf)

     imdb_id_in_search = shared_state.is_imdb_id(search_string)
     if imdb_id_in_search:
-        search_string = get_localized_title(shared_state, imdb_id_in_search, …
+        search_string = get_localized_title(shared_state, imdb_id_in_search, "de")
         if not search_string:
             info(f"Could not extract title from IMDb-ID {imdb_id_in_search}")
             return releases
         search_string = html.unescape(search_string)

     if not "sonarr" in request_from.lower():
-        debug(…
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases

     if mirror and mirror not in supported_mirrors:
-        debug(…
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}.'
+        )
         return releases

-    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime(…
+    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")

     # search API
-    url = f…
-    headers = {…
+    url = f"https://{sf}/api/v2/search?q={search_string}&ql=DE"
+    headers = {"User-Agent": shared_state.values["user_agent"]}

     try:
         r = requests.get(url, headers=headers, timeout=10)
@@ -232,22 +256,30 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
         feed = r.json()
     except Exception as e:
         info(f"Error loading {hostname.upper()} search: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases

-    results = feed.get(…
+    results = feed.get("result", [])
     for result in results:
         sanitized_search_string = shared_state.sanitize_string(search_string)
         sanitized_title = shared_state.sanitize_string(result.get("title", ""))
-        if not re.search(rf…
-            debug(…
+        if not re.search(rf"\b{re.escape(sanitized_search_string)}\b", sanitized_title):
+            debug(
+                f"Search string '{search_string}' doesn't match '{result.get('title')}'"
+            )
             continue
-        debug(…
+        debug(
+            f"Matched search string '{search_string}' with result '{result.get('title')}'"
+        )

         series_id = result.get("url_id")
         context = "recents_sf"
         threshold = 60
-        recently_searched = shared_state.get_recently_searched(…
+        recently_searched = shared_state.get_recently_searched(
+            shared_state, context, threshold
+        )
         entry = recently_searched.get(series_id, {})
         ts = entry.get("timestamp")
         use_cache = ts and ts > datetime.now() - timedelta(seconds=threshold)
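The search loop keeps a result only when the sanitized search string occurs as a whole word inside the sanitized title. A small sketch of that test (sanitize_string itself is not shown here; inputs are assumed already sanitized):

    import re

    def matches(search_string, title):
        # re.escape neutralizes regex metacharacters in the search string;
        # \b anchors the match at word boundaries on both sides
        return bool(re.search(rf"\b{re.escape(search_string)}\b", title))

    print(matches("The Show", "The Show 2020"))   # True
    print(matches("The Show", "The Showrunner"))  # False: "Show" is not word-final here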
@@ -269,8 +301,12 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
                 r = requests.get(series_url, headers=headers, timeout=10)
                 r.raise_for_status()
                 series_page = r.text
-                imdb_link = BeautifulSoup(series_page, "html.parser").find(…
-                …
+                imdb_link = BeautifulSoup(series_page, "html.parser").find(
+                    "a", href=re.compile(r"imdb\.com")
+                )
+                imdb_id = (
+                    re.search(r"tt\d+", str(imdb_link)).group() if imdb_link else None
+                )
                 season_id = re.findall(r"initSeason\('(.+?)\',", series_page)[0]
             except Exception as e:
                 debug(f"Failed to load or parse series page for {series_id}")
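The series-page scrape above grabs the first anchor whose href mentions imdb.com and pulls the tt-id out of its string form. A standalone sketch with a hypothetical page snippet:

    import re
    from bs4 import BeautifulSoup

    page = '<a href="https://www.imdb.com/title/tt0944947/">IMDb</a>'  # hypothetical snippet
    imdb_link = BeautifulSoup(page, "html.parser").find("a", href=re.compile(r"imdb\.com"))
    imdb_id = re.search(r"tt\d+", str(imdb_link)).group() if imdb_link else None
    print(imdb_id)  # tt0944947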
@@ -278,20 +314,24 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
                 continue

             # fetch API HTML
-            epoch = str(datetime.now().timestamp()).replace(…
-            api_url = f…
+            epoch = str(datetime.now().timestamp()).replace(".", "")[:-3]
+            api_url = f"https://{sf}/api/v1/{season_id}/season/ALL?lang=ALL&_={epoch}"
             debug(f"Requesting SF API URL: {api_url}")
             try:
                 r = requests.get(api_url, headers=headers, timeout=10)
                 r.raise_for_status()
                 resp_json = r.json()
-                if resp_json.get(…
-                    info(…
+                if resp_json.get("error"):
+                    info(
+                        f"SF API error for series '{series_id}' at URL {api_url}: {resp_json.get('message')}"
+                    )
                     continue
                 data_html = resp_json.get("html", "")
             except Exception as e:
                 info(f"Error loading SF API for {series_id} at {api_url}: {e}")
-                mark_hostname_issue(…
+                mark_hostname_issue(
+                    hostname, "search", str(e) if "e" in dir() else "Error occurred"
+                )
                 continue

             # cache content and imdb_id
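The epoch value built above imitates a JavaScript Date.now() millisecond timestamp by string surgery on the float returned by timestamp(). A sketch of what it produces, with the arithmetic equivalent:

    from datetime import datetime

    ts = datetime.now().timestamp()        # e.g. 1718000000.123456
    epoch = str(ts).replace(".", "")[:-3]  # "1718000000123" when str() prints six decimals
    print(epoch, int(ts * 1000))           # int(ts * 1000) is the robust equivalent

Note the string route silently depends on str() rendering six decimal digits, which is typical for timestamp() but not guaranteed.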
@@ -308,13 +348,21 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
                 title = details.find("small").text.strip()

                 mirrors = parse_mirrors(f"https://{sf}", details)
-                source = …
+                source = (
+                    mirror
+                    and mirrors["season"].get(mirror)
+                    or next(iter(mirrors["season"].values()), None)
+                )
                 if not source:
                     debug(f"No source mirror found for {title}")
                     continue

                 try:
-                    size_string = …
+                    size_string = (
+                        item.find("span", {"class": "morespec"})
+                        .text.split("|")[1]
+                        .strip()
+                    )
                     size_item = extract_size(size_string)
                     mb = shared_state.convert_to_mb(size_item)
                 except Exception as e:
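The reformatted source assignment is an old-style and/or chain. Spelled out explicitly (a sketch; behavior matches the one-liner, including the fall-through when the requested mirror's link is missing or falsy):

    def pick_source(season_links, mirror):
        # equivalent to: mirror and season_links.get(mirror) or next(iter(season_links.values()), None)
        if mirror and season_links.get(mirror):
            return season_links[mirror]                 # requested mirror is present
        return next(iter(season_links.values()), None)  # otherwise first available link, or None

    print(pick_source({"rapidgator": "https://x/1"}, "katfile"))  # "https://x/1" (fallback)
    print(pick_source({}, "rapidgator"))                          # None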
@@ -323,33 +371,47 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None

                 if episode:
                     try:
-                        if not re.search(r…
+                        if not re.search(r"S\d{1,3}E\d{1,3}", title):
                             episodes_in_release = len(mirrors["episodes"])

                             # Get the correct episode entry (episode numbers are 1-based, list index is 0-based)
-                            episode_data = next(…
-                            …
+                            episode_data = next(
+                                (
+                                    e
+                                    for e in mirrors["episodes"]
+                                    if e["number"] == int(episode)
+                                ),
+                                None,
+                            )

                             if episode_data:
-                                title = re.sub(…
+                                title = re.sub(
+                                    r"(S\d{1,3})", rf"\1E{episode:02d}", title
+                                )
                                 if mirror:
                                     if mirror not in episode_data["links"]:
                                         debug(
-                                            f"Mirror '{mirror}' does not exist for '{title}' episode {episode}'"…
+                                            f"Mirror '{mirror}' does not exist for '{title}' episode {episode}'"
+                                        )
                                     else:
                                         source = episode_data["links"][mirror]

                                 else:
                                     source = next(iter(episode_data["links"].values()))
                             else:
-                                debug(…
+                                debug(
+                                    f"Episode '{episode}' data not found in mirrors for '{title}'"
+                                )

                             if episodes_in_release:
                                 try:
-                                    mb = shared_state.convert_to_mb(…
-                                    …
-                                    …
-                                    …
+                                    mb = shared_state.convert_to_mb(
+                                        {
+                                            "size": float(size_item["size"])
+                                            // episodes_in_release,
+                                            "sizeunit": size_item["sizeunit"],
+                                        }
+                                    )
                                 except Exception as e:
                                     debug(f"Error calculating size for {title}: {e}")
                                     mb = 0
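When a season pack carries no SxxExx marker, the hunk above derives a per-episode size by dividing the pack size across its episodes. A worked example (hypothetical numbers) showing that // floors the result even on floats:

    size_item = {"size": "12.4", "sizeunit": "GB"}  # season-pack size as parsed
    episodes_in_release = 8

    per_episode = {
        "size": float(size_item["size"]) // episodes_in_release,  # 12.4 // 8 == 1.0, not 1.55
        "sizeunit": size_item["sizeunit"],
    }
    print(per_episode)  # {'size': 1.0, 'sizeunit': 'GB'}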
@@ -357,31 +419,34 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
                     continue

                 # check down here on purpose, because the title may be modified at episode stage
-                if not shared_state.is_valid_release(…
-                …
-                …
-                    season,
-                    episode):
+                if not shared_state.is_valid_release(
+                    title, request_from, search_string, season, episode
+                ):
                     continue

                 payload = urlsafe_b64encode(
-                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode()…
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode()
+                ).decode()
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                 size_bytes = mb * 1024 * 1024

-                releases.append(…
-                …
-                    "…
-                …
-                …
-                …
-                …
-                …
-                …
-                …
-                …
-                …
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname.lower(),
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size_bytes,
+                            "date": one_hour_ago,
+                            "source": f"https://{sf}/{series_id}/{season}"
+                            if season
+                            else f"https://{sf}/{series_id}",
+                        },
+                        "type": "protected",
+                    }
+                )
             except Exception as e:
                 debug(f"Error parsing item for '{search_string}': {e}")
