quasarr 2.4.7__py3-none-any.whl → 2.4.9__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
- quasarr-2.4.9.dist-info/RECORD +81 -0
- quasarr-2.4.7.dist-info/RECORD +0 -81
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.7.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
quasarr/downloads/sources/al.py
CHANGED
@@ -7,16 +7,21 @@ import json
 import re
 import time
 from dataclasses import dataclass
-from typing import
+from typing import List, Optional
 from urllib.parse import urlparse

 from bs4 import BeautifulSoup

 from quasarr.downloads.linkcrypters.al import decrypt_content, solve_captcha
 from quasarr.providers.hostname_issues import mark_hostname_issue
-from quasarr.providers.log import
-from quasarr.providers.sessions.al import
-    fetch_via_flaresolverr,
+from quasarr.providers.log import debug, info
+from quasarr.providers.sessions.al import (
+    fetch_via_flaresolverr,
+    fetch_via_requests_session,
+    invalidate_session,
+    retrieve_and_validate_session,
+    unwrap_flaresolverr_body,
+)
 from quasarr.providers.statistics import StatsHelper
 from quasarr.providers.utils import is_flaresolverr_available

@@ -40,7 +45,7 @@ class ReleaseInfo:


 def roman_to_int(r: str) -> int:
-    roman_map = {
+    roman_map = {"I": 1, "V": 5, "X": 10}
     total = 0
     prev = 0
     for ch in r.upper()[::-1]:
@@ -56,9 +61,9 @@ def roman_to_int(r: str) -> int:
 def derive_mirror(url):
     try:
         hostname = urlparse(url).netloc.lower()
-        if hostname.startswith(
+        if hostname.startswith("www."):
             hostname = hostname[4:]
-        parts = hostname.split(
+        parts = hostname.split(".")
         return parts[-2] if len(parts) >= 2 else hostname
     except:
         return "unknown"
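
Note: the roman_to_int hunk above maps only I, V, and X and scans the string right to left. A minimal self-contained sketch of that subtractive scan (the comparison inside the loop is not visible in the diff and is assumed here):

    def roman_to_int(r: str) -> int:
        # Scan right to left: a symbol smaller than its right neighbour
        # is subtracted (IV -> 4), otherwise it is added (VI -> 6).
        roman_map = {"I": 1, "V": 5, "X": 10}
        total = 0
        prev = 0
        for ch in r.upper()[::-1]:
            value = roman_map.get(ch, 0)
            total = total - value if value < prev else total + value
            prev = value
        return total

    assert roman_to_int("IV") == 4 and roman_to_int("XII") == 12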
@@ -70,10 +75,10 @@ def extract_season_from_synonyms(soup):
     Only scans the synonyms cell—no fallback to whole document.
     """
     syn_td = None
-    for tr in soup.select(
-        th = tr.find(
-        if th and
-            syn_td = tr.find(
+    for tr in soup.select("tr"):
+        th = tr.find("th")
+        if th and "synonym" in th.get_text(strip=True).lower():
+            syn_td = tr.find("td")
             break

     if not syn_td:
@@ -120,20 +125,20 @@ def find_season_in_release_notes(soup):
         re.compile(r"\b([IVXLCDM]+)\b(?=\s*$)"),  # uppercase Roman at end
     ]

-    for tr in soup.select(
-        th = tr.find(
+    for tr in soup.select("tr"):
+        th = tr.find("th")
         if not th:
             continue

         header = th.get_text(strip=True)
-        if
+        if "release " not in header.lower():  # release notes or release anmerkungen
             continue

-        td = tr.find(
+        td = tr.find("td")
         if not td:
             continue

-        content = td.get_text(
+        content = td.get_text(" ", strip=True)
         for pat in patterns:
             m = pat.search(content)
             if not m:
@@ -141,7 +146,7 @@ def find_season_in_release_notes(soup):

             token = m.group(1)
             # Roman numeral detection only uppercase
-            if pat.pattern.endswith(
+            if pat.pattern.endswith("(?=\\s*$)"):
                 if token.isupper():
                     return roman_to_int(token)
             else:
@@ -171,7 +176,9 @@ def extract_season_number_from_title(page_title, release_type, release_title="")
     season_num = None

     if release_title:
-        match = re.search(
+        match = re.search(
+            r"\.(?:S(\d{1,4})|R(2))(?:E\d{1,4})?", release_title, re.IGNORECASE
+        )
         if match:
             if match.group(1) is not None:
                 season_num = int(match.group(1))
@@ -180,8 +187,16 @@ def extract_season_number_from_title(page_title, release_type, release_title="")

     if season_num is None:
         page_title = page_title or ""
-        if
-
+        if (
+            "staffel" in page_title.lower()
+            or "season" in page_title.lower()
+            or release_type == "series"
+        ):
+            match = re.search(
+                r"\b(?:Season|Staffel)\s+(\d+|[IVX]+)\b|\bR(2)\b",
+                page_title,
+                re.IGNORECASE,
+            )
             if match:
                 if match.group(1) is not None:
                     num = match.group(1)
@@ -189,7 +204,9 @@ def extract_season_number_from_title(page_title, release_type, release_title="")
                 elif match.group(2) is not None:
                     season_num = int(match.group(2))
         else:
-            trailing_match = re.search(
+            trailing_match = re.search(
+                r"\s+([2-9]\d*|[IVXLCDM]+)\s*$", page_title, re.IGNORECASE
+            )
             if trailing_match:
                 num = trailing_match.group(1)
                 season_candidate = int(num) if num.isdigit() else roman_to_int(num)
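
Note: a worked example of the release-title season pattern added above (the sample titles are invented):

    import re

    pattern = re.compile(r"\.(?:S(\d{1,4})|R(2))(?:E\d{1,4})?", re.IGNORECASE)

    m = pattern.search("Show.Title.S02E05.German.1080p.WEB.x264-GRP")
    assert m.group(1) == "02"  # season read from the S-token

    m = pattern.search("Show.Title.R2.German.1080p.WEB.x264-GRP")
    assert m.group(2) == "2"  # the R2 alternative is treated as season 2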
@@ -224,10 +241,15 @@ def parse_info_from_feed_entry(block, series_page_title, release_type) -> Releas
     audio_icon = block.find("i", class_="fa-volume-up")
     if audio_icon:
         for sib in audio_icon.find_next_siblings():
-            if sib.name == "i" and "fa-closed-captioning" in sib.get("class", []):
+            if sib.name == "i" and "fa-closed-captioning" in sib.get("class", []):
+                break
             if sib.name == "i" and "flag" in sib.get("class", []):
                 code = sib["class"][1].replace("flag-", "").lower()
-                audio_langs.append(
+                audio_langs.append(
+                    {"jp": "Japanese", "de": "German", "en": "English"}.get(
+                        code, code.title()
+                    )
+                )

     # parse subtitle flags
     subtitle_langs: List[str] = []
@@ -236,7 +258,11 @@ def parse_info_from_feed_entry(block, series_page_title, release_type) -> Releas
         for sib in subtitle_icon.find_next_siblings():
             if sib.name == "i" and "flag" in sib.get("class", []):
                 code = sib["class"][1].replace("flag-", "").lower()
-                subtitle_langs.append(
+                subtitle_langs.append(
+                    {"jp": "Japanese", "de": "German", "en": "English"}.get(
+                        code, code.title()
+                    )
+                )

     # resolution
     m_res = re.search(r":\s*([0-9]{3,4}p)", text, re.IGNORECASE)
@@ -267,12 +293,13 @@ def parse_info_from_feed_entry(block, series_page_title, release_type) -> Releas
         season_part=None,
         season=season_num,
         episode_min=episode_min,
-        episode_max=episode_max
+        episode_max=episode_max,
     )


-def parse_info_from_download_item(
-
+def parse_info_from_download_item(
+    tab, content, page_title=None, release_type=None, requested_episode=None
+) -> ReleaseInfo:
     """
     Parse a BeautifulSoup 'tab' from a download item into ReleaseInfo.
     """
@@ -284,7 +311,7 @@ def parse_info_from_download_item(tab, content, page_title=None, release_type=No
     release_title = None
     if notes_text:
         rn_with_dots = notes_text.replace(" ", ".").replace(".-.", "-")
-        rn_no_dot_duplicates = re.sub(r
+        rn_no_dot_duplicates = re.sub(r"\.{2,}", ".", rn_with_dots)
         if "." in rn_with_dots and "-" in rn_with_dots:
             # Check if string ends with Group tag (word after dash) - this should prevent false positives
             if re.search(r"-[\s.]?\w+$", rn_with_dots):
@@ -297,17 +324,25 @@ def parse_info_from_download_item(tab, content, page_title=None, release_type=No
         match = re.search(r"(\d+)\s*x\s*(\d+)", res_td.get_text(strip=True))
         if match:
             h = int(match.group(2))
-            resolution =
+            resolution = "2160p" if h >= 2000 else "1080p" if h >= 1000 else "720p"

     # audio and subtitles
-    audio_codes = [
-
-
-
-
-
-
+    audio_codes = [
+        icon["class"][1].replace("flag-", "")
+        for icon in tab.select("tr:has(th>i.fa-volume-up) i.flag")
+    ]
+    audio_langs = [
+        {"jp": "Japanese", "de": "German", "en": "English"}.get(c, c.title())
+        for c in audio_codes
+    ]
+    sub_codes = [
+        icon["class"][1].replace("flag-", "")
+        for icon in tab.select("tr:has(th>i.fa-closed-captioning) i.flag")
+    ]
+    subtitle_langs = [
+        {"jp": "Japanese", "de": "German", "en": "English"}.get(c, c.title())
+        for c in sub_codes
+    ]

     # audio codec
     if "flac" in notes_lower:
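
Note: the three-entry flag map now appears in four places in this file. A sketch of the shared behaviour (helper names are ours, not Quasarr's):

    LANG = {"jp": "Japanese", "de": "German", "en": "English"}

    def flag_to_language(code: str) -> str:
        # Known codes map explicitly; anything else falls back to
        # title case, so an unmapped "fr" becomes "Fr".
        return LANG.get(code.lower(), code.title())

    def bucket_resolution(height: int) -> str:
        # Pixel-height bucketing as in the download-item hunk above.
        return "2160p" if height >= 2000 else "1080p" if height >= 1000 else "720p"

    assert flag_to_language("de") == "German"
    assert bucket_resolution(1440) == "1080p"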
@@ -365,18 +400,27 @@ def parse_info_from_download_item(tab, content, page_title=None, release_type=No
     if not season_num:
         season_num = find_season_in_release_notes(content)
     if not season_num:
-        season_num = extract_season_number_from_title(
+        season_num = extract_season_number_from_title(
+            page_title, release_type, release_title=release_title
+        )

     # check if season part info is present
     season_part: Optional[int] = None
     if page_title:
-        match = re.search(
+        match = re.search(
+            r"(?i)\b(?:Part|Teil)\s+(\d+|[IVX]+)\b", page_title, re.IGNORECASE
+        )
         if match:
             num = match.group(1)
             season_part = int(num) if num.isdigit() else roman_to_int(num)
             part_string = f"Part.{season_part}"
             if release_title and part_string not in release_title:
-                release_title = re.sub(
+                release_title = re.sub(
+                    r"\.(German|Japanese|English)\.",
+                    f".{part_string}.\\1.",
+                    release_title,
+                    1,
+                )

     # determine if optional episode exists on release page
     episode_min: Optional[int] = None
@@ -384,7 +428,9 @@ def parse_info_from_download_item(tab, content, page_title=None, release_type=No
     if requested_episode:
         episodes_div = tab.find("div", class_="episodes")
         if episodes_div:
-            episode_links = episodes_div.find_all(
+            episode_links = episodes_div.find_all(
+                "a", attrs={"data-loop": re.compile(r"^\d+$")}
+            )
             total_episodes = len(episode_links)
             if total_episodes > 0:
                 ep = int(requested_episode)
@@ -393,11 +439,11 @@ def parse_info_from_download_item(tab, content, page_title=None, release_type=No
                 episode_max = total_episodes
                 if release_title:
                     release_title = re.sub(
-                        r
+                        r"(?<=\.)S(\d{1,4})(?=\.)",
                         lambda m: f"S{int(m.group(1)):02d}E{ep:02d}",
                         release_title,
                         count=1,
-                        flags=re.IGNORECASE
+                        flags=re.IGNORECASE,
                     )

     return ReleaseInfo(
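
Note: a worked example of the season-token rewrite above, with an invented title. When a single episode is requested, ".S02." becomes ".S02E05.":

    import re

    ep = 5
    title = "Show.Title.S02.German.1080p.WEB.x264-GRP"
    rewritten = re.sub(
        r"(?<=\.)S(\d{1,4})(?=\.)",
        lambda m: f"S{int(m.group(1)):02d}E{ep:02d}",
        title,
        count=1,
        flags=re.IGNORECASE,
    )
    assert rewritten == "Show.Title.S02E05.German.1080p.WEB.x264-GRP"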
@@ -412,16 +458,16 @@ def parse_info_from_download_item(tab, content, page_title=None, release_type=No
         season_part=season_part,
         season=season_num,
         episode_min=episode_min,
-        episode_max=episode_max
+        episode_max=episode_max,
     )


 def guess_title(shared_state, page_title, release_info: ReleaseInfo) -> str:
     # remove labels
-    clean_title = page_title.rsplit(
+    clean_title = page_title.rsplit("(", 1)[0].strip()
     # Remove season/staffel info
-    pattern = r
-    clean_title = re.sub(pattern,
+    pattern = r"(?i)\b(?:Season|Staffel)\s*\.?\s*\d+\b|\bR\d+\b"
+    clean_title = re.sub(pattern, "", clean_title)

     # determine season token
     if release_info.season is not None:
@@ -430,13 +476,13 @@ def guess_title(shared_state, page_title, release_info: ReleaseInfo) -> str:
         season_token = ""

     # episode token
-    ep_token =
+    ep_token = ""
     if release_info.episode_min is not None:
         s = release_info.episode_min
         e = release_info.episode_max if release_info.episode_max is not None else s
         ep_token = f"E{s:02d}" + (f"-{e:02d}" if e != s else "")

-    title_core = clean_title.strip().replace(
+    title_core = clean_title.strip().replace(" ", ".")
     if season_token:
         title_core += f".{season_token}{ep_token}"
     elif ep_token:
@@ -450,23 +496,24 @@ def guess_title(shared_state, page_title, release_info: ReleaseInfo) -> str:
         if part_string not in title_core:
             parts.append(part_string)

-    prefix =
+    prefix = ""
     a, su = release_info.audio_langs, release_info.subtitle_langs
-    if len(a) > 2 and
-        prefix =
-    elif len(a) == 2 and
-        prefix =
-    elif len(a) == 1 and
-        prefix =
-    elif a and
+    if len(a) > 2 and "German" in a:
+        prefix = "German.ML"
+    elif len(a) == 2 and "German" in a:
+        prefix = "German.DL"
+    elif len(a) == 1 and "German" in a:
+        prefix = "German"
+    elif a and "German" in su:
         prefix = f"{a[0]}.Subbed"
-    if prefix:
+    if prefix:
+        parts.append(prefix)

     if release_info.audio:
         parts.append(release_info.audio)

     parts.extend([release_info.resolution, release_info.source, release_info.video])
-    title =
+    title = ".".join(parts)
     if release_info.release_group:
         title += f"-{release_info.release_group}"
     return shared_state.sanitize_title(title)
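
Note: the language-prefix selection in guess_title reads directly off the audio and subtitle lists ("DL" and "ML" are the scene tags for dual- and multi-language releases). A condensed restatement (function name is ours):

    def language_prefix(audio, subs) -> str:
        if len(audio) > 2 and "German" in audio:
            return "German.ML"
        if len(audio) == 2 and "German" in audio:
            return "German.DL"
        if len(audio) == 1 and "German" in audio:
            return "German"
        if audio and "German" in subs:
            return f"{audio[0]}.Subbed"
        return ""

    assert language_prefix(["German", "Japanese"], []) == "German.DL"
    assert language_prefix(["Japanese"], ["German"]) == "Japanese.Subbed"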
@@ -476,7 +523,9 @@ def check_release(shared_state, details_html, release_id, title, episode_in_titl
     soup = BeautifulSoup(details_html, "html.parser")

     if int(release_id) == 0:
-        info(
+        info(
+            "Feed download detected, hard-coding release_id to 1 to achieve successful download"
+        )
         release_id = 1
         # The following logic works, but the highest release ID sometimes does not have the desired episode
         #
@@ -507,12 +556,19 @@ def check_release(shared_state, details_html, release_id, title, episode_in_titl
         else:
             release_type = "movie"

-        release_info = parse_info_from_download_item(
-
+        release_info = parse_info_from_download_item(
+            tab,
+            soup,
+            page_title=page_title,
+            release_type=release_type,
+            requested_episode=episode_in_title,
+        )
         real_title = release_info.release_title
         if real_title:
             if real_title.lower() != title.lower():
-                info(
+                info(
+                    f'Identified true release title "{real_title}" on details page'
+                )
                 return real_title, release_id
             else:
                 # Overwrite values so guessing the title only applies the requested episode
@@ -522,22 +578,26 @@ def check_release(shared_state, details_html, release_id, title, episode_in_titl

             guessed_title = guess_title(shared_state, page_title, release_info)
             if guessed_title and guessed_title.lower() != title.lower():
-                info(
+                info(
+                    f'Adjusted guessed release title to "{guessed_title}" from details page'
+                )
                 return guessed_title, release_id
     except Exception as e:
         info(f"Error guessing release title from release: {e}")
-        mark_hostname_issue(
+        mark_hostname_issue(
+            hostname, "download", str(e) if "e" in dir() else "Download error"
+        )

     return title, release_id


 def extract_episode(title: str) -> int | None:
-    match = re.search(r
+    match = re.search(r"\bS\d{1,4}E(\d+)\b(?![\-E\d])", title)
     if match:
         return int(match.group(1))

-    if not re.search(r
-        match = re.search(r
+    if not re.search(r"\bS\d{1,4}\b", title):
+        match = re.search(r"\.E(\d+)\b(?![\-E\d])", title)
     if match:
         return int(match.group(1))

@@ -557,8 +617,10 @@ def get_al_download_links(shared_state, url, mirror, title, password):

     # Check if FlareSolverr is available - AL requires it
     if not is_flaresolverr_available(shared_state):
-        info(
-
+        info(
+            f'"{hostname.upper()}" requires FlareSolverr which is not configured. '
+            f"Please configure FlareSolverr in the web UI to use this site."
+        )
         return {}

     release_id = password  # password field carries release_id for AL
@@ -583,7 +645,9 @@ def get_al_download_links(shared_state, url, mirror, title, password):
     else:
         selection = "cnl"

-    title, release_id = check_release(
+    title, release_id = check_release(
+        shared_state, details_html, release_id, title, episode_in_title
+    )
     if int(release_id) == 0:
         info(f"No valid release ID found for {title} - Download failed!")
         return {}
@@ -607,7 +671,7 @@ def get_al_download_links(shared_state, url, mirror, title, password):
         method="POST",
         target_url=post_url,
         post_data=payload,
-        timeout=30
+        timeout=30,
     )

     status = result.get("status_code")
@@ -630,9 +694,9 @@ def get_al_download_links(shared_state, url, mirror, title, password):

     tries = 0
     if code == "success" and content_items:
-        info(
+        info("CAPTCHA not required")
     elif message == "cnl_login":
-        info(
+        info("Login expired, re-creating session...")
         invalidate_session(shared_state)
     else:
         tries = 0
@@ -643,10 +707,16 @@ def get_al_download_links(shared_state, url, mirror, title, password):
                 f"Starting attempt {tries} to solve CAPTCHA for "
                 f"{f'episode {episode_in_title}' if selection and selection != 'cnl' else 'all links'}"
             )
-            attempt = solve_captcha(
-
+            attempt = solve_captcha(
+                hostname,
+                shared_state,
+                fetch_via_flaresolverr,
+                fetch_via_requests_session,
+            )

-            solved = (
+            solved = (
+                unwrap_flaresolverr_body(attempt.get("response")) == "1"
+            )
             captcha_id = attempt.get("captcha_id", None)

             if solved and captcha_id:
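
Note: the CAPTCHA flow above is a bounded retry loop: solve, POST the solution, inspect code/message, and back off when the site reports that solutions arrive too quickly. A schematic sketch (solve_once and the result keys are placeholders, not Quasarr APIs):

    import time

    def retry_captcha(solve_once, max_tries: int = 3, slowdown_wait: int = 30):
        for attempt in range(1, max_tries + 1):
            result = solve_once()  # placeholder: solve, POST, parse response
            if result.get("code") == "success":
                return result
            if "slowndown" in str(result.get("message", "")).lower():
                # "slowndown" (sic) is the literal marker the code checks for.
                time.sleep(slowdown_wait)
        return {}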
@@ -654,18 +724,21 @@ def get_al_download_links(shared_state, url, mirror, title, password):
                     "enc": b64,
                     "response": "captcha",
                     "captcha-idhf": 0,
-                    "captcha-hf": captcha_id
+                    "captcha-hf": captcha_id,
                 }
-                check_solution = fetch_via_flaresolverr(
-
-
-
-
+                check_solution = fetch_via_flaresolverr(
+                    shared_state,
+                    method="POST",
+                    target_url=post_url,
+                    post_data=payload,
+                    timeout=30,
+                )
                 try:
                     response_json = check_solution.get("json", {})
                 except ValueError:
                     raise RuntimeError(
-                        f"Unexpected /ajax/captcha response: {check_solution.get('text', '')}"
+                        f"Unexpected /ajax/captcha response: {check_solution.get('text', '')}"
+                    )

                 code = response_json.get("code", "")
                 message = response_json.get("message", "")
@@ -673,32 +746,50 @@ def get_al_download_links(shared_state, url, mirror, title, password):

                 if code == "success":
                     if content_items:
-                        info(
+                        info(
+                            "CAPTCHA solved successfully on attempt {}.".format(
+                                tries
+                            )
+                        )
                         break
                     else:
-                        info(
-
+                        info(
+                            f"CAPTCHA was solved, but no links are available for the selection!"
+                        )
+                        StatsHelper(
+                            shared_state
+                        ).increment_failed_decryptions_automatic()
                         return {}
                 elif message == "cnl_login":
-                    info(
+                    info("Login expired, re-creating session...")
                     invalidate_session(shared_state)
                 else:
                     info(
-                        f"CAPTCHA POST returned code={code}, message={message}. Retrying... (attempt {tries})"
+                        f"CAPTCHA POST returned code={code}, message={message}. Retrying... (attempt {tries})"
+                    )

                     if "slowndown" in str(message).lower():
                         wait_period = 30
                         info(
-                            f"CAPTCHAs solved too quickly. Waiting {wait_period} seconds before next attempt..."
+                            f"CAPTCHAs solved too quickly. Waiting {wait_period} seconds before next attempt..."
+                        )
                         time.sleep(wait_period)
                     else:
-                        info(
+                        info(
+                            f"CAPTCHA solver returned invalid solution, retrying... (attempt {tries})"
+                        )

         except RuntimeError as e:
             info(f"Error solving CAPTCHA: {e}")
-            mark_hostname_issue(
+            mark_hostname_issue(
+                hostname,
+                "download",
+                str(e) if "e" in dir() else "Download error",
+            )
         else:
-            info(
+            info(
+                f"CAPTCHA solver returned invalid solution, retrying... (attempt {tries})"
+            )

         if code != "success":
             info(
@@ -714,10 +805,14 @@ def get_al_download_links(shared_state, url, mirror, title, password):
             debug(f"Decrypted URLs: {links}")
         except Exception as e:
             info(f"Error during decryption: {e}")
-            mark_hostname_issue(
+            mark_hostname_issue(
+                hostname, "download", str(e) if "e" in dir() else "Download error"
+            )
     except Exception as e:
         info(f"Error loading AL download: {e}")
-        mark_hostname_issue(
+        mark_hostname_issue(
+            hostname, "download", str(e) if "e" in dir() else "Download error"
+        )
         invalidate_session(shared_state)

     success = bool(links)
@@ -728,8 +823,4 @@ def get_al_download_links(shared_state, url, mirror, title, password):

     links_with_mirrors = [[url, derive_mirror(url)] for url in links]

-    return {
-        "links": links_with_mirrors,
-        "password": f"www.{al}",
-        "title": title
-    }
+    return {"links": links_with_mirrors, "password": f"www.{al}", "title": title}
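
Note: per the final hunk, the function returns a compact dict on success; each link is paired with a mirror name derived from its hostname. The shape, with invented values:

    result = {
        "links": [["https://mirror-host.example/f/abc123", "mirror-host"]],
        "password": "www.<al-hostname>",  # f"www.{al}" in the source
        "title": "Show.Title.S02E05.German.1080p.WEB.x264-GRP",
    }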