quasarr 1.4.1__py3-none-any.whl → 1.20.4__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (67)
  1. quasarr/__init__.py +157 -67
  2. quasarr/api/__init__.py +126 -43
  3. quasarr/api/arr/__init__.py +197 -78
  4. quasarr/api/captcha/__init__.py +885 -39
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +84 -22
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +236 -487
  9. quasarr/downloads/linkcrypters/al.py +237 -0
  10. quasarr/downloads/linkcrypters/filecrypt.py +178 -31
  11. quasarr/downloads/linkcrypters/hide.py +123 -0
  12. quasarr/downloads/packages/__init__.py +461 -0
  13. quasarr/downloads/sources/al.py +697 -0
  14. quasarr/downloads/sources/by.py +106 -0
  15. quasarr/downloads/sources/dd.py +6 -78
  16. quasarr/downloads/sources/dj.py +7 -0
  17. quasarr/downloads/sources/dt.py +1 -1
  18. quasarr/downloads/sources/dw.py +2 -2
  19. quasarr/downloads/sources/he.py +112 -0
  20. quasarr/downloads/sources/mb.py +47 -0
  21. quasarr/downloads/sources/nk.py +51 -0
  22. quasarr/downloads/sources/nx.py +36 -81
  23. quasarr/downloads/sources/sf.py +27 -4
  24. quasarr/downloads/sources/sj.py +7 -0
  25. quasarr/downloads/sources/sl.py +90 -0
  26. quasarr/downloads/sources/wd.py +110 -0
  27. quasarr/providers/cloudflare.py +204 -0
  28. quasarr/providers/html_images.py +20 -0
  29. quasarr/providers/html_templates.py +48 -39
  30. quasarr/providers/imdb_metadata.py +15 -2
  31. quasarr/providers/myjd_api.py +34 -5
  32. quasarr/providers/notifications.py +30 -5
  33. quasarr/providers/obfuscated.py +35 -0
  34. quasarr/providers/sessions/__init__.py +0 -0
  35. quasarr/providers/sessions/al.py +286 -0
  36. quasarr/providers/sessions/dd.py +78 -0
  37. quasarr/providers/sessions/nx.py +76 -0
  38. quasarr/providers/shared_state.py +347 -20
  39. quasarr/providers/statistics.py +154 -0
  40. quasarr/providers/version.py +1 -1
  41. quasarr/search/__init__.py +112 -36
  42. quasarr/search/sources/al.py +448 -0
  43. quasarr/search/sources/by.py +203 -0
  44. quasarr/search/sources/dd.py +17 -6
  45. quasarr/search/sources/dj.py +213 -0
  46. quasarr/search/sources/dt.py +37 -7
  47. quasarr/search/sources/dw.py +27 -47
  48. quasarr/search/sources/fx.py +27 -29
  49. quasarr/search/sources/he.py +196 -0
  50. quasarr/search/sources/mb.py +195 -0
  51. quasarr/search/sources/nk.py +188 -0
  52. quasarr/search/sources/nx.py +22 -6
  53. quasarr/search/sources/sf.py +143 -151
  54. quasarr/search/sources/sj.py +213 -0
  55. quasarr/search/sources/sl.py +246 -0
  56. quasarr/search/sources/wd.py +208 -0
  57. quasarr/storage/config.py +20 -4
  58. quasarr/storage/setup.py +216 -51
  59. quasarr-1.20.4.dist-info/METADATA +304 -0
  60. quasarr-1.20.4.dist-info/RECORD +72 -0
  61. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/WHEEL +1 -1
  62. quasarr/providers/tvmaze_metadata.py +0 -23
  63. quasarr-1.4.1.dist-info/METADATA +0 -174
  64. quasarr-1.4.1.dist-info/RECORD +0 -43
  65. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/entry_points.txt +0 -0
  66. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/licenses/LICENSE +0 -0
  67. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/top_level.txt +0 -0
quasarr/downloads/sources/by.py
@@ -0,0 +1,106 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import concurrent.futures
+ import re
+ import time
+ from urllib.parse import urlparse
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.log import info, debug
+
+
+ def get_by_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
+     by = shared_state.values["config"]("Hostnames").get("by")
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+     }
+
+     mirror_lower = mirror.lower() if mirror else None
+     links = []
+
+     try:
+         resp = requests.get(url, headers=headers, timeout=10)
+         page_content = resp.text
+         soup = BeautifulSoup(page_content, "html.parser")
+         frames = [iframe.get("src") for iframe in soup.find_all("iframe") if iframe.get("src")]
+
+         frame_urls = [src for src in frames if f'https://{by}' in src]
+         if not frame_urls:
+             debug(f"No iframe hosts found on {url} for {title}.")
+             return []
+
+         async_results = []
+
+         def fetch(url):
+             try:
+                 r = requests.get(url, headers=headers, timeout=10)
+                 return r.text, url
+             except Exception:
+                 info(f"Error fetching iframe URL: {url}")
+                 return None, url
+
+         with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
+             future_to_url = {executor.submit(fetch, url): url for url in frame_urls}
+             for future in concurrent.futures.as_completed(future_to_url):
+                 content, source = future.result()
+                 if content:
+                     async_results.append((content, source))
+
+         url_hosters = []
+         for content, source in async_results:
+             host_soup = BeautifulSoup(content, "html.parser")
+             link = host_soup.find("a", href=re.compile(
+                 r"https?://(?:www\.)?(?:hide\.cx|filecrypt\.(?:cc|co|to))/container/"))
+
+             # Fallback to the old format
+             if not link:
+                 link = host_soup.find("a", href=re.compile(r"/go\.php\?"))
+
+             if not link:
+                 continue
+
+             href = link["href"]
+             hostname = link.text.strip().replace(" ", "")
+             hostname_lower = hostname.lower()
+
+             if mirror_lower and mirror_lower not in hostname_lower:
+                 debug(f'Skipping link from "{hostname}" (not the desired mirror "{mirror}")!')
+                 continue
+
+             url_hosters.append((href, hostname))
+
+         def resolve_redirect(href_hostname):
+             href, hostname = href_hostname
+             try:
+                 r = requests.get(href, headers=headers, timeout=10, allow_redirects=True)
+                 if "/404.html" in r.url:
+                     info(f"Link leads to 404 page for {hostname}: {r.url}")
+                     return None
+                 time.sleep(1)
+                 return r.url
+             except Exception as e:
+                 info(f"Error resolving link for {hostname}: {e}")
+                 return None
+
+         for pair in url_hosters:
+             resolved_url = resolve_redirect(pair)
+             hostname = pair[1]
+
+             if not hostname:
+                 hostname = urlparse(resolved_url).hostname
+
+             if resolved_url and hostname and hostname.startswith(("ddownload", "rapidgator", "turbobit", "filecrypt")):
+                 if "rapidgator" in hostname:
+                     links.insert(0, [resolved_url, hostname])
+                 else:
+                     links.append([resolved_url, hostname])
+
+
+     except Exception as e:
+         info(f"Error loading BY download links: {e}")
+
+     return links
quasarr/downloads/sources/dd.py
@@ -2,83 +2,11 @@
  # Quasarr
  # Project by https://github.com/rix1337

- import base64
- import pickle
-
- import requests
-
  from quasarr.providers.log import info, debug
+ from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session


- def create_and_persist_session(shared_state):
-     dd = shared_state.values["config"]("Hostnames").get("dd")
-
-     dd_session = requests.Session()
-
-     cookies = {}
-     headers = {
-         'User-Agent': shared_state.values["user_agent"],
-     }
-
-     data = {
-         'username': shared_state.values["config"]("DD").get("user"),
-         'password': shared_state.values["config"]("DD").get("password"),
-         'ajax': 'true',
-         'Login': 'true',
-     }
-
-     dd_response = dd_session.post(f'https://{dd}/index/index',
-                                   cookies=cookies, headers=headers, data=data, timeout=10)
-
-     error = False
-     if dd_response.status_code == 200:
-         try:
-             response_data = dd_response.json()
-             if not response_data.get('loggedin'):
-                 info("DD rejected login.")
-                 raise ValueError
-             session_id = dd_response.cookies.get("PHPSESSID")
-             if session_id:
-                 dd_session.cookies.set('PHPSESSID', session_id, domain=dd)
-             else:
-                 info("Invalid DD response on login.")
-                 error = True
-         except ValueError:
-             info("Could not parse DD response on login.")
-             error = True
-
-         if error:
-             shared_state.values["config"]("DD").save("user", "")
-             shared_state.values["config"]("DD").save("password", "")
-             return None
-
-         serialized_session = pickle.dumps(dd_session)
-         session_string = base64.b64encode(serialized_session).decode('utf-8')
-         shared_state.values["database"]("sessions").update_store("dd", session_string)
-         return dd_session
-     else:
-         info("Could not create DD session")
-         return None
-
-
- def retrieve_and_validate_session(shared_state):
-     session_string = shared_state.values["database"]("sessions").retrieve("dd")
-     if not session_string:
-         dd_session = create_and_persist_session(shared_state)
-     else:
-         try:
-             serialized_session = base64.b64decode(session_string.encode('utf-8'))
-             dd_session = pickle.loads(serialized_session)
-             if not isinstance(dd_session, requests.Session):
-                 raise ValueError("Retrieved object is not a valid requests.Session instance.")
-         except Exception as e:
-             info(f"Session retrieval failed: {e}")
-             dd_session = create_and_persist_session(shared_state)
-
-     return dd_session
-
-
- def get_dd_download_links(shared_state, mirror, search_string):
+ def get_dd_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
      dd = shared_state.values["config"]("Hostnames").get("dd")

      dd_session = retrieve_and_validate_session(shared_state)
@@ -107,7 +35,7 @@ def get_dd_download_links(shared_state, mirror, search_string):
      try:
          release_list = []
          for page in range(0, 100, 20):
-             url = f'https://{dd}/index/search/keyword/{search_string}/qualities/{','.join(qualities)}/from/{page}/search'
+             url = f'https://{dd}/index/search/keyword/{title}/qualities/{','.join(qualities)}/from/{page}/search'

              releases_on_page = dd_session.get(url, headers=headers, timeout=10).json()
              if releases_on_page:
@@ -119,7 +47,7 @@ def get_dd_download_links(shared_state, mirror, search_string):
                          debug(f"Release {release.get('release')} marked as fake. Invalidating DD session...")
                          create_and_persist_session(shared_state)
                          return []
-                     elif release.get("release") == search_string:
+                     elif release.get("release") == title:
                          filtered_links = []
                          for link in release["links"]:
                              if mirror and mirror not in link["hostname"]:
@@ -139,10 +67,10 @@ def get_dd_download_links(shared_state, mirror, search_string):
                          links = [link["url"] for link in filtered_links]
                          break
                  except Exception as e:
-                     info(f"Error parsing DD feed: {e}")
+                     info(f"Error parsing DD download: {e}")
                      continue

      except Exception as e:
-         info(f"Error loading DD feed: {e}")
+         info(f"Error loading DD download: {e}")

      return links
quasarr/downloads/sources/dj.py
@@ -0,0 +1,7 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+
+ def get_dj_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
+     return [url]
quasarr/downloads/sources/dt.py
@@ -8,7 +8,7 @@ from bs4 import BeautifulSoup
  from quasarr.providers.log import info


- def get_dt_download_links(shared_state, url, mirror, title):
+ def get_dt_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
      headers = {"User-Agent": shared_state.values["user_agent"]}
      session = requests.Session()

quasarr/downloads/sources/dw.py
@@ -10,7 +10,7 @@ from bs4 import BeautifulSoup
  from quasarr.providers.log import info, debug


- def get_dw_download_links(shared_state, url, mirror, title):
+ def get_dw_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
      dw = shared_state.values["config"]("Hostnames").get("dw")
      ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"

@@ -53,7 +53,7 @@ def get_dw_download_links(shared_state, url, mirror, title):

          hoster = button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
          hoster = f"1fichier" if hoster.startswith("fichier") else hoster  # align with expected mirror name
-         if mirror and mirror not in hoster:
+         if mirror and mirror.lower() not in hoster.lower():
              debug(f'Skipping link from "{hoster}" (not the desired mirror "{mirror}")!')
              continue

quasarr/downloads/sources/he.py
@@ -0,0 +1,112 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import re
+ from urllib.parse import urlparse, urljoin
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.log import info, debug
+
+ hostname = "he"
+
+
+ def get_he_download_links(shared_state, url, mirror, title):
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+     }
+
+     session = requests.Session()
+
+     try:
+         resp = session.get(url, headers=headers, timeout=30)
+         soup = BeautifulSoup(resp.text, 'html.parser')
+     except Exception as e:
+         info(f"{hostname}: could not fetch release for {title}: {e}")
+         return False
+
+     imdb_id = None
+     try:
+         imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
+         if imdb_link:
+             href = imdb_link['href'].strip()
+             m = re.search(r"(tt\d{4,7})", href)
+             if m:
+                 imdb_id = m.group(1)
+             else:
+                 debug(f"{hostname}: imdb_id not found for title {title} in link href.")
+         else:
+             debug(f"{hostname}: imdb_id link href not found for title {title}.")
+     except Exception:
+         debug(f"{hostname}: failed to extract imdb_id for title {title}.")
+
+     anchors = []
+     for retries in range(10):
+         form = soup.find('form', id=re.compile(r'content-protector-access-form'))
+         if not form:
+             return False
+
+         action = form.get('action') or url
+         action_url = urljoin(resp.url, action)
+
+         payload = {}
+         for inp in form.find_all('input'):
+             name = inp.get('name')
+             if not name:
+                 continue
+             value = inp.get('value', '')
+             payload[name] = value
+
+         append_patt = re.compile(r"append\(\s*[\'\"](?P<key>[^\'\"]+)[\'\"]\s*,\s*[\'\"](?P<val>[^\'\"]+)[\'\"]\s*\)",
+                                  re.IGNORECASE)
+
+         for script in soup.find_all('script'):
+             txt = script.string if script.string is not None else script.get_text()
+             if not txt:
+                 continue
+             for m in append_patt.finditer(txt):
+                 payload[m.group('key')] = m.group('val')
+
+         post_headers = headers.copy()
+         post_headers.update({'Referer': resp.url})
+         try:
+             resp = session.post(action_url, data=payload, headers=post_headers, timeout=30)
+             soup = BeautifulSoup(resp.text, 'html.parser')
+         except Exception as e:
+             info(f"{hostname}: could not submit protector form for {title}: {e}")
+             break
+
+         unlocked = soup.select('.content-protector-access-form')
+         if unlocked:
+             for u in unlocked:
+                 anchors.extend(u.find_all('a', href=True))
+
+         if anchors:
+             break
+
+     links = []
+     for a in anchors:
+         try:
+             href = a['href'].strip()
+
+             netloc = urlparse(href).netloc
+             hoster = netloc.split(':')[0].lower()
+             parts = hoster.split('.')
+             if len(parts) >= 2:
+                 hoster = parts[-2]
+
+             links.append([href, hoster])
+         except Exception:
+             debug(f"{hostname}: could not resolve download link hoster for {title}")
+             continue
+
+     if not links:
+         info(f"No external download links found on {hostname} page for {title}")
+         return False
+
+     return {
+         "links": links,
+         "imdb_id": imdb_id,
+     }
quasarr/downloads/sources/mb.py
@@ -0,0 +1,47 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import re
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.log import info, debug
+
+
+ def get_mb_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+     }
+
+     try:
+         response = requests.get(url, headers=headers, timeout=10)
+         response.raise_for_status()
+     except Exception as e:
+         info(f"Failed to fetch page for {title or url}: {e}")
+         return False
+
+     soup = BeautifulSoup(response.text, "html.parser")
+
+     download_links = []
+
+     pattern = re.compile(r'https?://(?:www\.)?filecrypt\.[^/]+/Container/', re.IGNORECASE)
+     for a in soup.find_all('a', href=pattern):
+         try:
+             link = a['href']
+             hoster = a.get_text(strip=True).lower()
+
+             if mirror and mirror.lower() not in hoster.lower():
+                 debug(f'Skipping link from "{hoster}" (not the desired mirror "{mirror}")!')
+                 continue
+
+             download_links.append([link, hoster])
+         except Exception as e:
+             debug(f"Error parsing MB download links: {e}")
+
+     if not download_links:
+         info(f"No download links found for {title}. Site structure may have changed. - {url}")
+         return False
+
+     return download_links
quasarr/downloads/sources/nk.py
@@ -0,0 +1,51 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.log import info
+
+ hostname = "nk"
+
+
+ def get_nk_download_links(shared_state, url, mirror, title):
+     host = shared_state.values["config"]("Hostnames").get(hostname)
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+     }
+
+     session = requests.Session()
+
+     try:
+         resp = session.get(url, headers=headers, timeout=20)
+         soup = BeautifulSoup(resp.text, 'html.parser')
+     except Exception as e:
+         info(f"{hostname}: could not fetch release page for {title}: {e}")
+         return False
+
+     anchors = soup.select('a.btn-orange')
+     candidates = []
+     for a in anchors:
+
+         href = a.get('href', '').strip()
+         hoster = href.split('/')[3].lower()
+         if not href.lower().startswith(('http://', 'https://')):
+             href = 'https://' + host + href
+
+         try:
+             href = requests.head(href, headers=headers, allow_redirects=True, timeout=20).url
+         except Exception as e:
+             info(f"{hostname}: could not resolve download link for {title}: {e}")
+             continue
+
+         if hoster == 'ddl.to':
+             hoster = 'ddownload'
+
+         candidates.append([href, hoster])
+
+     if not candidates:
+         info(f"No external download links found on {hostname} page for {title}")
+
+     return candidates
quasarr/downloads/sources/nx.py
@@ -2,104 +2,53 @@
  # Quasarr
  # Project by https://github.com/rix1337

- import base64
- import pickle
  import re

  import requests
  from bs4 import BeautifulSoup

  from quasarr.providers.log import info
+ from quasarr.providers.sessions.nx import retrieve_and_validate_session


- def create_and_persist_session(shared_state):
-     nx = shared_state.values["config"]("Hostnames").get("nx")
-
-     nx_session = requests.Session()
+ def get_filer_folder_links_via_api(shared_state, url):
+     try:
+         headers = {
+             'User-Agent': shared_state.values["user_agent"],
+             'Referer': url
+         }

-     cookies = {}
-     headers = {
-         'User-Agent': shared_state.values["user_agent"],
-     }
+         m = re.search(r"/folder/([A-Za-z0-9]+)", url)
+         if not m:
+             return url  # not a folder URL

-     json_data = {
-         'username': shared_state.values["config"]("NX").get("user"),
-         'password': shared_state.values["config"]("NX").get("password")
-     }
+         folder_hash = m.group(1)
+         api_url = f"https://filer.net/api/folder/{folder_hash}"

-     nx_response = nx_session.post(f'https://{nx}/api/user/auth', cookies=cookies, headers=headers, json=json_data,
-                                   timeout=10)
+         response = requests.get(api_url, headers=headers, timeout=10)
+         if not response or response.status_code != 200:
+             return url

-     error = False
-     if nx_response.status_code == 200:
-         try:
-             response_data = nx_response.json()
-             if response_data.get('err', {}).get('status') == 403:
-                 info("Invalid NX credentials provided.")
-                 error = True
-             elif response_data.get('user').get('username') != shared_state.values["config"]("NX").get("user"):
-                 info("Invalid NX response on login.")
-                 error = True
-             else:
-                 sessiontoken = response_data.get('user').get('sessiontoken')
-                 nx_session.cookies.set('sessiontoken', sessiontoken, domain=nx)
-         except ValueError:
-             info("Could not parse NX response on login.")
-             error = True
-
-         if error:
-             shared_state.values["config"]("NX").save("user", "")
-             shared_state.values["config"]("NX").save("password", "")
-             return None
-
-         serialized_session = pickle.dumps(nx_session)
-         session_string = base64.b64encode(serialized_session).decode('utf-8')
-         shared_state.values["database"]("sessions").update_store("nx", session_string)
-         return nx_session
-     else:
-         info("Could not create NX session")
-         return None
-
-
- def retrieve_and_validate_session(shared_state):
-     session_string = shared_state.values["database"]("sessions").retrieve("nx")
-     if not session_string:
-         nx_session = create_and_persist_session(shared_state)
-     else:
-         try:
-             serialized_session = base64.b64decode(session_string.encode('utf-8'))
-             nx_session = pickle.loads(serialized_session)
-             if not isinstance(nx_session, requests.Session):
-                 raise ValueError("Retrieved object is not a valid requests.Session instance.")
-         except Exception as e:
-             info(f"Session retrieval failed: {e}")
-             nx_session = create_and_persist_session(shared_state)
+         data = response.json()
+         files = data.get("files", [])
+         links = []

-     return nx_session
+         # Build download URLs from their file hashes
+         for f in files:
+             file_hash = f.get("hash")
+             if not file_hash:
+                 continue
+             dl_url = f"https://filer.net/get/{file_hash}"
+             links.append(dl_url)

+         # Return extracted links or fallback
+         return links if links else url

- def get_filer_folder_links(shared_state, url):
-     try:
-         headers = {
-             'User-Agent': shared_state.values["user_agent"],
-             'Referer': url
-         }
-         response = requests.get(url, headers=headers, timeout=10)
-         links = []
-         if response:
-             soup = BeautifulSoup(response.content, 'html.parser')
-             folder_links = soup.find_all('a', href=re.compile("/get/"))
-             for link in folder_links:
-                 link = "https://filer.net" + link.get('href')
-                 if link not in links:
-                     links.append(link)
-             return links
      except:
-         pass
-     return url
+         return url


- def get_nx_download_links(shared_state, url, title):
+ def get_nx_download_links(shared_state, url, mirror, title):  # signature must align with other download link functions!
      nx = shared_state.values["config"]("Hostnames").get("nx")

      if f"{nx}/release/" not in url:
@@ -134,11 +83,17 @@ def get_nx_download_links(shared_state, url, title):
          shared_state.values["database"]("sessions").delete("nx")
          return []

+     if payload and any(key in payload for key in ("err", "error")):
+         error_msg = payload.get("err") or payload.get("error")
+         info(f"Error decrypting {title!r} URL: {url!r} - {error_msg}")
+         shared_state.values["database"]("sessions").delete("nx")
+         return []
+
      try:
          decrypted_url = payload['link'][0]['url']
          if decrypted_url:
              if "filer.net/folder/" in decrypted_url:
-                 urls = get_filer_folder_links(shared_state, decrypted_url)
+                 urls = get_filer_folder_links_via_api(shared_state, decrypted_url)
              else:
                  urls = [decrypted_url]
              return urls