quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (77)
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
quasarr/downloads/__init__.py
@@ -1,270 +1,333 @@
  # -*- coding: utf-8 -*-
  # Quasarr
  # Project by https://github.com/rix1337
+ #
+ # Special note: The signatures of all handlers must stay the same so we can neatly call them in download()
+ # Same is true for every get_xx_download_links() function in sources/xx.py

  import json

+ from quasarr.downloads.linkcrypters.hide import decrypt_links_if_hide
+ from quasarr.downloads.sources.al import get_al_download_links
+ from quasarr.downloads.sources.by import get_by_download_links
+ from quasarr.downloads.sources.dd import get_dd_download_links
+ from quasarr.downloads.sources.dj import get_dj_download_links
+ from quasarr.downloads.sources.dl import get_dl_download_links
+ from quasarr.downloads.sources.dt import get_dt_download_links
  from quasarr.downloads.sources.dw import get_dw_download_links
+ from quasarr.downloads.sources.he import get_he_download_links
+ from quasarr.downloads.sources.mb import get_mb_download_links
+ from quasarr.downloads.sources.nk import get_nk_download_links
  from quasarr.downloads.sources.nx import get_nx_download_links
- from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException
- from quasarr.providers.notifications import send_discord_captcha_alert
-
-
- def get_first_matching_comment(package, package_links):
-     package_uuid = package.get("uuid")
-     if package_uuid:
-         for link in package_links:
-             if link.get("packageUUID") == package_uuid:
-                 return link.get("comment")
-     return None
-
-
- def get_links_matching_package_uuid(package, package_links):
-     package_uuid = package.get("uuid")
-     link_ids = []
-     if package_uuid:
-         for link in package_links:
-             if link.get("packageUUID") == package_uuid:
-                 link_ids.append(link.get("uuid"))
-     return link_ids
-
-
- def format_eta(seconds):
-     hours = seconds // 3600
-     minutes = (seconds % 3600) // 60
-     seconds = seconds % 60
-     return f"{hours:02}:{minutes:02}:{seconds:02}"
-
-
- def get_packages(shared_state):
-     packages = []
-
-     protected_packages = shared_state.get_db("protected").retrieve_all_titles()
-     if protected_packages:
-         for package in protected_packages:
-             package_id = package[0]
-
-             data = json.loads(package[1])
-             details = {
-                 "title": data["title"],
-                 "urls": data["links"],
-                 "size_mb": data["size_mb"],
-                 "password": data["password"]
-             }
-
-             packages.append({
-                 "details": details,
-                 "location": "queue",
-                 "type": "protected",
-                 "package_id": package_id
-             })
-     try:
-         linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
-     except (TokenExpiredException, RequestTimeoutException, MYJDException):
-         linkgrabber_packages = []
-
-     if linkgrabber_packages:
-         for package in linkgrabber_packages:
-             comment = get_first_matching_comment(package, shared_state.get_device().linkgrabber.query_links())
-             packages.append({
-                 "details": package,
-                 "location": "queue",
-                 "type": "linkgrabber",
-                 "comment": comment,
-                 "uuid": package.get("uuid")
-             })
-     try:
-         downloader_packages = shared_state.get_device().downloads.query_packages()
-     except (TokenExpiredException, RequestTimeoutException, MYJDException):
-         downloader_packages = []
-
-     if downloader_packages:
-         for package in downloader_packages:
-             comment = get_first_matching_comment(package, shared_state.get_device().downloads.query_links())
-             status = package.get("status", "")
-
-             if any(ex_str in status.lower() for ex_str in ["entpacken", "extracting"]) and "ok:" not in status.lower():
-                 finished = False
-             else:
-                 finished = package.get("finished", False)
-
-             packages.append({
-                 "details": package,
-                 "location": "history" if finished else "queue",
-                 "type": "downloader",
-                 "comment": comment,
-                 "uuid": package.get("uuid")
-             })
-
-     downloads = {
-         "queue": [],
-         "history": []
-     }
-     for package in packages:
-         queue_index = 0
-         history_index = 0
-
-         if package["location"] == "queue":
-             time_left = "2376:00:00" # yields "99d" to signify that its not running
-             if package["type"] == "linkgrabber":
-                 details = package["details"]
-                 name = f"[Linkgrabber] {details["name"]}"
-                 try:
-                     mb = mb_left = int(details["bytesTotal"]) / (1024 * 1024)
-                 except KeyError:
-                     mb = mb_left = 0
-                 package_id = package["comment"]
-                 if "movies" in package_id:
-                     category = "movies"
-                 else:
-                     category = "tv"
-                 package_type = "linkgrabber"
-                 package_uuid = package["uuid"]
-             elif package["type"] == "downloader":
-                 details = package["details"]
-                 name = f"[Downloading] {details["name"]}"
-                 try:
-                     if details["eta"]:
-                         time_left = format_eta(int(details["eta"]))
-                 except KeyError:
-                     name = name.replace("[Downloading]", "[Paused]")
-                 try:
-                     mb = int(details["bytesTotal"]) / (1024 * 1024)
-                     mb_left = (int(details["bytesTotal"]) - int(details["bytesLoaded"])) / (1024 * 1024)
-                 except KeyError:
-                     mb = mb_left = 0
-                 package_id = package["comment"]
-                 if "movies" in package_id:
-                     category = "movies"
-                 else:
-                     category = "tv"
-                 package_type = "downloader"
-                 package_uuid = package["uuid"]
-             else:
-                 details = package["details"]
-                 name = f"[CAPTCHA not solved!] {details["title"]}"
-                 mb = mb_left = details["size_mb"]
-                 package_id = package["package_id"]
-                 if "movies" in package_id:
-                     category = "movies"
-                 else:
-                     category = "tv"
-                 package_type = "protected"
-                 package_uuid = None
-
-             try:
-                 downloads["queue"].append({
-                     "index": queue_index,
-                     "nzo_id": package_id,
-                     "priority": "Normal",
-                     "filename": name,
-                     "cat": category,
-                     "mbleft": int(mb_left),
-                     "mb": int(mb),
-                     "status": "Downloading",
-                     "timeleft": time_left,
-                     "type": package_type,
-                     "uuid": package_uuid
-                 })
-             except:
-                 print(f"Parameters missing for {package}")
-             queue_index += 1
-         elif package["location"] == "history":
-             details = package["details"]
-             name = details["name"]
-             size = int(details["bytesLoaded"])
-             storage = details["saveTo"]
-             package_id = package["comment"]
-             if "movies" in package_id:
-                 category = "movies"
-             else:
-                 category = "tv"
-
-             downloads["history"].append({
-                 "fail_message": "",
-                 "category": category,
-                 "storage": storage,
-                 "status": "Completed",
-                 "nzo_id": package_id,
-                 "name": name,
-                 "bytes": int(size),
-                 "type": "downloader",
-                 "uuid": package["uuid"]
-             })
-             history_index += 1
-         else:
-             print(f"Invalid package location {package['location']}")
-
-     return downloads
-
-
- def delete_package(shared_state, package_id):
-     deleted = ""
-
-     packages = get_packages(shared_state)
-     for package_location in packages:
-         for package in packages[package_location]:
-             if package["nzo_id"] == package_id:
-                 if package["type"] == "linkgrabber":
-                     ids = get_links_matching_package_uuid(package, shared_state.get_device().linkgrabber.query_links())
-                     shared_state.get_device().linkgrabber.remove_links(ids, [package["uuid"]])
-                 elif package["type"] == "downloader":
-                     ids = get_links_matching_package_uuid(package, shared_state.get_device().downloads.query_links())
-                     shared_state.get_device().downloads.remove_links(ids, [package["uuid"]])
-                 else:
-                     shared_state.get_db("protected").delete(package_id)
-                 if package_location == "queue":
-                     package_name_field = "filename"
-                 else:
-                     package_name_field = "name"
-
-                 deleted = package[package_name_field]
-                 break
-         if deleted:
-             break
-
-     if deleted:
-         print(f"Deleted package {deleted} with ID {package_id}")
-     else:
-         print(f"Failed to delete package {package_id}")
-     return deleted
-
-
- def download_package(shared_state, request_from, title, url, size_mb, password):
-     if "radarr".lower() in request_from.lower():
-         category = "movies"
-     else:
-         category = "tv"
-
-     package_id = ""
-
-     dw = shared_state.values["config"]("Hostnames").get("dw")
-     nx = shared_state.values["config"]("Hostnames").get("nx")
-
-     if nx.lower() in url.lower():
-         links = get_nx_download_links(shared_state, url, title)
-         print(f"Decrypted {len(links)} download links for {title}")
-         package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}"
-
+ from quasarr.downloads.sources.sf import get_sf_download_links, resolve_sf_redirect
+ from quasarr.downloads.sources.sj import get_sj_download_links
+ from quasarr.downloads.sources.sl import get_sl_download_links
+ from quasarr.downloads.sources.wd import get_wd_download_links
+ from quasarr.downloads.sources.wx import get_wx_download_links
+ from quasarr.providers.log import info
+ from quasarr.providers.notifications import send_discord_message
+ from quasarr.providers.statistics import StatsHelper
+
+
+ def handle_unprotected(shared_state, title, password, package_id, imdb_id, url,
+                        mirror=None, size_mb=None, links=None, func=None, label=""):
+     if func:
+         links = func(shared_state, url, mirror, title)
+
+     if links:
+         info(f"Decrypted {len(links)} download links for {title}")
+         send_discord_message(shared_state, title=title, case="unprotected", imdb_id=imdb_id, source=url)
          added = shared_state.download_package(links, title, password, package_id)
-
          if not added:
-             print(f"Failed to add {title} to linkgrabber")
-             package_id = None
-
-     elif dw.lower() in url.lower():
-         links = get_dw_download_links(shared_state, url, title)
-         print(f"CAPTCHA-Solution required for {title} - {shared_state.values['external_address']}/captcha")
-         send_discord_captcha_alert(shared_state, title)
-         package_id = f"Quasarr_{category}_{str(hash(title + str(links))).replace('-', '')}"
+             fail(title, package_id, shared_state,
+                  reason=f'Failed to add {len(links)} links for "{title}" to linkgrabber')
+             return {"success": False, "title": title}
+     else:
+         fail(title, package_id, shared_state,
+              reason=f'Offline / no links found for "{title}" on {label} - "{url}"')
+         return {"success": False, "title": title}
+
+     StatsHelper(shared_state).increment_package_with_links(links)
+     return {"success": True, "title": title}
+
+
+ def handle_protected(shared_state, title, password, package_id, imdb_id, url,
+                      mirror=None, size_mb=None, func=None, label=""):
+     links = func(shared_state, url, mirror, title)
+     if links:
+         valid_links = [pair for pair in links if "/404.html" not in pair[0]]
+
+         # If none left, IP was banned
+         if not valid_links:
+             fail(
+                 title,
+                 package_id,
+                 shared_state,
+                 reason=f'IP was banned during download of "{title}" on {label} - "{url}"'
+             )
+             return {"success": False, "title": title}
+         links = valid_links
+
+         info(f'CAPTCHA-Solution required for "{title}" at: "{shared_state.values['external_address']}/captcha"')
+         send_discord_message(shared_state, title=title, case="captcha", imdb_id=imdb_id, source=url)
          blob = json.dumps({"title": title, "links": links, "size_mb": size_mb, "password": password})
          shared_state.values["database"]("protected").update_store(package_id, blob)
+     else:
+         fail(title, package_id, shared_state,
+              reason=f'No protected links found for "{title}" on {label} - "{url}"')
+         return {"success": False, "title": title}
+     return {"success": True, "title": title}
+
+
+ def handle_hide(shared_state, title, password, package_id, imdb_id, url, links, label):
+     """
+     Attempt to decrypt hide.cx links and handle the result.
+     Returns a dict with 'handled' (bool) and 'result' (response dict or None).
+     """
+     decrypted = decrypt_links_if_hide(shared_state, links)
+
+     if not decrypted or decrypted.get("status") == "none":
+         return {"handled": False, "result": None}
+
+     status = decrypted.get("status", "error")
+     decrypted_links = decrypted.get("results", [])
+
+     if status == "success":
+         result = handle_unprotected(
+             shared_state, title, password, package_id, imdb_id, url,
+             links=decrypted_links, label=label
+         )
+         return {"handled": True, "result": result}
+     else:
+         fail(title, package_id, shared_state,
+              reason=f'Error decrypting hide.cx links for "{title}" on {label} - "{url}"')
+         return {"handled": True, "result": {"success": False, "title": title}}
+
+
+ def handle_al(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     data = get_al_download_links(shared_state, url, mirror, title, password)
+     links = data.get("links", [])
+     title = data.get("title", title)
+     password = data.get("password", "")
+     return handle_unprotected(
+         shared_state, title, password, package_id, imdb_id, url,
+         links=links,
+         label='AL'
+     )
+
+
+ def handle_by(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     links = get_by_download_links(shared_state, url, mirror, title)
+     if not links:
+         fail(title, package_id, shared_state,
+              reason=f'Offline / no links found for "{title}" on BY - "{url}"')
+         return {"success": False, "title": title}
+
+     decrypt_result = handle_hide(
+         shared_state, title, password, package_id, imdb_id, url, links, 'BY'
+     )
+
+     if decrypt_result["handled"]:
+         return decrypt_result["result"]
+
+     return handle_protected(
+         shared_state, title, password, package_id, imdb_id, url,
+         mirror=mirror,
+         size_mb=size_mb,
+         func=lambda ss, u, m, t: links,
+         label='BY'
+     )
+
+
+ def handle_dl(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     links, extracted_password = get_dl_download_links(shared_state, url, mirror, title)
+     if not links:
+         fail(title, package_id, shared_state,
+              reason=f'Offline / no links found for "{title}" on DL - "{url}"')
+         return {"success": False, "title": title}
+
+     # Use extracted password if available, otherwise fall back to provided password
+     final_password = extracted_password if extracted_password else password
+
+     decrypt_result = handle_hide(
+         shared_state, title, final_password, package_id, imdb_id, url, links, 'DL'
+     )
+
+     if decrypt_result["handled"]:
+         return decrypt_result["result"]
+
+     return handle_protected(
+         shared_state, title, final_password, package_id, imdb_id, url,
+         mirror=mirror,
+         size_mb=size_mb,
+         func=lambda ss, u, m, t: links,
+         label='DL'
+     )
+
+
+ def handle_sf(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     if url.startswith(f"https://{shared_state.values['config']('Hostnames').get('sf')}/external"):
+         url = resolve_sf_redirect(url, shared_state.values["user_agent"])
+     elif url.startswith(f"https://{shared_state.values['config']('Hostnames').get('sf')}/"):
+         data = get_sf_download_links(shared_state, url, mirror, title)
+         url = data.get("real_url")
+         if not imdb_id:
+             imdb_id = data.get("imdb_id")
+
+     if not url:
+         fail(title, package_id, shared_state,
+              reason=f'Failed to get download link from SF for "{title}" - "{url}"')
+         return {"success": False, "title": title}
+
+     return handle_protected(
+         shared_state, title, password, package_id, imdb_id, url,
+         mirror=mirror,
+         size_mb=size_mb,
+         func=lambda ss, u, m, t: [[url, "filecrypt"]],
+         label='SF'
+     )
+
+
+ def handle_sl(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     data = get_sl_download_links(shared_state, url, mirror, title)
+     links = data.get("links")
+     if not imdb_id:
+         imdb_id = data.get("imdb_id")
+     return handle_unprotected(
+         shared_state, title, password, package_id, imdb_id, url,
+         links=links,
+         label='SL'
+     )
+
+
+ def handle_wd(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     data = get_wd_download_links(shared_state, url, mirror, title)
+     links = data.get("links", []) if data else []
+     if not links:
+         fail(title, package_id, shared_state,
+              reason=f'Offline / no links found for "{title}" on WD - "{url}"')
+         return {"success": False, "title": title}
+
+     decrypt_result = handle_hide(
+         shared_state, title, password, package_id, imdb_id, url, links, 'WD'
+     )
+
+     if decrypt_result["handled"]:
+         return decrypt_result["result"]
+
+     return handle_protected(
+         shared_state, title, password, package_id, imdb_id, url,
+         mirror=mirror,
+         size_mb=size_mb,
+         func=lambda ss, u, m, t: links,
+         label='WD'
+     )
+
+
+ def handle_wx(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
+     links = get_wx_download_links(shared_state, url, mirror, title)
+     if not links:
+         fail(title, package_id, shared_state,
+              reason=f'Offline / no links found for "{title}" on WX - "{url}"')
+         return {"success": False, "title": title}
+
+     decrypt_result = handle_hide(
+         shared_state, title, password, package_id, imdb_id, url, links, 'WX'
+     )
+
+     if decrypt_result["handled"]:
+         return decrypt_result["result"]
+
+     return handle_protected(
+         shared_state, title, password, package_id, imdb_id, url,
+         mirror=mirror,
+         size_mb=size_mb,
+         func=lambda ss, u, m, t: links,
+         label='WX'
+     )
+
+
+ def download(shared_state, request_from, title, url, mirror, size_mb, password, imdb_id=None):
+     if "lazylibrarian" in request_from.lower():
+         category = "docs"
+     elif "radarr" in request_from.lower():
+         category = "movies"
+     else:
+         category = "tv"

-     elif "filecrypt".lower() in url.lower():
-         print(f"CAPTCHA-Solution required for {title} - {shared_state.values['external_address']}/captcha")
-         send_discord_captcha_alert(shared_state, title)
-         package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}"
-         blob = json.dumps({"title": title, "links": [[url, "filecrypt"]], "size_mb": size_mb, "password": password})
-         shared_state.values["database"]("protected").update_store(package_id, blob)
+     package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}"
+
+     if imdb_id is not None and imdb_id.lower() == "none":
+         imdb_id = None
+
+     config = shared_state.values["config"]("Hostnames")
+     flags = {
+         'AL': config.get("al"),
+         'BY': config.get("by"),
+         'DD': config.get("dd"),
+         'DJ': config.get("dj"),
+         'DL': config.get("dl"),
+         'DT': config.get("dt"),
+         'DW': config.get("dw"),
+         'HE': config.get("he"),
+         'MB': config.get("mb"),
+         'NK': config.get("nk"),
+         'NX': config.get("nx"),
+         'SF': config.get("sf"),
+         'SJ': config.get("sj"),
+         'SL': config.get("sl"),
+         'WD': config.get("wd"),
+         'WX': config.get("wx")
+     }

-     return package_id
+     handlers = [
+         (flags['AL'], handle_al),
+         (flags['BY'], handle_by),
+         (flags['DD'], lambda *a: handle_unprotected(*a, func=get_dd_download_links, label='DD')),
+         (flags['DJ'], lambda *a: handle_protected(*a, func=get_dj_download_links, label='DJ')),
+         (flags['DL'], handle_dl),
+         (flags['DT'], lambda *a: handle_unprotected(*a, func=get_dt_download_links, label='DT')),
+         (flags['DW'], lambda *a: handle_protected(*a, func=get_dw_download_links, label='DW')),
+         (flags['HE'], lambda *a: handle_unprotected(*a, func=get_he_download_links, label='HE')),
+         (flags['MB'], lambda *a: handle_protected(*a, func=get_mb_download_links, label='MB')),
+         (flags['NK'], lambda *a: handle_protected(*a, func=get_nk_download_links, label='NK')),
+         (flags['NX'], lambda *a: handle_unprotected(*a, func=get_nx_download_links, label='NX')),
+         (flags['SF'], handle_sf),
+         (flags['SJ'], lambda *a: handle_protected(*a, func=get_sj_download_links, label='SJ')),
+         (flags['SL'], handle_sl),
+         (flags['WD'], handle_wd),
+         (flags['WX'], handle_wx),
+     ]
+
+     for flag, fn in handlers:
+         if flag and flag.lower() in url.lower():
+             return {"package_id": package_id,
+                     **fn(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb)}
+
+     if "filecrypt" in url.lower():
+         return {"package_id": package_id, **handle_protected(
+             shared_state, title, password, package_id, imdb_id, url, mirror, size_mb,
+             func=lambda ss, u, m, t: [[u, "filecrypt"]],
+             label='filecrypt'
+         )}
+
+     info(f'Could not parse URL for "{title}" - "{url}"')
+     StatsHelper(shared_state).increment_failed_downloads()
+     return {"success": False, "package_id": package_id, "title": title}
+
+
+ def fail(title, package_id, shared_state, reason="Offline / no links found"):
+     try:
+         info(f"Reason for failure: {reason}")
+         StatsHelper(shared_state).increment_failed_downloads()
+         blob = json.dumps({"title": title, "error": reason})
+         stored = shared_state.get_db("failed").store(package_id, json.dumps(blob))
+         if stored:
+             info(f'Package "{title}" marked as failed!"')
+             return True
+         else:
+             info(f'Failed to mark package "{title}" as failed!"')
+             return False
+     except Exception as e:
+         info(f'Error marking package "{package_id}" as failed: {e}')
+         return False
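For orientation, the note at the top of the rewritten quasarr/downloads/__init__.py says that every handler keeps an identical signature so download() can dispatch to any of them, directly or via a lambda wrapper. A minimal, hypothetical sketch of that contract is shown below; the handler name, URL, and call site are invented for illustration and are not part of the package.

    # Hypothetical handler following the shared signature seen in the diff above.
    # Only the argument order and the returned dict shape mirror the real code.
    def handle_example(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
        # A real handler would resolve download links here and call fail() on errors.
        return {"success": True, "title": title}

    # download() matches a configured hostname against the URL, calls the matching handler
    # with the same positional arguments, and merges its result with the generated package_id:
    #   result = download(shared_state, "Radarr", "Example.Title.2024", "https://example.invalid/release",
    #                     None, 1024, "password")
    #   # -> {"package_id": "Quasarr_movies_...", "success": True, "title": "Example.Title.2024"}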