quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr was flagged as potentially problematic by the registry; review the advisory details before installing.

Files changed (77) hide show
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,123 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import re
6
+ from typing import List, Dict, Any
7
+
8
+ import requests
9
+
10
+ from quasarr.providers.log import info, debug
11
+ from quasarr.providers.statistics import StatsHelper
12
+
13
+
14
def unhide_links(shared_state, url):
    """Decrypt a hide.cx container URL into its plain download links.

    :param shared_state: shared state object; must expose
        ``values["user_agent"]`` and be accepted by ``StatsHelper``.
    :param url: hide.cx URL containing a ``container/<id>`` segment.
    :return: list of unique decrypted link URLs, empty list on any failure.
    """
    try:
        links = []

        match = re.search(r"container/([a-z0-9\-]+)", url)
        if not match:
            info(f"Invalid hide.cx URL: {url}")
            return []

        container_id = match.group(1)
        info(f"Fetching hide.cx container with ID: {container_id}")

        headers = {'User-Agent': shared_state.values["user_agent"]}

        container_url = f"https://api.hide.cx/containers/{container_id}"
        # timeout prevents a stalled API from hanging the caller forever;
        # any resulting exception is handled by the outer except below
        response = requests.get(container_url, headers=headers, timeout=30)
        data = response.json()

        for link in data.get("links", []):
            link_id = link.get("id")
            if not link_id:
                continue

            debug(f"Fetching hide.cx link with ID: {link_id}")
            link_url = f"https://api.hide.cx/containers/{container_id}/links/{link_id}"
            link_data = requests.get(link_url, headers=headers, timeout=30).json()

            final_url = link_data.get("url")
            # de-duplicate while preserving order
            if final_url and final_url not in links:
                links.append(final_url)

        # statistics: count this container as one automatic decryption attempt
        success = bool(links)
        if success:
            StatsHelper(shared_state).increment_captcha_decryptions_automatic()
        else:
            StatsHelper(shared_state).increment_failed_decryptions_automatic()

        return links
    except Exception as e:
        info(f"Error fetching hide.cx links: {e}")
        StatsHelper(shared_state).increment_failed_decryptions_automatic()
        return []
56
+
57
+
58
def decrypt_links_if_hide(shared_state: Any, items: List[List[str]]) -> Dict[str, Any]:
    """
    Resolve redirects and decrypt hide.cx links from a list of item lists.

    Each item list must include:
      - index 0: the URL to resolve
      - any additional metadata at subsequent indices (ignored here)

    :param shared_state: State object required by unhide_links function
    :param items: List of lists, where each inner list has the URL at index 0
    :return: Dict with 'status' and 'results' (flat list of decrypted link URLs)
    """
    if not items:
        info("No items provided to decrypt.")
        return {"status": "error", "results": []}

    # context manager ensures the session's pooled connections are released
    with requests.Session() as session:
        session.max_redirects = 5

        hide_urls: List[str] = []
        for item in items:
            original_url = item[0]
            if not original_url:
                debug(f"Skipping item without URL: {item}")
                continue

            try:
                # Try HEAD first (cheap), fallback to GET if the host rejects it
                try:
                    resp = session.head(original_url, allow_redirects=True, timeout=10)
                except requests.RequestException:
                    resp = session.get(original_url, allow_redirects=True, timeout=10)

                final_url = resp.url
                if "hide.cx" in final_url:
                    debug(f"Identified hide.cx link: {final_url}")
                    hide_urls.append(final_url)
                else:
                    debug(f"Not a hide.cx link (skipped): {final_url}")

            except requests.RequestException as e:
                info(f"Error resolving URL {original_url}: {e}")
                continue

    if not hide_urls:
        debug(f"No hide.cx links found among {len(items)} items.")
        return {"status": "none", "results": []}

    info(f"Found {len(hide_urls)} hide.cx URLs; decrypting...")
    decrypted_links: List[str] = []
    for url in hide_urls:
        try:
            links = unhide_links(shared_state, url)
            if not links:
                debug(f"No links decrypted for {url}")
                continue
            decrypted_links.extend(links)
        except Exception as e:
            info(f"Failed to decrypt {url}: {e}")
            continue

    if not decrypted_links:
        # plain string — no placeholders, so no f-prefix needed
        info("Could not decrypt any links from hide.cx URLs.")
        return {"status": "error", "results": []}

    return {"status": "success", "results": decrypted_links}
@@ -0,0 +1,476 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import json
6
+ from collections import defaultdict
7
+ from urllib.parse import urlparse
8
+
9
+ from quasarr.providers.log import info, debug
10
+ from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException
11
+
12
+
13
def get_links_comment(package, package_links):
    """Return the comment of the first link belonging to *package*.

    :param package: package dict (expects a "uuid" key).
    :param package_links: iterable of link dicts from the JDownloader API.
    :return: the matching link's "comment" value, or None when the package
        has no uuid, no links were given, or no link matches.
    """
    owner_uuid = package.get("uuid")
    if not owner_uuid or not package_links:
        return None
    return next(
        (entry.get("comment") for entry in package_links
         if entry.get("packageUUID") == owner_uuid),
        None,
    )
20
+
21
+
22
def get_links_status(package, all_links, is_archive=False):
    """Aggregate the per-link state of *package* into one status summary.

    :param package: package dict (expects a "uuid" key).
    :param all_links: list of link dicts from the JDownloader API.
    :param is_archive: when True, a finished link additionally needs its
        status text to report a successful extraction.
    :return: dict with "all_finished" (bool), "eta" (seconds or None),
        "error" (str or None) and "offline_mirror_linkids" (uuids of offline
        links that are safe to delete because another mirror is fully online).
    """
    # collect only the links that belong to this package
    links_in_package = []
    package_uuid = package.get("uuid")
    if package_uuid and all_links:
        for link in all_links:
            link_package_uuid = link.get("packageUUID")
            if link_package_uuid and link_package_uuid == package_uuid:
                links_in_package.append(link)

    all_finished = True
    eta = None
    error = None

    # group links by hoster domain — each domain counts as one mirror
    mirrors = defaultdict(list)
    for link in links_in_package:
        url = link.get("url", "")
        base_domain = urlparse(url).netloc
        mirrors[base_domain].append(link)

    # the package remains downloadable if at least one mirror is fully online
    has_mirror_all_online = False
    for mirror_links in mirrors.values():
        if all(link.get('availability', '').lower() == 'online' for link in mirror_links):
            has_mirror_all_online = True
            break

    offline_links = [link for link in links_in_package if link.get('availability', '').lower() == 'offline']
    offline_ids = [link.get('uuid') for link in offline_links]
    # offline links are only reported for cleanup when a healthy mirror remains
    offline_mirror_linkids = offline_ids if has_mirror_all_online else []

    for link in links_in_package:
        # later links may overwrite error: file errors take precedence over
        # offline mirrors by virtue of statement order
        if link.get('availability', "").lower() == "offline" and not has_mirror_all_online:
            error = "Links offline for all mirrors"
        if link.get('statusIconKey', '').lower() == "false":
            error = "File error in package"
        link_finished = link.get('finished', False)
        link_extraction_status = link.get('extractionStatus', '').lower()  # "error" signifies an issue
        link_eta = link.get('eta', 0) // 1000  # API reports milliseconds
        if not link_finished:
            all_finished = False
        elif link_extraction_status and link_extraction_status != 'successful':
            if link_extraction_status == 'error':
                error = link.get('status', '')
            elif link_extraction_status == 'running' and link_eta > 0:
                # keep the largest ETA seen; parses as
                # (eta and link_eta > eta) or (not eta)
                if eta and link_eta > eta or not eta:
                    eta = link_eta
                all_finished = False
        elif is_archive:
            # For archives, check if extraction is actually complete
            link_status = link.get('status', '').lower()
            # Check for various "extraction complete" indicators (EN/DE UI text)
            if 'extraction ok' not in link_status and 'entpacken ok' not in link_status:
                all_finished = False

    return {"all_finished": all_finished, "eta": eta, "error": error, "offline_mirror_linkids": offline_mirror_linkids}
76
+
77
+
78
def get_links_matching_package_uuid(package, package_links):
    """Collect the uuids of all links that belong to *package*.

    :param package: package dict (expects a "uuid" key).
    :param package_links: list of link dicts from the JDownloader API.
    :return: list of link uuids; empty when input is malformed or the
        package uuid is missing.
    """
    owner_uuid = package.get("uuid")

    # guard against malformed API responses
    if not isinstance(package_links, list):
        debug("Error - expected a list of package_links, got: %r" % type(package_links).__name__)
        return []

    if not owner_uuid:
        info("Error - package uuid missing in delete request!")
        return []

    return [entry.get("uuid") for entry in package_links
            if entry.get("packageUUID") == owner_uuid]
93
+
94
+
95
def format_eta(seconds):
    """Format a duration in seconds as an HH:MM:SS string.

    Negative durations (unknown ETA) are clamped to "23:59:59".

    :param seconds: whole number of seconds remaining.
    :return: zero-padded "HH:MM:SS" string.
    """
    if seconds < 0:
        return "23:59:59"
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02}:{minutes:02}:{secs:02}"
103
+
104
+
105
def get_packages(shared_state):
    """Build a SABnzbd-style queue/history view of all known packages.

    Merges three sources: CAPTCHA-pending ("protected") packages from the
    local DB, failed packages from the local DB, and live JDownloader
    linkgrabber/downloader packages. As a side effect it also deletes
    offline links with a healthy mirror and auto-starts ready Quasarr
    packages sitting in the linkgrabber.

    :param shared_state: provides ``get_db()`` and ``get_device()``
        (My JDownloader API facade).
    :return: dict with "queue" and "history" lists of package entries.
    """
    packages = []

    # --- packages awaiting a CAPTCHA solution (stored locally) ---
    protected_packages = shared_state.get_db("protected").retrieve_all_titles()
    if protected_packages:
        for package in protected_packages:
            package_id = package[0]

            data = json.loads(package[1])
            details = {
                "title": data["title"],
                "urls": data["links"],
                "size_mb": data["size_mb"],
                "password": data["password"]
            }

            packages.append({
                "details": details,
                "location": "queue",
                "type": "protected",
                "package_id": package_id
            })

    # --- packages that failed earlier (stored locally) ---
    failed_packages = shared_state.get_db("failed").retrieve_all_titles()
    if failed_packages:
        for package in failed_packages:
            package_id = package[0]

            data = json.loads(package[1])
            try:
                # some rows are double-encoded JSON strings
                if type(data) is str:
                    data = json.loads(data)
            except json.JSONDecodeError:
                pass
            details = {
                "name": data["title"],
                "bytesLoaded": 0,
                "saveTo": "/"
            }

            error = data.get("error", "Unknown error")

            packages.append({
                "details": details,
                "location": "history",
                "type": "failed",
                "error": error,
                "comment": package_id,
                "uuid": package_id
            })
    # --- live linkgrabber packages from JDownloader ---
    try:
        linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
        linkgrabber_links = shared_state.get_device().linkgrabber.query_links()
    except (TokenExpiredException, RequestTimeoutException, MYJDException):
        linkgrabber_packages = []
        linkgrabber_links = []

    if linkgrabber_packages:
        for package in linkgrabber_packages:
            # NOTE(review): re-queries links for every package although
            # linkgrabber_links is already available — confirm before reusing
            comment = get_links_comment(package, shared_state.get_device().linkgrabber.query_links())
            link_details = get_links_status(package, linkgrabber_links, is_archive=False)

            error = link_details["error"]
            offline_mirror_linkids = link_details["offline_mirror_linkids"]
            if offline_mirror_linkids:
                # drop offline links when another mirror is fully online
                shared_state.get_device().linkgrabber.cleanup(
                    "DELETE_OFFLINE",
                    "REMOVE_LINKS_ONLY",
                    "SELECTED",
                    offline_mirror_linkids,
                    [package["uuid"]]
                )

            location = "history" if error else "queue"
            packages.append({
                "details": package,
                "location": location,
                "type": "linkgrabber",
                "comment": comment,
                "uuid": package.get("uuid"),
                "error": error
            })
    # --- live downloader packages from JDownloader ---
    try:
        downloader_packages = shared_state.get_device().downloads.query_packages()
        downloader_links = shared_state.get_device().downloads.query_links()
    except (TokenExpiredException, RequestTimeoutException, MYJDException):
        downloader_packages = []
        downloader_links = []

    if downloader_packages and downloader_links:
        for package in downloader_packages:
            comment = get_links_comment(package, downloader_links)

            # Check if package is actually archived/extracted using archive info
            is_archive = False
            try:
                archive_info = shared_state.get_device().extraction.get_archive_info([], [package.get("uuid")])
                is_archive = True if archive_info and archive_info[0] else False
            except:
                # On error, don't assume it's an archive - check bytes instead
                pass

            link_details = get_links_status(package, downloader_links, is_archive)

            error = link_details["error"]
            finished = link_details["all_finished"]

            # Additional check: if download is 100% complete and no ETA, it's finished
            # This catches non-archive packages or when archive detection fails
            if not finished and not error:
                bytes_total = int(package.get("bytesTotal", 0))
                bytes_loaded = int(package.get("bytesLoaded", 0))
                eta = package.get("eta")

                # If download is complete and no ETA (paused/finished state)
                if bytes_total > 0 and bytes_loaded >= bytes_total and eta is None:
                    # Only mark as finished if it's not an archive, or if we can't detect archives
                    if not is_archive:
                        finished = True

            if not finished and link_details["eta"]:
                package["eta"] = link_details["eta"]

            location = "history" if error or finished else "queue"

            packages.append({
                "details": package,
                "location": location,
                "type": "downloader",
                "comment": comment,
                "uuid": package.get("uuid"),
                "error": error
            })

    # --- translate collected packages into SABnzbd-style queue/history ---
    downloads = {
        "queue": [],
        "history": []
    }
    for package in packages:
        # NOTE(review): these counters are reset on every iteration, so each
        # entry is appended with index 0 — confirm whether they should be
        # initialized once before the loop instead
        queue_index = 0
        history_index = 0

        package_id = None

        if package["location"] == "queue":
            time_left = "23:59:59"
            if package["type"] == "linkgrabber":
                details = package["details"]
                # NOTE(review): nested double quotes in an f-string require
                # Python 3.12+ (PEP 701) — confirm minimum supported version
                name = f"[Linkgrabber] {details["name"]}"
                try:
                    mb = mb_left = int(details["bytesTotal"]) / (1024 * 1024)
                except KeyError:
                    mb = mb_left = 0
                try:
                    # the category is encoded into the Quasarr package id
                    package_id = package["comment"]
                    if "movies" in package_id:
                        category = "movies"
                    elif "docs" in package_id:
                        category = "docs"
                    else:
                        category = "tv"
                except TypeError:
                    # comment is None: package was not created by Quasarr
                    category = "not_quasarr"
                package_type = "linkgrabber"
                package_uuid = package["uuid"]
            elif package["type"] == "downloader":
                details = package["details"]
                status = "Downloading"
                eta = details.get("eta")
                bytes_total = int(details.get("bytesTotal", 0))
                bytes_loaded = int(details.get("bytesLoaded", 0))

                mb = bytes_total / (1024 * 1024)
                mb_left = (bytes_total - bytes_loaded) / (1024 * 1024) if bytes_total else 0
                if mb_left < 0:
                    mb_left = 0

                # missing ETA is treated as paused; zero bytes left with an
                # ETA means the archive is still being extracted
                if eta is None:
                    status = "Paused"
                else:
                    time_left = format_eta(int(eta))
                    if mb_left == 0:
                        status = "Extracting"

                name = f"[{status}] {details['name']}"

                try:
                    package_id = package["comment"]
                    if "movies" in package_id:
                        category = "movies"
                    elif "docs" in package_id:
                        category = "docs"
                    else:
                        category = "tv"
                except TypeError:
                    category = "not_quasarr"
                package_type = "downloader"
                package_uuid = package["uuid"]
            else:
                # "protected" package still waiting for its CAPTCHA
                details = package["details"]
                name = f"[CAPTCHA not solved!] {details["title"]}"
                mb = mb_left = details["size_mb"]
                try:
                    package_id = package["package_id"]
                    if "movies" in package_id:
                        category = "movies"
                    elif "docs" in package_id:
                        category = "docs"
                    else:
                        category = "tv"
                except TypeError:
                    category = "not_quasarr"
                package_type = "protected"
                package_uuid = None

            try:
                if package_id:
                    mb_left = int(mb_left)
                    mb = int(mb)
                    try:
                        percentage = int(100 * (mb - mb_left) / mb)
                    except ZeroDivisionError:
                        percentage = 0

                    downloads["queue"].append({
                        "index": queue_index,
                        "nzo_id": package_id,
                        "priority": "Normal",
                        "filename": name,
                        "cat": category,
                        "mbleft": mb_left,
                        "mb": mb,
                        "status": "Downloading",
                        "percentage": percentage,
                        "timeleft": time_left,
                        "type": package_type,
                        "uuid": package_uuid
                    })
            except:
                debug(f"Parameters missing for {package}")
            queue_index += 1
        elif package["location"] == "history":
            details = package["details"]
            name = details["name"]
            try:
                size = int(details["bytesLoaded"])
            except KeyError:
                size = 0
            storage = details["saveTo"]
            try:
                package_id = package["comment"]
                if "movies" in package_id:
                    category = "movies"
                elif "docs" in package_id:
                    category = "docs"
                else:
                    category = "tv"
            except TypeError:
                category = "not_quasarr"

            error = package.get("error")
            fail_message = ""
            if error:
                status = "Failed"
                fail_message = error
            else:
                status = "Completed"

            downloads["history"].append({
                "fail_message": fail_message,
                "category": category,
                "storage": storage,
                "status": status,
                "nzo_id": package_id,
                "name": name,
                "bytes": int(size),
                "percentage": 100,
                "type": "downloader",
                "uuid": package["uuid"]
            })
            history_index += 1
        else:
            info(f"Invalid package location {package['location']}")

    # --- auto-start ready Quasarr packages once the linkgrabber settles ---
    if not shared_state.get_device().linkgrabber.is_collecting():
        linkgrabber_packages = shared_state.get_device().linkgrabber.query_packages()
        linkgrabber_links = shared_state.get_device().linkgrabber.query_links()

        packages_to_start = []
        links_to_start = []

        for package in linkgrabber_packages:
            comment = get_links_comment(package, shared_state.get_device().linkgrabber.query_links())
            if comment and comment.startswith("Quasarr_"):
                package_uuid = package.get("uuid")
                if package_uuid:
                    # NOTE(review): rebinds linkgrabber_links to a list of
                    # uuids; the break below keeps later iterations from
                    # seeing the clobbered value — confirm intent
                    linkgrabber_links = [link.get("uuid") for link in linkgrabber_links if
                                         link.get("packageUUID") == package_uuid]
                    if linkgrabber_links:
                        packages_to_start.append(package_uuid)
                        links_to_start.extend(linkgrabber_links)
                    else:
                        info(f"Package {package_uuid} has no links in linkgrabber - skipping start")

                break

        if packages_to_start and links_to_start:
            shared_state.get_device().linkgrabber.move_to_downloadlist(links_to_start, packages_to_start)
            info(f"Started {len(packages_to_start)} package download"
                 f"{'s' if len(packages_to_start) > 1 else ''} from linkgrabber")

    return downloads
417
+
418
+
419
def delete_package(shared_state, package_id):
    """Remove a package (by SABnzbd-style nzo id) from JDownloader and local DBs.

    Looks the package up in the merged view from get_packages(), removes its
    links/files from the matching JDownloader list, and unconditionally
    clears any matching "failed"/"protected" DB rows.

    :param shared_state: provides ``get_device()`` and ``get_db()``.
    :param package_id: the "nzo_id" reported by get_packages().
    :return: True on success, False when any step raised.
    """
    try:
        deleted_title = ""

        packages = get_packages(shared_state)
        for package_location in packages:
            for package in packages[package_location]:
                if package["nzo_id"] == package_id:
                    if package["type"] == "linkgrabber":
                        ids = get_links_matching_package_uuid(package,
                                                              shared_state.get_device().linkgrabber.query_links())
                        if ids:
                            shared_state.get_device().linkgrabber.cleanup(
                                "DELETE_ALL",
                                "REMOVE_LINKS_AND_DELETE_FILES",
                                "SELECTED",
                                ids,
                                [package["uuid"]]
                            )
                        break
                    elif package["type"] == "downloader":
                        ids = get_links_matching_package_uuid(package,
                                                              shared_state.get_device().downloads.query_links())
                        if ids:
                            shared_state.get_device().downloads.cleanup(
                                "DELETE_ALL",
                                "REMOVE_LINKS_AND_DELETE_FILES",
                                "SELECTED",
                                ids,
                                [package["uuid"]]
                            )
                        break

                    # no state check, just clean up whatever exists with the package id
                    shared_state.get_db("failed").delete(package_id)
                    shared_state.get_db("protected").delete(package_id)

                    # queue entries carry "filename", history entries "name"
                    if package_location == "queue":
                        package_name_field = "filename"
                    else:
                        package_name_field = "name"

                    try:
                        deleted_title = package[package_name_field]
                    except KeyError:
                        pass

                    # Leave the loop
                    break

        if deleted_title:
            info(f'Deleted package "{deleted_title}" with ID "{package_id}"')
        else:
            info(f'Deleted package "{package_id}"')
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        info(f"Failed to delete package {package_id}")
        return False
    return True