quasarr 1.20.7__tar.gz → 1.21.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. {quasarr-1.20.7 → quasarr-1.21.0}/PKG-INFO +3 -1
  2. {quasarr-1.20.7 → quasarr-1.21.0}/README.md +2 -0
  3. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/__init__.py +7 -0
  4. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/api/arr/__init__.py +4 -1
  5. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/__init__.py +93 -27
  6. quasarr-1.21.0/quasarr/downloads/sources/dl.py +196 -0
  7. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/nk.py +8 -5
  8. quasarr-1.21.0/quasarr/downloads/sources/wx.py +127 -0
  9. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/html_images.py +2 -0
  10. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/myjd_api.py +35 -4
  11. quasarr-1.21.0/quasarr/providers/sessions/dl.py +175 -0
  12. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/shared_state.py +21 -5
  13. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/version.py +1 -1
  14. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/__init__.py +9 -0
  15. quasarr-1.21.0/quasarr/search/sources/dl.py +316 -0
  16. quasarr-1.21.0/quasarr/search/sources/wx.py +342 -0
  17. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/storage/config.py +7 -1
  18. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/storage/setup.py +10 -2
  19. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/PKG-INFO +3 -1
  20. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/SOURCES.txt +5 -0
  21. {quasarr-1.20.7 → quasarr-1.21.0}/LICENSE +0 -0
  22. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/api/__init__.py +0 -0
  23. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/api/captcha/__init__.py +0 -0
  24. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/api/config/__init__.py +0 -0
  25. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/api/sponsors_helper/__init__.py +0 -0
  26. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/api/statistics/__init__.py +0 -0
  27. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/linkcrypters/__init__.py +0 -0
  28. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/linkcrypters/al.py +0 -0
  29. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/linkcrypters/filecrypt.py +0 -0
  30. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/linkcrypters/hide.py +0 -0
  31. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/packages/__init__.py +0 -0
  32. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/__init__.py +0 -0
  33. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/al.py +0 -0
  34. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/by.py +0 -0
  35. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/dd.py +0 -0
  36. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/dj.py +0 -0
  37. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/dt.py +0 -0
  38. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/dw.py +0 -0
  39. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/he.py +0 -0
  40. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/mb.py +0 -0
  41. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/nx.py +0 -0
  42. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/sf.py +0 -0
  43. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/sj.py +0 -0
  44. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/sl.py +0 -0
  45. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/downloads/sources/wd.py +0 -0
  46. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/__init__.py +0 -0
  47. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/cloudflare.py +0 -0
  48. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/html_templates.py +0 -0
  49. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/imdb_metadata.py +0 -0
  50. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/log.py +0 -0
  51. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/notifications.py +0 -0
  52. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/obfuscated.py +0 -0
  53. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/sessions/__init__.py +0 -0
  54. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/sessions/al.py +0 -0
  55. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/sessions/dd.py +0 -0
  56. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/sessions/nx.py +0 -0
  57. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/statistics.py +0 -0
  58. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/providers/web_server.py +0 -0
  59. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/__init__.py +0 -0
  60. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/al.py +0 -0
  61. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/by.py +0 -0
  62. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/dd.py +0 -0
  63. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/dj.py +0 -0
  64. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/dt.py +0 -0
  65. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/dw.py +0 -0
  66. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/fx.py +0 -0
  67. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/he.py +0 -0
  68. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/mb.py +0 -0
  69. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/nk.py +0 -0
  70. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/nx.py +0 -0
  71. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/sf.py +0 -0
  72. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/sj.py +0 -0
  73. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/sl.py +0 -0
  74. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/search/sources/wd.py +0 -0
  75. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/storage/__init__.py +0 -0
  76. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr/storage/sqlite_database.py +0 -0
  77. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/dependency_links.txt +0 -0
  78. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/entry_points.txt +0 -0
  79. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/not-zip-safe +0 -0
  80. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/requires.txt +0 -0
  81. {quasarr-1.20.7 → quasarr-1.21.0}/quasarr.egg-info/top_level.txt +0 -0
  82. {quasarr-1.20.7 → quasarr-1.21.0}/setup.cfg +0 -0
  83. {quasarr-1.20.7 → quasarr-1.21.0}/setup.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: quasarr
3
- Version: 1.20.7
3
+ Version: 1.21.0
4
4
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
5
5
  Home-page: https://github.com/rix1337/Quasarr
6
6
  Author: rix1337
@@ -151,6 +151,7 @@ docker run -d \
151
151
  -e 'HOSTNAMES'='https://pastebin.com/raw/eX4Mpl3'
152
152
  -e 'SILENT'='True' \
153
153
  -e 'DEBUG'='' \
154
+ -e 'TZ'='Europe/Berlin' \
154
155
  ghcr.io/rix1337/quasarr:latest
155
156
  ```
156
157
 
@@ -163,6 +164,7 @@ docker run -d \
163
164
  * Must contain at least one valid Hostname per line `ab = xyz`
164
165
  * `SILENT` is optional and silences all discord notifications except for error messages from SponsorsHelper if `True`.
165
166
  * `DEBUG` is optional and enables debug logging if `True`.
167
+ * `TZ` is optional; a wrong timezone can cause HTTPS/SSL issues
166
168
 
167
169
  # Manual setup
168
170
 
@@ -124,6 +124,7 @@ docker run -d \
124
124
  -e 'HOSTNAMES'='https://pastebin.com/raw/eX4Mpl3'
125
125
  -e 'SILENT'='True' \
126
126
  -e 'DEBUG'='' \
127
+ -e 'TZ'='Europe/Berlin' \
127
128
  ghcr.io/rix1337/quasarr:latest
128
129
  ```
129
130
 
@@ -136,6 +137,7 @@ docker run -d \
136
137
  * Must contain at least one valid Hostname per line `ab = xyz`
137
138
  * `SILENT` is optional and silences all discord notifications except for error messages from SponsorsHelper if `True`.
138
139
  * `DEBUG` is optional and enables debug logging if `True`.
140
+ * `TZ` is optional; a wrong timezone can cause HTTPS/SSL issues
139
141
 
140
142
  # Manual setup
141
143
 
@@ -181,6 +181,13 @@ def run():
181
181
  if not user or not password:
182
182
  hostname_credentials_config(shared_state, "NX", nx)
183
183
 
184
+ dl = Config('Hostnames').get('dl')
185
+ if dl:
186
+ user = Config('DL').get('user')
187
+ password = Config('DL').get('password')
188
+ if not user or not password:
189
+ hostname_credentials_config(shared_state, "DL", dl)
190
+
184
191
  config = Config('JDownloader')
185
192
  user = config.get('user')
186
193
  password = config.get('password')
@@ -340,13 +340,16 @@ def setup_arr_routes(app):
340
340
  if not "lazylibrarian" in request_from.lower():
341
341
  title = f'[{release.get("hostname", "").upper()}] {title}'
342
342
 
343
+ # Get publication date - sources should provide valid dates
344
+ pub_date = release.get("date", "").strip()
345
+
343
346
  items += f'''
344
347
  <item>
345
348
  <title>{title}</title>
346
349
  <guid isPermaLink="True">{release.get("link", "")}</guid>
347
350
  <link>{release.get("link", "")}</link>
348
351
  <comments>{source}</comments>
349
- <pubDate>{release.get("date", datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000"))}</pubDate>
352
+ <pubDate>{pub_date}</pubDate>
350
353
  <enclosure url="{release.get("link", "")}" length="{release.get("size", 0)}" type="application/x-nzb" />
351
354
  </item>'''
352
355
 
@@ -12,6 +12,7 @@ from quasarr.downloads.sources.al import get_al_download_links
12
12
  from quasarr.downloads.sources.by import get_by_download_links
13
13
  from quasarr.downloads.sources.dd import get_dd_download_links
14
14
  from quasarr.downloads.sources.dj import get_dj_download_links
15
+ from quasarr.downloads.sources.dl import get_dl_download_links
15
16
  from quasarr.downloads.sources.dt import get_dt_download_links
16
17
  from quasarr.downloads.sources.dw import get_dw_download_links
17
18
  from quasarr.downloads.sources.he import get_he_download_links
@@ -22,6 +23,7 @@ from quasarr.downloads.sources.sf import get_sf_download_links, resolve_sf_redir
22
23
  from quasarr.downloads.sources.sj import get_sj_download_links
23
24
  from quasarr.downloads.sources.sl import get_sl_download_links
24
25
  from quasarr.downloads.sources.wd import get_wd_download_links
26
+ from quasarr.downloads.sources.wx import get_wx_download_links
25
27
  from quasarr.providers.log import info
26
28
  from quasarr.providers.notifications import send_discord_message
27
29
  from quasarr.providers.statistics import StatsHelper
@@ -77,6 +79,31 @@ def handle_protected(shared_state, title, password, package_id, imdb_id, url,
77
79
  return {"success": True, "title": title}
78
80
 
79
81
 
82
def handle_hide(shared_state, title, password, package_id, imdb_id, url, links, label):
    """
    Try to decrypt hide.cx links contained in *links* and act on the outcome.

    Returns a dict with:
      - 'handled': True when decryption was attempted (success or failure),
        False when no hide.cx links were present at all.
      - 'result': the response dict to hand back to the caller, or None when
        not handled.
    """
    outcome = decrypt_links_if_hide(shared_state, links)

    # No hide.cx links present -> caller should continue with its own flow.
    if not outcome or outcome.get("status") == "none":
        return {"handled": False, "result": None}

    # Decryption was attempted but did not succeed.
    if outcome.get("status", "error") != "success":
        fail(title, package_id, shared_state,
             reason=f'Error decrypting hide.cx links for "{title}" on {label} - "{url}"')
        return {"handled": True, "result": {"success": False, "title": title}}

    resolved_links = outcome.get("results", [])
    response = handle_unprotected(
        shared_state, title, password, package_id, imdb_id, url,
        links=resolved_links, label=label
    )
    return {"handled": True, "result": response}
105
+
106
+
80
107
  def handle_al(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
81
108
  data = get_al_download_links(shared_state, url, mirror, title, password)
82
109
  links = data.get("links", [])
@@ -96,19 +123,12 @@ def handle_by(shared_state, title, password, package_id, imdb_id, url, mirror, s
96
123
  reason=f'Offline / no links found for "{title}" on BY - "{url}"')
97
124
  return {"success": False, "title": title}
98
125
 
99
- decrypted = decrypt_links_if_hide(shared_state, links)
100
- if decrypted and decrypted.get("status") != "none":
101
- status = decrypted.get("status", "error")
102
- links = decrypted.get("results", [])
103
- if status == "success":
104
- return handle_unprotected(
105
- shared_state, title, password, package_id, imdb_id, url,
106
- links=links, label='BY'
107
- )
108
- else:
109
- fail(title, package_id, shared_state,
110
- reason=f'Error decrypting hide.cx links for "{title}" on BY - "{url}"')
111
- return {"success": False, "title": title}
126
+ decrypt_result = handle_hide(
127
+ shared_state, title, password, package_id, imdb_id, url, links, 'BY'
128
+ )
129
+
130
+ if decrypt_result["handled"]:
131
+ return decrypt_result["result"]
112
132
 
113
133
  return handle_protected(
114
134
  shared_state, title, password, package_id, imdb_id, url,
@@ -119,6 +139,32 @@ def handle_by(shared_state, title, password, package_id, imdb_id, url, mirror, s
119
139
  )
120
140
 
121
141
 
142
def handle_dl(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
    """
    Resolve a DL release: fetch links from the forum thread, try hide.cx
    decryption, and fall back to CAPTCHA-protected handling otherwise.
    """
    links, thread_password = get_dl_download_links(shared_state, url, mirror, title)
    if not links:
        fail(title, package_id, shared_state,
             reason=f'Offline / no links found for "{title}" on DL - "{url}"')
        return {"success": False, "title": title}

    # Prefer the password extracted from the forum post over the caller-supplied one.
    effective_password = thread_password or password

    hide_outcome = handle_hide(
        shared_state, title, effective_password, package_id, imdb_id, url, links, 'DL'
    )
    if hide_outcome["handled"]:
        return hide_outcome["result"]

    # Not hide.cx links -> hand over to the protected (CAPTCHA) pipeline.
    return handle_protected(
        shared_state, title, effective_password, package_id, imdb_id, url,
        mirror=mirror,
        size_mb=size_mb,
        func=lambda ss, u, m, t: links,
        label='DL'
    )
166
+
167
+
122
168
  def handle_sf(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
123
169
  if url.startswith(f"https://{shared_state.values['config']('Hostnames').get('sf')}/external"):
124
170
  url = resolve_sf_redirect(url, shared_state.values["user_agent"])
@@ -162,19 +208,12 @@ def handle_wd(shared_state, title, password, package_id, imdb_id, url, mirror, s
162
208
  reason=f'Offline / no links found for "{title}" on WD - "{url}"')
163
209
  return {"success": False, "title": title}
164
210
 
165
- decrypted = decrypt_links_if_hide(shared_state, links)
166
- if decrypted and decrypted.get("status") != "none":
167
- status = decrypted.get("status", "error")
168
- links = decrypted.get("results", [])
169
- if status == "success":
170
- return handle_unprotected(
171
- shared_state, title, password, package_id, imdb_id, url,
172
- links=links, label='WD'
173
- )
174
- else:
175
- fail(title, package_id, shared_state,
176
- reason=f'Error decrypting hide.cx links for "{title}" on WD - "{url}"')
177
- return {"success": False, "title": title}
211
+ decrypt_result = handle_hide(
212
+ shared_state, title, password, package_id, imdb_id, url, links, 'WD'
213
+ )
214
+
215
+ if decrypt_result["handled"]:
216
+ return decrypt_result["result"]
178
217
 
179
218
  return handle_protected(
180
219
  shared_state, title, password, package_id, imdb_id, url,
@@ -185,6 +224,29 @@ def handle_wd(shared_state, title, password, package_id, imdb_id, url, mirror, s
185
224
  )
186
225
 
187
226
 
227
def handle_wx(shared_state, title, password, package_id, imdb_id, url, mirror, size_mb):
    """
    Resolve a WX release: fetch crypted links from the API, try hide.cx
    decryption, and fall back to CAPTCHA-protected handling otherwise.
    """
    links = get_wx_download_links(shared_state, url, mirror, title)
    if not links:
        fail(title, package_id, shared_state,
             reason=f'Offline / no links found for "{title}" on WX - "{url}"')
        return {"success": False, "title": title}

    hide_outcome = handle_hide(
        shared_state, title, password, package_id, imdb_id, url, links, 'WX'
    )
    if hide_outcome["handled"]:
        return hide_outcome["result"]

    # Not hide.cx links -> hand over to the protected (CAPTCHA) pipeline.
    return handle_protected(
        shared_state, title, password, package_id, imdb_id, url,
        mirror=mirror,
        size_mb=size_mb,
        func=lambda ss, u, m, t: links,
        label='WX'
    )
248
+
249
+
188
250
  def download(shared_state, request_from, title, url, mirror, size_mb, password, imdb_id=None):
189
251
  if "lazylibrarian" in request_from.lower():
190
252
  category = "docs"
@@ -204,6 +266,7 @@ def download(shared_state, request_from, title, url, mirror, size_mb, password,
204
266
  'BY': config.get("by"),
205
267
  'DD': config.get("dd"),
206
268
  'DJ': config.get("dj"),
269
+ 'DL': config.get("dl"),
207
270
  'DT': config.get("dt"),
208
271
  'DW': config.get("dw"),
209
272
  'HE': config.get("he"),
@@ -213,7 +276,8 @@ def download(shared_state, request_from, title, url, mirror, size_mb, password,
213
276
  'SF': config.get("sf"),
214
277
  'SJ': config.get("sj"),
215
278
  'SL': config.get("sl"),
216
- 'WD': config.get("wd")
279
+ 'WD': config.get("wd"),
280
+ 'WX': config.get("wx")
217
281
  }
218
282
 
219
283
  handlers = [
@@ -221,6 +285,7 @@ def download(shared_state, request_from, title, url, mirror, size_mb, password,
221
285
  (flags['BY'], handle_by),
222
286
  (flags['DD'], lambda *a: handle_unprotected(*a, func=get_dd_download_links, label='DD')),
223
287
  (flags['DJ'], lambda *a: handle_protected(*a, func=get_dj_download_links, label='DJ')),
288
+ (flags['DL'], handle_dl),
224
289
  (flags['DT'], lambda *a: handle_unprotected(*a, func=get_dt_download_links, label='DT')),
225
290
  (flags['DW'], lambda *a: handle_protected(*a, func=get_dw_download_links, label='DW')),
226
291
  (flags['HE'], lambda *a: handle_unprotected(*a, func=get_he_download_links, label='HE')),
@@ -231,6 +296,7 @@ def download(shared_state, request_from, title, url, mirror, size_mb, password,
231
296
  (flags['SJ'], lambda *a: handle_protected(*a, func=get_sj_download_links, label='SJ')),
232
297
  (flags['SL'], handle_sl),
233
298
  (flags['WD'], handle_wd),
299
+ (flags['WX'], handle_wx),
234
300
  ]
235
301
 
236
302
  for flag, fn in handlers:
@@ -0,0 +1,196 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import re
6
+
7
+ from bs4 import BeautifulSoup
8
+
9
+ from quasarr.providers.log import info, debug
10
+ from quasarr.providers.sessions.dl import retrieve_and_validate_session, fetch_via_requests_session, invalidate_session
11
+
12
+ hostname = "dl"
13
+
14
+
15
def extract_password_from_post(soup, host):
    """
    Extract an archive password from a forum post using multiple strategies.

    Args:
        soup: parsed post content exposing ``get_text()`` (BeautifulSoup node).
        host: forum hostname, used to build the default fallback password.

    Returns:
        The extracted password, "" when the post explicitly states that no
        password is required, or ``www.<host>`` as the default guess.
    """
    # Flatten the post text and collapse all whitespace to single spaces so
    # label/value pairs split across tags still match.
    post_text = re.sub(r'\s+', ' ', soup.get_text()).strip()

    # Strategy 1: a password label ("Passwort:", "pw:", ...) followed by the
    # value. The \b anchors prevent matching inside unrelated words such as
    # "Bypass" (which previously yielded the following word as a password).
    password_pattern = r'\b(?:passwort|password|pass|pw)\b[\s:]+([a-zA-Z0-9._-]{2,50})'
    match = re.search(password_pattern, post_text, re.IGNORECASE)

    if match:
        password = match.group(1).strip()
        # Skip values that are actually section headers or filler words.
        if not re.match(r'^(?:download|mirror|link|episode|info|mediainfo|spoiler|hier|click|klick|kein|none|no)',
                        password, re.IGNORECASE):
            debug(f"Found password: {password}")
            return password

    # Strategy 2: explicit "no password" statements (checked only when no
    # plausible password value was found above).
    no_password_patterns = [
        r'(?:passwort|password|pass|pw)[\s:]*(?:kein(?:es)?|none|no|nicht|not|nein|-|–|—)',
        r'(?:kein(?:es)?|none|no|nicht|not|nein)\s*(?:passwort|password|pass|pw)',
    ]

    for pattern in no_password_patterns:
        if re.search(pattern, post_text, re.IGNORECASE):
            debug("No password required (explicitly stated)")
            return ""

    # Strategy 3: fall back to the site-conventional default password.
    default_password = f"www.{host}"
    debug(f"No password found, using default: {default_password}")
    return default_password
52
+
53
+
54
def extract_mirror_name_from_link(link_element):
    """
    Extract the mirror/hoster name from a download link's own text or from
    the text immediately preceding it.

    Args:
        link_element: anchor node (BeautifulSoup ``Tag``) pointing at a crypter.

    Returns:
        The lower-cased hoster name, or None when nothing plausible is found.
    """
    # Words that appear on download links but never name an actual hoster.
    common_non_hosters = {'download', 'mirror', 'link', 'hier', 'click', 'klick', 'code', 'spoiler'}

    # Strategy 1: the link text itself names the hoster.
    link_text = link_element.get_text(strip=True)
    if link_text and len(link_text) > 2:
        # Drop symbols/punctuation and normalize case.
        cleaned = re.sub(r'[^\w\s-]', '', link_text).strip().lower()

        if cleaned and cleaned not in common_non_hosters:
            # Use the first word when the text contains several.
            main_part = cleaned.split()[0] if ' ' in cleaned else cleaned
            if len(main_part) > 2:  # Must be at least 3 characters
                return main_part

    # Strategy 2: look at text siblings that precede the link (e.g. a bold
    # hoster label rendered just before the anchor). The unused parent text
    # lookup from the original was removed.
    if link_element.parent:
        for sibling in link_element.previous_siblings:
            if hasattr(sibling, 'get_text'):
                sibling_text = sibling.get_text(strip=True).lower()
                if sibling_text and len(sibling_text) > 2 and sibling_text not in common_non_hosters:
                    cleaned = re.sub(r'[^\w\s-]', '', sibling_text).strip()
                    if cleaned:
                        return cleaned.split()[0] if ' ' in cleaned else cleaned

    return None
93
+
94
+
95
def extract_links_and_password_from_post(post_content, host):
    """
    Pull download links and the archive password out of a forum post.

    Only filecrypt and hide crypter links are collected; anything else is
    logged and skipped.

    Args:
        post_content: raw HTML of the post body.
        host: forum hostname, used to skip internal links and to derive the
            default password.

    Returns:
        Tuple ``(links, password)`` where links is a list of
        ``[url, mirror_name]`` pairs (mirror_name is the detected hoster or
        the crypter type) and password is the extracted string ("" when no
        links were found).
    """
    soup = BeautifulSoup(post_content, 'html.parser')
    collected = []

    for anchor in soup.find_all('a', href=True):
        href = anchor.get('href')

        # Ignore forum-internal navigation links.
        if href.startswith('/') or host in href:
            continue

        # Classify the crypter; unsupported links are skipped with a note.
        if re.search(r'filecrypt\.', href, re.IGNORECASE):
            crypter_type = "filecrypt"
        elif re.search(r'hide\.', href, re.IGNORECASE):
            crypter_type = "hide"
        else:
            debug(f"Unsupported link crypter/hoster found: {href}")
            debug(f"Currently only filecrypt and hide are supported. Other crypters may be added later.")
            continue

        # Label the link with the hoster name when one can be detected,
        # falling back to the crypter type.
        mirror_name = extract_mirror_name_from_link(anchor)
        identifier = mirror_name or crypter_type

        entry = [href, identifier]
        if entry in collected:
            continue  # skip duplicates
        collected.append(entry)
        if mirror_name:
            debug(f"Found {crypter_type} link for mirror: {mirror_name}")
        else:
            debug(f"Found {crypter_type} link (no mirror name detected)")

    # The password only matters when there is something to download.
    password = extract_password_from_post(soup, host) if collected else ""

    return collected, password
146
+
147
+
148
def get_dl_download_links(shared_state, url, mirror, title):
    """
    Fetch a DL forum thread and extract download links plus the archive
    password from its first post.

    Args:
        shared_state: global state providing config access.
        url: thread URL to scrape.
        mirror: requested mirror (kept for interface parity with other sources).
        title: release title, used for logging only.

    Returns:
        Tuple ``(links, password)`` where links is a list of
        ``[url, mirror_name]`` pairs; both are empty on any failure.
    """
    host = shared_state.values["config"]("Hostnames").get(hostname)

    # A valid authenticated session is required to read the forum.
    if not retrieve_and_validate_session(shared_state):
        info(f"Could not retrieve valid session for {host}")
        return [], ""

    try:
        response = fetch_via_requests_session(shared_state, method="GET", target_url=url, timeout=30)
        if response.status_code != 200:
            info(f"Failed to load thread page: {url} (Status: {response.status_code})")
            return [], ""

        soup = BeautifulSoup(response.text, 'html.parser')

        # Links always live in the thread's opening post.
        first_post = soup.select_one('article.message--post')
        if not first_post:
            info(f"Could not find first post in thread: {url}")
            return [], ""

        post_content = first_post.select_one('div.bbWrapper')
        if not post_content:
            info(f"Could not find post content in thread: {url}")
            return [], ""

        # Links and password are both parsed from the same post body.
        links, password = extract_links_and_password_from_post(str(post_content), host)
        if not links:
            info(f"No supported download links found in thread: {url}")
            return [], ""

        debug(f"Found {len(links)} download link(s) for: {title} (password: {password})")
        return links, password

    except Exception as e:
        info(f"Error extracting download links from {url}: {e}")
        # Session may have gone stale; force a fresh login on the next call.
        invalidate_session(shared_state)
        return [], ""
@@ -8,6 +8,7 @@ from bs4 import BeautifulSoup
8
8
  from quasarr.providers.log import info
9
9
 
10
10
  hostname = "nk"
11
+ supported_mirrors = ["rapidgator", "ddownload"]
11
12
 
12
13
 
13
14
  def get_nk_download_links(shared_state, url, mirror, title):
@@ -28,9 +29,14 @@ def get_nk_download_links(shared_state, url, mirror, title):
28
29
  anchors = soup.select('a.btn-orange')
29
30
  candidates = []
30
31
  for a in anchors:
32
+ mirror = a.text.strip().lower()
33
+ if mirror == 'ddl.to':
34
+ mirror = 'ddownload'
35
+
36
+ if mirror not in supported_mirrors:
37
+ continue
31
38
 
32
39
  href = a.get('href', '').strip()
33
- hoster = href.split('/')[3].lower()
34
40
  if not href.lower().startswith(('http://', 'https://')):
35
41
  href = 'https://' + host + href
36
42
 
@@ -40,10 +46,7 @@ def get_nk_download_links(shared_state, url, mirror, title):
40
46
  info(f"{hostname}: could not resolve download link for {title}: {e}")
41
47
  continue
42
48
 
43
- if hoster == 'ddl.to':
44
- hoster = 'ddownload'
45
-
46
- candidates.append([href, hoster])
49
+ candidates.append([href, mirror])
47
50
 
48
51
  if not candidates:
49
52
  info(f"No external download links found on {hostname} page for {title}")
@@ -0,0 +1,127 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import re
6
+
7
+ import requests
8
+
9
+ from quasarr.providers.log import info, debug
10
+
11
+ hostname = "wx"
12
+
13
+
14
def get_wx_download_links(shared_state, url, mirror, title):
    """
    Resolve download links for a WX release via the site's JSON API.

    Args:
        shared_state: global state providing config access and the user agent.
        url: detail-page URL; the release slug is parsed from its path.
        mirror: optional hoster name to filter by (partial matches in either
            direction, e.g. 'ddownload' matches 'ddownload.com'). When falsy,
            links for all hosters are returned.
        title: exact release title (API field 'fulltitle') to look up.

    Returns:
        list of [url, hoster] pairs where hoster is the actual mirror
        (e.g., 'ddownload.com', 'rapidgator.net'); empty list on any failure.
    """
    host = shared_state.values["config"]("Hostnames").get(hostname)

    # Browser-like headers for the initial HTML request.
    headers = {
        'User-Agent': shared_state.values["user_agent"],
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    }

    try:
        session = requests.Session()

        # Load the detail page first — presumably this establishes session
        # cookies the API endpoint expects (the response body itself is
        # unused; TODO confirm the API actually requires this).
        response = session.get(url, headers=headers, timeout=30)

        if response.status_code != 200:
            info(f"{hostname.upper()}: Failed to load page: {url} (Status: {response.status_code})")
            return []

        # The API is keyed by the slug embedded in the detail-page URL.
        slug_match = re.search(r'/detail/([^/]+)', url)
        if not slug_match:
            info(f"{hostname.upper()}: Could not extract slug from URL: {url}")
            return []

        api_url = f'https://api.{host}/start/d/{slug_match.group(1)}'

        # JSON-oriented headers for the API request.
        api_headers = {
            'User-Agent': shared_state.values["user_agent"],
            'Accept': 'application/json'
        }

        debug(f"{hostname.upper()}: Fetching API data from: {api_url}")
        api_response = session.get(api_url, headers=api_headers, timeout=30)

        if api_response.status_code != 200:
            info(f"{hostname.upper()}: Failed to load API: {api_url} (Status: {api_response.status_code})")
            return []

        data = api_response.json()

        # Expected response shape: {"item": {"releases": [...]}}.
        if 'item' not in data or 'releases' not in data['item']:
            info(f"{hostname.upper()}: No releases found in API response")
            return []

        releases = data['item']['releases']

        # Find the release whose 'fulltitle' matches the requested title exactly.
        matching_release = None
        for release in releases:
            if release.get('fulltitle') == title:
                matching_release = release
                break

        if not matching_release:
            info(f"{hostname.upper()}: No release found matching title: {title}")
            return []

        # crypted_links maps hoster name -> crypter URL (hide/filecrypt).
        crypted_links = matching_release.get('crypted_links', {})

        if not crypted_links:
            info(f"{hostname.upper()}: No crypted_links found for: {title}")
            return []

        links = []

        # Mirror requested: match hosters loosely in both directions so short
        # names like 'ddownload' match 'ddownload.com' and vice versa.
        if mirror:
            matched_hoster = None
            for hoster in crypted_links.keys():
                if mirror.lower() in hoster.lower() or hoster.lower() in mirror.lower():
                    matched_hoster = hoster
                    break

            if matched_hoster:
                link = crypted_links[matched_hoster]
                # Keep the single matched link only when it uses a supported
                # crypter (hide or filecrypt); otherwise nothing is collected.
                if re.search(r'hide\.', link, re.IGNORECASE):
                    links.append([link, matched_hoster])
                    debug(f"{hostname.upper()}: Found hide link for mirror {matched_hoster}")
                elif re.search(r'filecrypt\.', link, re.IGNORECASE):
                    links.append([link, matched_hoster])
                    debug(f"{hostname.upper()}: Found filecrypt link for mirror {matched_hoster}")
            else:
                info(
                    f"{hostname.upper()}: Mirror '{mirror}' not found in available hosters: {list(crypted_links.keys())}")
        else:
            # No mirror requested: collect every hoster whose link uses a
            # supported crypter (hide or filecrypt).
            for hoster, link in crypted_links.items():
                if re.search(r'hide\.', link, re.IGNORECASE):
                    links.append([link, hoster])
                    debug(f"{hostname.upper()}: Found hide link for hoster {hoster}")
                elif re.search(r'filecrypt\.', link, re.IGNORECASE):
                    links.append([link, hoster])
                    debug(f"{hostname.upper()}: Found filecrypt link for hoster {hoster}")

        if not links:
            info(f"{hostname.upper()}: No supported crypted links found for: {title}")
            return []

        debug(f"{hostname.upper()}: Found {len(links)} crypted link(s) for: {title}")
        return links

    except Exception as e:
        # Broad catch: any network/JSON error is logged and treated as "no links".
        info(f"{hostname.upper()}: Error extracting download links from {url}: {e}")
        return []
@@ -7,6 +7,7 @@ al = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAA9UlE
7
7
  by = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAMAAADz0U65AAAATlBMVEX///8crf/29fQDo//J7f/u7u7w+v/2/P/8/PyytLWenp6IhoWTwNfPz8+mpaVYxP94eHhryf+75fyB0f+oxNPl5eW4yNGrr68xsfmUlJQWKuGqAAAAQklEQVQI1yWLSRKAIBADMzOAsgiC+/8/aiwvSR+6AUB15mJyZ42ev8ldclI4MWvXuvwQCaGblPxQCmNP9fgyqGf+AkFFAeZ3L10cAAAAAElFTkSuQmCC'
8
8
  dd = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAABCElEQVR4nAXBP0sCYRzA8e89d6eQ4SRkdNKQOEVSvQQbegdB4dAgLY1tLS1NvYFoDIIIGtqKXPq3JJGWEQkaDQliGJnaPXf6e/p8rGrt4/CxUl777fZQylYGg4iReHyMxez8kXN1c5c/Pj1D7AhKQqKujQgqCDSrK35evTca/InD+sYmyXSWdqeLFvCHhka9bpyvdgsdCpl0msRkin1/xMLcLKXbIs3mp6UG/R5vL2VKzzV29g4oFAokkh7XxQuGgcbxtTYy6Fr97xaZ6QmeHu55rVYIf9poHeAMR8ZCNLvbW8TGY5x0OrgK7KhLEIY4U6nUeS63tBwGWowR5dgziME4bsTyPO/yH3LufRKlNxKMAAAAAElFTkSuQmCC'
9
9
  dj = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAMAAADz0U65AAAAIGNIUk0AAHomAACAhAAA+gAAAIDoAAB1MAAA6mAAADqYAAAXcJy6UTwAAAC9UExURY2NjYWGhoeIiYiIiIiIiYWFhoODg4mKillZWYSEhFBMS3x7e0xIR3Bsa0E+PldWVi4wMT4/Px0eHiQkJA8PDwoMDAsMDAsMDAsLCw0NDWxqaHZtZnBrZ2dnZ25qZ21oZJ94XtWNWr6DXG9mYpp2X6p5V6JwVNJ9TsN4UXBgWZhtVaxuTZBbR6xiRMpoQKlgQ6JfRZ5cQHFGN6dOMqFOM5lMM6tPMmZBNCYlJS8mIywlJCglJC8mJCMhIf////64+awAAAAadFJOU1SosLCwokSyl7ugup+6n7ugr5RLlJmYmZA8Hg7dEgAAAAFiS0dEPklkAOMAAAAHdElNRQfpDBESHCL8f1a2AAAAUElEQVQI12NgYGRiYmZhZWNgl5KWkZWT52DgVFBUUlZR5WLgVlPX0NTS5mHg1dHV0zcw5GPgNzI2MTUzF2AQtLC0sraxFWIQFhEVExOXkAQA4WIHfGNBCckAAAAldEVYdGRhdGU6Y3JlYXRlADIwMjUtMTItMTdUMTg6Mjg6MjgrMDA6MDDR5B2HAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDI1LTEyLTE3VDE4OjI4OjI4KzAwOjAwoLmlOwAAACh0RVh0ZGF0ZTp0aW1lc3RhbXAAMjAyNS0xMi0xN1QxODoyODozNCswMDowMPym7g4AAAAASUVORK5CYII='
10
+ dl = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAACKADAAQAAAABAAAACAAAAACVhHtSAAAAw0lEQVQYGV2PMQrCQBBF/+6OSUy1aYQgSI7gCSR4Bhs78RKCtQfxAnqGVLYewE6ENELSqHFJdt0hCOIvhtl9zPw/Al55voqCNikFoPntgNpQlRbFvhEMwzZ5MfjXm6oh8aQigdZZ+A2QfpxlJcCMiISeqgwRxYgfGuflHLjfMLuMcbzuND3JYGPXiE2IMjAYHU6YIIMNFKzoQEEzqBdqqzu/21nX20hvZgRkp2ritBxSdr33b/UhU8mncFo+7Qu55z9mH8f7S3Ax+Zg1AAAAAElFTkSuQmCC'
10
11
  dt = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAA4ElEQVR4nD2PsWrCUBSGv3ubBoq+QevQUhAXV6dMRcyTZBV8CCfxCTq4ZOmSJwg+giB4t16p1NCpSFECyY2n5EL7LeeD8/PDr5qmEa01+XrNz35PaAxFUXCrNQ/DIVophbWW19WKt+WSp16PYLvlvtNBHY8ESZLg6ppPaxmMRgymUz4WC577fR5nM3SapryMx0RRxPf5jDjHXRi21bQEWZZhjCGeTIiDgOvpxM3hQLXb+QAiImVZSlVV0tS1XEXkPY7laz5vX0K74h/n/KnyXNxm41351B+tKoVcLt5Vt8svgsSBPKnRQSAAAAAASUVORK5CYII='
11
12
  dw = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAA70lEQVR4nAXBTUrDQBiA4febmfjbpqZUsFkJxS6UIq4EvYS46CUExRPkBL2HFM/gIdxIFq4Ff2JNia3JzBefR+bzuZ1Op+E+y/q6Fd/GSe8qijZH1lpfN00uADdZNt5NDh7GJ8dng2HKxvYOBtB6jctms/5S3ePpxeWklw69tZE1IqKqrIxT81U1d4PReCJJ338HXBVURCCokhelMZW467YTt9pi1rUntPCHYJ0ljiyubMJh0yJFuZSk26ENjufPFV0CCeAUfLEo8V7pRBHWOMJvBc5Q+xpnIf8pl+fvi1Lztw9zlO6T7sWw9jy9vPIPUL1kPErau3YAAAAASUVORK5CYII='
12
13
  fx = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAABB0lEQVR4nB2LP0sCcQBA3/2Uzugu8E4IoT9cSYhUENFWYSA1FFTQ4hK5SENNDW19hoYcImhvaGgLguYgcIk0haLOE7wrNGzo5A4v9K3vPWknmwt0LYrrunieTygkAAm300EIQTg5nWB3e5O39w9s54t4fISEYfBZq/H4VETIsoxp1ftyJpWkR7lSZXFhnsFIhLBtOxR9n1KliqZFeX4po+saZ4VLTMtC9M61TJrTk2OarR9WV5bIpJfZ2ljH8zxEw3EovVaZm01xd//AsKpwfXNLLKaTmDSQ9vJHwWE+x3ezRbv9i6oqTBkTmLU65xdXSNn9g0CWB2jYDuNjo/y5Lqoy1A+63S7/GeVj+5KBt3UAAAAASUVORK5CYII='
@@ -18,3 +19,4 @@ sf = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAA8ElE
18
19
  sj = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAMAAADz0U65AAAAIGNIUk0AAHomAACAhAAA+gAAAIDoAAB1MAAA6mAAADqYAAAXcJy6UTwAAAC9UExURY2NjYWGhoeIiYiIiIiIiYWFhoODg4mKillZWYSEhFBMS3x7e0xIR3Bsa0E+PldWVi4wMT4/Px0eHiQkJA8PDwoMDAsMDAsMDAsLCw0NDWxqaHZtZnBrZ2dnZ25qZ21oZJ94XtWNWr6DXG9mYpp2X6p5V6JwVNJ9TsN4UXBgWZhtVaxuTZBbR6xiRMpoQKlgQ6JfRZ5cQHFGN6dOMqFOM5lMM6tPMmZBNCYlJS8mIywlJCglJC8mJCMhIf////64+awAAAAadFJOU1SosLCwokSyl7ugup+6n7ugr5RLlJmYmZA8Hg7dEgAAAAFiS0dEPklkAOMAAAAHdElNRQfpDBESHCL8f1a2AAAAUElEQVQI12NgYGRiYmZhZWNgl5KWkZWT52DgVFBUUlZR5WLgVlPX0NTS5mHg1dHV0zcw5GPgNzI2MTUzF2AQtLC0sraxFWIQFhEVExOXkAQA4WIHfGNBCckAAAAldEVYdGRhdGU6Y3JlYXRlADIwMjUtMTItMTdUMTg6Mjg6MjgrMDA6MDDR5B2HAAAAJXRFWHRkYXRlOm1vZGlmeQAyMDI1LTEyLTE3VDE4OjI4OjI4KzAwOjAwoLmlOwAAACh0RVh0ZGF0ZTp0aW1lc3RhbXAAMjAyNS0xMi0xN1QxODoyODozNCswMDowMPym7g4AAAAASUVORK5CYII='
19
20
  sl = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAA/ElEQVR4nE3PrUpDYRzH8d/z5tnRozugImIYgjAwGY5egGUG64JRkxgWvIFV0zCIwVsQLLYhWgSDLizJiowVFUHZdJ7zvP7Ftu8NfPgybOwvImcFet0C6FhMtrYTSQg1Ru/id3W7sfTxmc3rQpeE4gk8BZJiigF1kWYLe9rSGZEvR4o/AvRAxBLB8SObzXVqXb/tBjFdJjOCtliJI/U6fDo/CQSwf4ozIM2OGrm2xyGEClQCReO76tZmTabZYcsHmnHOfSvJ+8axCiNtlJDtztdNkLmxbViyEIx7669YxPPlZG44uD99wXs9ltqNbvF8aSbvBgBH9WAWMZX+AJIYacQLryqFAAAAAElFTkSuQmCC'
20
21
  wd = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAA90lEQVR4nC3OsS5DUQAG4P8/Pb2kiS4mliaEkMYgTBiQiMVk6C6RsHmAxuoFbHTzCmIR9QSWLko6kIiQCnqHq+fec84vEt8TfMS/5s3jniFf36uD9sTH6Fg9rQ8aDQY2r7trhJmFUY1kW9I4gF0I9wGuZUOM04x+B4o9EvMSZ0D1EVWYkklsnn51TFIOhJ2DwgaN6QiqgsbRh00rseu9J1WsUngGOBmhSvaZnZwdbA1MkWfrwQ2PvXMvwbk7n7taLPKppFpe+cvb8DN8gi09RGmkbEv9GHQLqQZi+bB19WZDdN+WlYskRe/0aNsBaO+fXy4ZxEWKC7/R+XkmFbstdAAAAABJRU5ErkJggg=='
22
+ wx = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAACKADAAQAAAABAAAACAAAAACVhHtSAAAAcUlEQVQYGWNgYGD4B8S4wD+mf7y2jEDZ/0CsiKRKEiT2j9cGLMQGJH8DMUwhyEQQGyTGBcRg3T+hNMgkGP4EYsN0AdlgAHMPE0wARMN0XEASBLHB4ixQQUGgg94xMjCCBMEyTJ8PiwKZr0F8kDW4ACMASI8hUcBfWMAAAAAASUVORK5CYII='