quasarr 1.20.7__py3-none-any.whl → 1.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,316 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import re
+ import time
+ import warnings
+ from base64 import urlsafe_b64encode
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from datetime import datetime
+ from html import unescape
+
+ from bs4 import BeautifulSoup
+ from bs4 import XMLParsedAsHTMLWarning
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+ from quasarr.providers.sessions.dl import retrieve_and_validate_session, invalidate_session, fetch_via_requests_session
+
+ warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning) # we dont want to use lxml
+
+ hostname = "dl"
+ supported_mirrors = []
+
+
+ def normalize_title_for_sonarr(title):
+     """
+     Normalize title for Sonarr by replacing spaces with dots.
+     """
+     title = title.replace(' ', '.')
+     title = re.sub(r'\s*-\s*', '-', title)
+     title = re.sub(r'\.\-\.', '-', title)
+     title = re.sub(r'\.{2,}', '.', title)
+     title = title.strip('.')
+     return title
+
+
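# Illustrative only (not part of the release): what the normalization above
# produces for a made-up scene-style title. Spaces become dots, ".-." collapses
# to "-", duplicate dots are squashed, and leading/trailing dots are stripped.
if __name__ == "__main__":
    sample = "Some Show - S01E02 - Episode Name 1080p WEB"
    print(normalize_title_for_sonarr(sample))
    # -> Some.Show-S01E02-Episode.Name.1080p.WEB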
+ def dl_feed(shared_state, start_time, request_from, mirror=None):
+     """
+     Parse the RSS feed and return releases.
+     """
+     releases = []
+     host = shared_state.values["config"]("Hostnames").get(hostname)
+
+     if not host:
+         debug(f"{hostname}: hostname not configured")
+         return releases
+
+     try:
+         sess = retrieve_and_validate_session(shared_state)
+         if not sess:
+             info(f"Could not retrieve valid session for {host}")
+             return releases
+
+         # Instead we should parse the HTML for the correct *arr client
+         rss_url = f'https://www.{host}/forums/-/index.rss'
+         response = sess.get(rss_url, timeout=30)
+
+         if response.status_code != 200:
+             info(f"{hostname}: RSS feed returned status {response.status_code}")
+             return releases
+
+         soup = BeautifulSoup(response.content, 'html.parser')
+         items = soup.find_all('item')
+
+         if not items:
+             debug(f"{hostname}: No entries found in RSS feed")
+             return releases
+
+         for item in items:
+             try:
+                 title_tag = item.find('title')
+                 if not title_tag:
+                     continue
+
+                 title = title_tag.get_text(strip=True)
+                 if not title:
+                     continue
+
+                 title = unescape(title)
+                 title = title.replace(']]>', '').replace('<![CDATA[', '')
+                 title = normalize_title_for_sonarr(title)
+
+                 item_text = item.get_text()
+                 thread_url = None
+                 match = re.search(r'https://[^\s]+/threads/[^\s]+', item_text)
+                 if match:
+                     thread_url = match.group(0)
+                 if not thread_url:
+                     continue
+
+                 pub_date = item.find('pubdate')
+                 if pub_date:
+                     date_str = pub_date.get_text(strip=True)
+                 else:
+                     # Fallback: use current time if no pubDate found
+                     date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+                 mb = 0
+                 imdb_id = None
+                 password = ""
+
+                 payload = urlsafe_b64encode(
+                     f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                 ).decode("utf-8")
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                 releases.append({
+                     "details": {
+                         "title": title,
+                         "hostname": hostname,
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": mb * 1024 * 1024,
+                         "date": date_str,
+                         "source": thread_url
+                     },
+                     "type": "protected"
+                 })
+
+             except Exception as e:
+                 debug(f"{hostname}: error parsing RSS entry: {e}")
+                 continue
+
+     except Exception as e:
+         info(f"{hostname}: RSS feed error: {e}")
+         invalidate_session(shared_state)
+
+     elapsed = time.time() - start_time
+     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+     return releases
+
+
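# Sketch only (not part of the release): the payload built in dl_feed/dl_search
# is a URL-safe Base64 string of "title|source|mirror|mb|password|imdb_id".
# A consumer of the /download/ endpoint could unpack it roughly like this,
# assuming none of the fields contains a literal "|":
from base64 import urlsafe_b64decode


def _decode_payload_example(payload):
    decoded = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8")
    title, source, mirror, mb, password, imdb_id = decoded.split("|")
    return {
        "title": title,
        "source": source,
        "mirror": None if mirror == "None" else mirror,  # None is stringified by the f-string above
        "size_mb": int(mb),
        "password": password,
        "imdb_id": imdb_id or None,
    }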
+ def _search_single_page(shared_state, host, search_string, search_id, page_num, imdb_id, mirror, request_from, season,
+                         episode):
+     """
+     Search a single page. This function is called in parallel for each page.
+     """
+     page_releases = []
+
+     try:
+         if page_num == 1:
+             search_params = {
+                 'keywords': search_string,
+                 'c[title_only]': 1
+             }
+             search_url = f'https://www.{host}/search/search'
+         else:
+             if not search_id:
+                 return page_releases, None
+
+             search_params = {
+                 'page': page_num,
+                 'q': search_string,
+                 'o': 'relevance'
+             }
+             search_url = f'https://www.{host}/search/{search_id}/'
+
+         search_response = fetch_via_requests_session(shared_state, method="GET",
+                                                      target_url=search_url,
+                                                      get_params=search_params,
+                                                      timeout=10)
+
+         if search_response.status_code != 200:
+             debug(f"{hostname}: [Page {page_num}] returned status {search_response.status_code}")
+             return page_releases, None
+
+         # Extract search ID from first page
+         extracted_search_id = None
+         if page_num == 1:
+             match = re.search(r'/search/(\d+)/', search_response.url)
+             if match:
+                 extracted_search_id = match.group(1)
+                 debug(f"{hostname}: [Page 1] Extracted search ID: {extracted_search_id}")
+
+         soup = BeautifulSoup(search_response.text, 'html.parser')
+         result_items = soup.select('li.block-row')
+
+         if not result_items:
+             debug(f"{hostname}: [Page {page_num}] found 0 results")
+             return page_releases, extracted_search_id
+
+         debug(f"{hostname}: [Page {page_num}] found {len(result_items)} results")
+
+         for item in result_items:
+             try:
+                 title_elem = item.select_one('h3.contentRow-title a')
+                 if not title_elem:
+                     continue
+
+                 title = title_elem.get_text(separator=' ', strip=True)
+                 title = re.sub(r'\s+', ' ', title)
+                 title = unescape(title)
+                 title_normalized = normalize_title_for_sonarr(title)
+
+                 thread_url = title_elem.get('href')
+                 if thread_url.startswith('/'):
+                     thread_url = f"https://www.{host}{thread_url}"
+
+                 if not shared_state.is_valid_release(title_normalized, request_from, search_string, season, episode):
+                     continue
+
+                 minor_info = item.select_one('div.contentRow-minor')
+                 date_str = ""
+                 if minor_info:
+                     date_elem = minor_info.select_one('time.u-dt')
+                     if date_elem:
+                         date_str = date_elem.get('datetime', '')
+
+                 # Fallback: use current time if no date found
+                 if not date_str:
+                     date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+                 mb = 0
+                 password = ""
+
+                 payload = urlsafe_b64encode(
+                     f"{title_normalized}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                 ).decode("utf-8")
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                 page_releases.append({
+                     "details": {
+                         "title": title_normalized,
+                         "hostname": hostname,
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": mb * 1024 * 1024,
+                         "date": date_str,
+                         "source": thread_url
+                     },
+                     "type": "protected"
+                 })
+
+             except Exception as e:
+                 debug(f"{hostname}: [Page {page_num}] error parsing item: {e}")
+
+         return page_releases, extracted_search_id
+
+     except Exception as e:
+         info(f"{hostname}: [Page {page_num}] error: {e}")
+         return page_releases, None
+
+
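# Illustrative only (not part of the release): the selectors (li.block-row,
# h3.contentRow-title) and the /search/<id>/ URLs suggest a XenForo-style search,
# where the first request redirects to a numbered results URL. The regex above
# recovers that number so pages 2..max_pages can be requested directly.
if __name__ == "__main__":
    _demo_url = "https://www.example-board.tld/search/123456/?q=some+title&o=relevance"
    _match = re.search(r'/search/(\d+)/', _demo_url)
    print(_match.group(1) if _match else None)  # -> 123456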
+ def dl_search(shared_state, start_time, request_from, search_string,
+               mirror=None, season=None, episode=None):
+     """
+     Search with parallel pagination (max 5 pages) to find best quality releases.
+     Requests are fired in parallel to minimize search time.
+     """
+     releases = []
+     host = shared_state.values["config"]("Hostnames").get(hostname)
+
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if imdb_id:
+         title = get_localized_title(shared_state, imdb_id, 'de')
+         if not title:
+             info(f"{hostname}: no title for IMDb {imdb_id}")
+             return releases
+         search_string = title
+
+     search_string = unescape(search_string)
+     max_pages = 5
+
+     info(
+         f"{hostname}: Starting parallel paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
+
+     try:
+         sess = retrieve_and_validate_session(shared_state)
+         if not sess:
+             info(f"Could not retrieve valid session for {host}")
+             return releases
+
+         # First, do page 1 to get the search ID
+         page_1_releases, search_id = _search_single_page(
+             shared_state, host, search_string, None, 1,
+             imdb_id, mirror, request_from, season, episode
+         )
+         releases.extend(page_1_releases)
+
+         if not search_id:
+             info(f"{hostname}: Could not extract search ID, stopping pagination")
+             return releases
+
+         # Now fire remaining pages in parallel
+         with ThreadPoolExecutor(max_workers=4) as executor:
+             futures = {}
+             for page_num in range(2, max_pages + 1):
+                 future = executor.submit(
+                     _search_single_page,
+                     shared_state, host, search_string, search_id, page_num,
+                     imdb_id, mirror, request_from, season, episode
+                 )
+                 futures[future] = page_num
+
+             for future in as_completed(futures):
+                 page_num = futures[future]
+                 try:
+                     page_releases, _ = future.result()
+                     releases.extend(page_releases)
+                     debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
+                 except Exception as e:
+                     info(f"{hostname}: [Page {page_num}] failed: {e}")
+
+     except Exception as e:
+         info(f"{hostname}: search error: {e}")
+         invalidate_session(shared_state)
+
+     info(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
+
+     elapsed = time.time() - start_time
+     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+
+     return releases
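The functions above touch only a small slice of shared_state: the values mapping (a config callable and internal_address) plus the is_imdb_id and is_valid_release helpers. A minimal, purely hypothetical stub along those lines is enough to exercise the parsing logic in isolation; the real SharedState implementation in Quasarr is not shown in this diff.

import re


class FakeSharedState:
    """Hypothetical stand-in exposing only the attributes this module reads."""

    def __init__(self, dl_host):
        hostnames = {"dl": dl_host}
        self.values = {
            # The module calls values["config"]("Hostnames").get("dl")
            "config": lambda section: hostnames if section == "Hostnames" else {},
            "internal_address": "http://localhost:8080",
        }

    def is_imdb_id(self, text):
        # Return the IMDb id if the whole string looks like one, else None.
        match = re.fullmatch(r"tt\d{7,8}", text)
        return match.group(0) if match else None

    def is_valid_release(self, title, request_from, search_string, season, episode):
        # Accept everything in this sketch; the real check filters by request.
        return True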
@@ -0,0 +1,342 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import html
+ import time
+ import traceback
+ import warnings
+ from base64 import urlsafe_b64encode
+ from datetime import datetime
+
+ import requests
+ from bs4 import BeautifulSoup
+ from bs4 import XMLParsedAsHTMLWarning
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning) # we dont want to use lxml
+
+ hostname = "wx"
+ supported_mirrors = []
+
+
+ def wx_feed(shared_state, start_time, request_from, mirror=None):
+     """
+     Fetch latest releases from RSS feed.
+     """
+     releases = []
+     host = shared_state.values["config"]("Hostnames").get(hostname)
+
+     if "lazylibrarian" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     rss_url = f'https://{host}/rss'
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+     }
+
+     try:
+         response = requests.get(rss_url, headers=headers, timeout=10)
+
+         if response.status_code != 200:
+             info(f"{hostname.upper()}: RSS feed returned status {response.status_code}")
+             return releases
+
+         soup = BeautifulSoup(response.content, 'html.parser')
+         items = soup.find_all('entry')
+
+         if not items:
+             items = soup.find_all('item')
+
+         if not items:
+             debug(f"{hostname.upper()}: No entries found in RSS feed")
+             return releases
+
+         max_releases = 100
+         if len(items) > max_releases:
+             debug(f"{hostname.upper()}: Found {len(items)} entries, limiting to {max_releases}")
+             items = items[:max_releases]
+         else:
+             debug(f"{hostname.upper()}: Found {len(items)} entries in RSS feed")
+
+         for item in items:
+             try:
+                 title_tag = item.find('title')
+                 if not title_tag:
+                     continue
+
+                 title = title_tag.get_text(strip=True)
+                 if not title:
+                     continue
+
+                 title = html.unescape(title)
+                 title = title.replace(']]>', '').replace('<![CDATA[', '')
+                 title = title.replace(' ', '.')
+
+                 link_tag = item.find('link', rel='alternate')
+                 if link_tag and link_tag.has_attr('href'):
+                     source = link_tag['href']
+                 else:
+                     link_tag = item.find('link')
+                     if not link_tag:
+                         continue
+                     source = link_tag.get_text(strip=True)
+
+                 if not source:
+                     continue
+
+                 pub_date = item.find('updated') or item.find('pubDate')
+                 if pub_date:
+                     published = pub_date.get_text(strip=True)
+                 else:
+                     # Fallback: use current time if no pubDate found
+                     published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+                 mb = 0
+                 size = 0
+                 imdb_id = None
+                 password = host.upper()
+
+                 payload = urlsafe_b64encode(
+                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                 ).decode("utf-8")
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                 releases.append({
+                     "details": {
+                         "title": title,
+                         "hostname": hostname,
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size,
+                         "date": published,
+                         "source": source
+                     },
+                     "type": "protected"
+                 })
+
+             except Exception as e:
+                 debug(f"{hostname.upper()}: error parsing RSS entry: {e}")
+                 continue
+
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} feed: {e}")
+         return releases
+
+     elapsed_time = time.time() - start_time
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+     return releases
+
+
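# Illustrative only (not part of the release): wx_feed accepts either Atom
# (<entry> with <link rel="alternate" href=...>) or RSS (<item>) feeds, trying
# Atom first. A made-up Atom fragment shows the path that relies on the href
# attribute:
if __name__ == "__main__":
    _demo_feed = ('<feed><entry><title>Some.Release.2024.1080p.WEB</title>'
                  '<link rel="alternate" href="https://example.tld/release/1"/></entry></feed>')
    _soup = BeautifulSoup(_demo_feed, 'html.parser')
    _items = _soup.find_all('entry') or _soup.find_all('item')
    for _item in _items:
        _title = _item.find('title').get_text(strip=True)
        _link_tag = _item.find('link', rel='alternate')
        _source = _link_tag['href'] if _link_tag and _link_tag.has_attr('href') else None
        print(_title, _source)  # -> Some.Release.2024.1080p.WEB https://example.tld/release/1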
+ def wx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     """
+     Search using internal API.
+     """
+     releases = []
+     host = shared_state.values["config"]("Hostnames").get(hostname)
+
+     if "lazylibrarian" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if imdb_id:
+         info(f"{hostname.upper()}: Received IMDb ID: {imdb_id}")
+         title = get_localized_title(shared_state, imdb_id, 'de')
+         if not title:
+             info(f"{hostname.upper()}: no title for IMDb {imdb_id}")
+             return releases
+         info(f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'")
+         search_string = html.unescape(title)
+     else:
+         info(f"{hostname.upper()}: Using search string directly: '{search_string}'")
+
+     api_url = f'https://api.{host}/start/search'
+
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+         'Accept': 'application/json, text/plain, */*',
+         'Referer': f'https://{host}/search'
+     }
+
+     params = {
+         '__LOAD_P': '',
+         'per_page': 50,
+         'q': search_string,
+         'selectedTypes': '',
+         'selectedGenres': '',
+         'types': 'movie,series,anime',
+         'genres': '',
+         'years': '',
+         'ratings': '',
+         'page': 1,
+         'sortBy': 'latest',
+         'sortOrder': 'desc'
+     }
+
+     if "sonarr" in request_from.lower():
+         params['types'] = 'series,anime'
+     elif "radarr" in request_from.lower():
+         params['types'] = 'movie'
+
+     info(f"{hostname.upper()}: Searching: '{search_string}'")
+
+     try:
+         response = requests.get(api_url, headers=headers, params=params, timeout=10)
+
+         if response.status_code != 200:
+             info(f"{hostname.upper()}: Search API returned status {response.status_code}")
+             return releases
+
+         data = response.json()
+
+         if 'items' in data and 'data' in data['items']:
+             items = data['items']['data']
+         elif 'data' in data:
+             items = data['data']
+         elif 'results' in data:
+             items = data['results']
+         else:
+             items = data if isinstance(data, list) else []
+
+         info(f"{hostname.upper()}: Found {len(items)} items in search results")
+
+         for item in items:
+             try:
+                 uid = item.get('uid')
+                 if not uid:
+                     debug(f"{hostname.upper()}: Item has no UID, skipping")
+                     continue
+
+                 info(f"{hostname.upper()}: Fetching details for UID: {uid}")
+
+                 detail_url = f'https://api.{host}/start/d/{uid}'
+                 detail_response = requests.get(detail_url, headers=headers, timeout=10)
+
+                 if detail_response.status_code != 200:
+                     debug(f"{hostname.upper()}: Detail API returned {detail_response.status_code} for {uid}")
+                     continue
+
+                 detail_data = detail_response.json()
+
+                 if 'item' in detail_data:
+                     detail_item = detail_data['item']
+                 else:
+                     detail_item = detail_data
+
+                 item_imdb_id = imdb_id
+                 if not item_imdb_id:
+                     item_imdb_id = detail_item.get('imdb_id') or detail_item.get('imdbid')
+                     if not item_imdb_id and 'options' in detail_item:
+                         item_imdb_id = detail_item['options'].get('imdb_id')
+
+                 source = f"https://{host}/detail/{uid}"
+
+                 main_title = detail_item.get('fulltitle') or detail_item.get('title') or detail_item.get('name')
+                 if main_title:
+                     title = html.unescape(main_title)
+                     title = title.replace(' ', '.')
+
+                     if shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                         published = detail_item.get('updated_at') or detail_item.get('created_at')
+                         if not published:
+                             published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+                         password = f"www.{host}"
+
+                         payload = urlsafe_b64encode(
+                             f"{title}|{source}|{mirror}|0|{password}|{item_imdb_id or ''}".encode("utf-8")
+                         ).decode("utf-8")
+                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                         releases.append({
+                             "details": {
+                                 "title": title,
+                                 "hostname": hostname,
+                                 "imdb_id": item_imdb_id,
+                                 "link": link,
+                                 "mirror": mirror,
+                                 "size": 0,
+                                 "date": published,
+                                 "source": source
+                             },
+                             "type": "protected"
+                         })
+
+                 if 'releases' in detail_item and isinstance(detail_item['releases'], list):
+                     info(f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}")
+
+                     for release in detail_item['releases']:
+                         try:
+                             release_title = release.get('fulltitle')
+                             if not release_title:
+                                 continue
+
+                             release_title = html.unescape(release_title)
+                             release_title = release_title.replace(' ', '.')
+
+                             if not shared_state.is_valid_release(release_title, request_from, search_string, season,
+                                                                  episode):
+                                 debug(f"{hostname.upper()}: ✗ Release filtered out: {release_title}")
+                                 continue
+
+                             release_uid = release.get('uid')
+                             if release_uid:
+                                 release_source = f"https://{host}/detail/{uid}?release={release_uid}"
+                             else:
+                                 release_source = source
+
+                             release_published = release.get('updated_at') or release.get(
+                                 'created_at') or detail_item.get('updated_at')
+                             if not release_published:
+                                 release_published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+                             release_size = release.get('size', 0)
+                             password = f"www.{host}"
+
+                             payload = urlsafe_b64encode(
+                                 f"{release_title}|{release_source}|{mirror}|{release_size}|{password}|{item_imdb_id or ''}".encode(
+                                     "utf-8")
+                             ).decode("utf-8")
+                             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                             releases.append({
+                                 "details": {
+                                     "title": release_title,
+                                     "hostname": hostname,
+                                     "imdb_id": item_imdb_id,
+                                     "link": link,
+                                     "mirror": mirror,
+                                     "size": release_size,
+                                     "date": release_published,
+                                     "source": release_source
+                                 },
+                                 "type": "protected"
+                             })
+
+                         except Exception as e:
+                             debug(f"{hostname.upper()}: Error parsing release: {e}")
+                             continue
+                 else:
+                     debug(f"{hostname.upper()}: No releases array found for {uid}")
+
+             except Exception as e:
+                 debug(f"{hostname.upper()}: Error processing item: {e}")
+                 debug(f"{hostname.upper()}: {traceback.format_exc()}")
+                 continue
+
+         info(f"{hostname.upper()}: Returning {len(releases)} total releases")
+
+     except Exception as e:
+         info(f"Error in {hostname.upper()} search: {e}")
+
+         debug(f"{hostname.upper()}: {traceback.format_exc()}")
+         return releases
+
+     elapsed_time = time.time() - start_time
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+     return releases
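wx_search probes the search response defensively because the JSON envelope is not fixed: items.data, then data, then results, then a bare list. A short sketch with fabricated response bodies (the real API schema is not documented in this diff) shows which branch each shape takes:

def _pick_items_example(data):
    # Mirrors the branch order used in wx_search above.
    if 'items' in data and 'data' in data['items']:
        return data['items']['data']
    if 'data' in data:
        return data['data']
    if 'results' in data:
        return data['results']
    return data if isinstance(data, list) else []


samples = [
    {"items": {"data": [{"uid": "abc123"}]}},  # nested items.data
    {"data": [{"uid": "def456"}]},             # flat data
    {"results": [{"uid": "ghi789"}]},          # results
    [{"uid": "jkl012"}],                       # bare list
]
for sample in samples:
    print([entry.get("uid") for entry in _pick_items_example(sample)])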
quasarr/storage/config.py CHANGED
@@ -30,6 +30,7 @@ class Config(object):
  ("by", "secret", ""),
  ("dd", "secret", ""),
  ("dj", "secret", ""),
+ ("dl", "secret", ""),
  ("dt", "secret", ""),
  ("dw", "secret", ""),
  ("fx", "secret", ""),
@@ -40,7 +41,8 @@ class Config(object):
  ("sf", "secret", ""),
  ("sj", "secret", ""),
  ("sl", "secret", ""),
- ("wd", "secret", "")
+ ("wd", "secret", ""),
+ ("wx", "secret", "")
  ],
  'FlareSolverr': [
  ("url", "str", ""),
@@ -53,6 +55,10 @@ class Config(object):
  ("user", "secret", ""),
  ("password", "secret", "")
  ],
+ 'DL': [
+ ("user", "secret", ""),
+ ("password", "secret", "")
+ ],
  'NX': [
  ("user", "secret", ""),
  ("password", "secret", "")