quasarr 2.4.8-py3-none-any.whl → 2.4.9-py3-none-any.whl

This diff shows the contents of publicly available package versions as released to their public registry, and is provided for informational purposes only; it reflects the changes between the two versions as published.
Files changed (76)
  1. quasarr/__init__.py +134 -70
  2. quasarr/api/__init__.py +40 -31
  3. quasarr/api/arr/__init__.py +116 -108
  4. quasarr/api/captcha/__init__.py +262 -137
  5. quasarr/api/config/__init__.py +76 -46
  6. quasarr/api/packages/__init__.py +138 -102
  7. quasarr/api/sponsors_helper/__init__.py +29 -16
  8. quasarr/api/statistics/__init__.py +19 -19
  9. quasarr/downloads/__init__.py +165 -72
  10. quasarr/downloads/linkcrypters/al.py +35 -18
  11. quasarr/downloads/linkcrypters/filecrypt.py +107 -52
  12. quasarr/downloads/linkcrypters/hide.py +5 -6
  13. quasarr/downloads/packages/__init__.py +342 -177
  14. quasarr/downloads/sources/al.py +191 -100
  15. quasarr/downloads/sources/by.py +31 -13
  16. quasarr/downloads/sources/dd.py +27 -14
  17. quasarr/downloads/sources/dj.py +1 -3
  18. quasarr/downloads/sources/dl.py +126 -71
  19. quasarr/downloads/sources/dt.py +11 -5
  20. quasarr/downloads/sources/dw.py +28 -14
  21. quasarr/downloads/sources/he.py +32 -24
  22. quasarr/downloads/sources/mb.py +19 -9
  23. quasarr/downloads/sources/nk.py +14 -10
  24. quasarr/downloads/sources/nx.py +8 -18
  25. quasarr/downloads/sources/sf.py +45 -20
  26. quasarr/downloads/sources/sj.py +1 -3
  27. quasarr/downloads/sources/sl.py +9 -5
  28. quasarr/downloads/sources/wd.py +32 -12
  29. quasarr/downloads/sources/wx.py +35 -21
  30. quasarr/providers/auth.py +42 -37
  31. quasarr/providers/cloudflare.py +28 -30
  32. quasarr/providers/hostname_issues.py +2 -1
  33. quasarr/providers/html_images.py +2 -2
  34. quasarr/providers/html_templates.py +22 -14
  35. quasarr/providers/imdb_metadata.py +149 -80
  36. quasarr/providers/jd_cache.py +131 -39
  37. quasarr/providers/log.py +1 -1
  38. quasarr/providers/myjd_api.py +260 -196
  39. quasarr/providers/notifications.py +53 -41
  40. quasarr/providers/obfuscated.py +9 -4
  41. quasarr/providers/sessions/al.py +71 -55
  42. quasarr/providers/sessions/dd.py +21 -14
  43. quasarr/providers/sessions/dl.py +30 -19
  44. quasarr/providers/sessions/nx.py +23 -14
  45. quasarr/providers/shared_state.py +292 -141
  46. quasarr/providers/statistics.py +75 -43
  47. quasarr/providers/utils.py +33 -27
  48. quasarr/providers/version.py +45 -14
  49. quasarr/providers/web_server.py +10 -5
  50. quasarr/search/__init__.py +30 -18
  51. quasarr/search/sources/al.py +124 -73
  52. quasarr/search/sources/by.py +110 -59
  53. quasarr/search/sources/dd.py +57 -35
  54. quasarr/search/sources/dj.py +69 -48
  55. quasarr/search/sources/dl.py +159 -100
  56. quasarr/search/sources/dt.py +110 -74
  57. quasarr/search/sources/dw.py +121 -61
  58. quasarr/search/sources/fx.py +108 -62
  59. quasarr/search/sources/he.py +78 -49
  60. quasarr/search/sources/mb.py +96 -48
  61. quasarr/search/sources/nk.py +80 -50
  62. quasarr/search/sources/nx.py +91 -62
  63. quasarr/search/sources/sf.py +171 -106
  64. quasarr/search/sources/sj.py +69 -48
  65. quasarr/search/sources/sl.py +115 -71
  66. quasarr/search/sources/wd.py +67 -44
  67. quasarr/search/sources/wx.py +188 -123
  68. quasarr/storage/config.py +65 -52
  69. quasarr/storage/setup.py +238 -140
  70. quasarr/storage/sqlite_database.py +10 -4
  71. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
  72. quasarr-2.4.9.dist-info/RECORD +81 -0
  73. quasarr-2.4.8.dist-info/RECORD +0 -81
  74. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
  75. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
  76. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0

quasarr/search/sources/dt.py

@@ -7,15 +7,15 @@ import html
  import re
  import time
  from base64 import urlsafe_b64encode
- from datetime import timezone, timedelta
+ from datetime import timedelta, timezone
  from urllib.parse import quote_plus

  import requests
  from bs4 import BeautifulSoup

- from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
+ from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
  from quasarr.providers.imdb_metadata import get_localized_title
- from quasarr.providers.log import info, debug
+ from quasarr.providers.log import debug, info

  hostname = "dt"
  supported_mirrors = ["rapidgator", "nitroflare", "ddownload"]
@@ -32,19 +32,19 @@ def extract_size(text):


  def parse_published_datetime(article):
-     date_box = article.find('div', class_='mr-2 shadow-sm1 text-center')
-     mon = date_box.find('small').text.strip()
-     day = date_box.find('h4').text.strip()
-     year = date_box.find('h6').text.strip()
-     month_num = datetime.datetime.strptime(mon, '%b').month
+     date_box = article.find("div", class_="mr-2 shadow-sm1 text-center")
+     mon = date_box.find("small").text.strip()
+     day = date_box.find("h4").text.strip()
+     year = date_box.find("h6").text.strip()
+     month_num = datetime.datetime.strptime(mon, "%b").month

-     time_icon = article.select_one('i.fa-clock-o')
+     time_icon = article.select_one("i.fa-clock-o")
      if time_icon:
          # its parent <span> contains e.g. "19:12"
          raw = time_icon.parent.get_text(strip=True)
-         m = re.search(r'(\d{1,2}:\d{2})', raw)
+         m = re.search(r"(\d{1,2}:\d{2})", raw)
          if m:
-             hh, mm = map(int, m.group(1).split(':'))
+             hh, mm = map(int, m.group(1).split(":"))
          else:
              hh, mm = 0, 0
      else:
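For reference, the parsing steps above can be exercised in isolation. The article markup below is invented for illustration (only the class names and tag structure come from the hunk), and the real function presumably goes on to attach a timezone, which this hunk does not show:

import datetime
import re

from bs4 import BeautifulSoup

# Invented snippet; only the selectors mirror the code above.
html = """
<article>
  <div class="mr-2 shadow-sm1 text-center">
    <small>Mar</small><h4>14</h4><h6>2024</h6>
  </div>
  <span><i class="fa fa-clock-o"></i> 19:12</span>
</article>
"""
article = BeautifulSoup(html, "html.parser")

date_box = article.find("div", class_="mr-2 shadow-sm1 text-center")
month_num = datetime.datetime.strptime(date_box.find("small").text.strip(), "%b").month
day = int(date_box.find("h4").text.strip())
year = int(date_box.find("h6").text.strip())

time_icon = article.select_one("i.fa-clock-o")
raw = time_icon.parent.get_text(strip=True)  # "19:12"
hh, mm = map(int, re.search(r"(\d{1,2}:\d{2})", raw).group(1).split(":"))

print(datetime.datetime(year, month_num, day, hh, mm))  # 2024-03-14 19:12:00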
@@ -69,39 +69,50 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
          feed_type = "media/tv-show/"

      if mirror and mirror not in supported_mirrors:
-         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
+         debug(
+             f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!'
+         )
          return releases

-     url = f'https://{dt}/{feed_type}'
-     headers = {'User-Agent': shared_state.values["user_agent"]}
+     url = f"https://{dt}/{feed_type}"
+     headers = {"User-Agent": shared_state.values["user_agent"]}

      try:
          r = requests.get(url, headers=headers, timeout=30)
          r.raise_for_status()
          feed = BeautifulSoup(r.content, "html.parser")

-         for article in feed.find_all('article'):
+         for article in feed.find_all("article"):
              try:
-                 link_tag = article.select_one('h4.font-weight-bold a')
+                 link_tag = article.select_one("h4.font-weight-bold a")
                  if not link_tag:
-                     debug(f"Link tag not found in article: {article} at {hostname.upper()}")
+                     debug(
+                         f"Link tag not found in article: {article} at {hostname.upper()}"
+                     )
                      continue

-                 source = link_tag['href']
+                 source = link_tag["href"]
                  title_raw = link_tag.text.strip()
-                 title = title_raw.replace(' - ', '-').replace(' ', '.').replace('(', '').replace(')', '')
-
-                 if 'lazylibrarian' in request_from.lower():
+                 title = (
+                     title_raw.replace(" - ", "-")
+                     .replace(" ", ".")
+                     .replace("(", "")
+                     .replace(")", "")
+                 )
+
+                 if "lazylibrarian" in request_from.lower():
                      # lazylibrarian can only detect specific date formats / issue numbering for magazines
                      title = shared_state.normalize_magazine_title(title)

                  try:
-                     imdb_id = re.search(r'tt\d+', str(article)).group()
+                     imdb_id = re.search(r"tt\d+", str(article)).group()
                  except:
                      imdb_id = None

-                 body_text = article.find('div', class_='card-body').get_text(" ")
-                 size_match = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
+                 body_text = article.find("div", class_="card-body").get_text(" ")
+                 size_match = re.search(
+                     r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE
+                 )
                  if not size_match:
                      debug(f"Size not found in article: {article} at {hostname.upper()}")
                      continue
@@ -113,32 +124,40 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
                  published = parse_published_datetime(article)

                  payload = urlsafe_b64encode(
-                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
+                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                         "utf-8"
+                     )
                  ).decode("utf-8")
                  link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

              except Exception as e:
                  info(f"Error parsing {hostname.upper()} feed: {e}")
-                 mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+                 mark_hostname_issue(
+                     hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+                 )
                  continue

-             releases.append({
-                 "details": {
-                     "title": title,
-                     "hostname": hostname.lower(),
-                     "imdb_id": imdb_id,
-                     "link": link,
-                     "mirror": mirror,
-                     "size": size,
-                     "date": published,
-                     "source": source
-                 },
-                 "type": "protected"
-             })
+             releases.append(
+                 {
+                     "details": {
+                         "title": title,
+                         "hostname": hostname.lower(),
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size,
+                         "date": published,
+                         "source": source,
+                     },
+                     "type": "protected",
+                 }
+             )

      except Exception as e:
          info(f"Error loading {hostname.upper()} feed: {e}")
-         mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+         )

      elapsed = time.time() - start_time
      debug(f"Time taken: {elapsed:.2f}s ({hostname})")
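The payload built above is a pipe-joined string, URL-safe base64-encoded, and handed to the local /download/ endpoint. A minimal round-trip sketch follows; the encode step mirrors the diff, while the decode/split step is an assumption about the consuming side and is not part of this hunk. All values are placeholders:

from base64 import urlsafe_b64decode, urlsafe_b64encode

title = "Some.Show.S01E01.German.1080p.WEB.h264-GROUP"
source = "https://example.invalid/release/123"
mirror = None          # stays None when no mirror filter was requested
mb = 4300
password = "example.invalid"
imdb_id = "tt1234567"
hostname = "dt"

payload = urlsafe_b64encode(
    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
).decode("utf-8")

# Assumed decode side: split the base64-decoded string on "|".
fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
print(fields[0])  # Some.Show.S01E01.German.1080p.WEB.h264-GROUP
print(fields[2])  # "None" (None is stringified by the f-string)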
@@ -148,7 +167,15 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
      return releases


- def dt_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+ def dt_search(
+     shared_state,
+     start_time,
+     request_from,
+     search_string,
+     mirror=None,
+     season=None,
+     episode=None,
+ ):
      releases = []
      dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = dt
@@ -161,13 +188,15 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
          cat_id = "64"

      if mirror and mirror not in supported_mirrors:
-         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!')
+         debug(
+             f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!'
+         )
          return releases

      try:
          imdb_id = shared_state.is_imdb_id(search_string)
          if imdb_id:
-             search_string = get_localized_title(shared_state, imdb_id, 'en')
+             search_string = get_localized_title(shared_state, imdb_id, "en")
              if not search_string:
                  info(f"Could not extract title from IMDb-ID {imdb_id}")
                  return releases
@@ -207,21 +236,19 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
                      continue
                  source = link_tag["href"]
                  title_raw = link_tag.text.strip()
-                 title = (title_raw.
-                          replace(' - ', '-').
-                          replace(' ', '.').
-                          replace('(', '').
-                          replace(')', '')
-                          )
-
-                 if not shared_state.is_valid_release(title,
-                                                      request_from,
-                                                      search_string,
-                                                      season,
-                                                      episode):
+                 title = (
+                     title_raw.replace(" - ", "-")
+                     .replace(" ", ".")
+                     .replace("(", "")
+                     .replace(")", "")
+                 )
+
+                 if not shared_state.is_valid_release(
+                     title, request_from, search_string, season, episode
+                 ):
                      continue

-                 if 'lazylibrarian' in request_from.lower():
+                 if "lazylibrarian" in request_from.lower():
                      # lazylibrarian can only detect specific date formats / issue numbering for magazines
                      title = shared_state.normalize_magazine_title(title)

@@ -231,7 +258,9 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
                      imdb_id = None

                  body_text = article.find("div", class_="card-body").get_text(" ")
-                 m = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
+                 m = re.search(
+                     r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE
+                 )
                  if not m:
                      debug(f"Size not found in search-article: {title_raw}")
                      continue
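The size regex above can be tried stand-alone. In the sketch below, convert_to_mb is a simplified stand-in for shared_state.convert_to_mb, which lives elsewhere in the package and is not part of this diff; the sample card-body text is invented:

import re

# Simplified stand-in for shared_state.convert_to_mb (assumption, not the
# package's own implementation).
def convert_to_mb(size_text):
    value, unit = size_text.split()
    factor = {"KB": 1 / 1024, "MB": 1, "GB": 1024, "TB": 1024 * 1024}[unit.upper()]
    return int(float(value) * factor)

body_text = "Some.Movie.2023.German.DL.1080p.BluRay.x264 | Größe: 9.8 GB | rapidgator"
m = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
if m:
    mb = convert_to_mb(m.group(1))
    size = mb * 1024 * 1024  # bytes, as stored in the release entry
    print(m.group(1), mb)    # 9.8 GB 10035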
@@ -242,33 +271,40 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
                  published = parse_published_datetime(article)

                  payload = urlsafe_b64encode(
-                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
-                     .encode("utf-8")
+                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode(
+                         "utf-8"
+                     )
                  ).decode("utf-8")
                  link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

              except Exception as e:
                  info(f"Error parsing {hostname.upper()} search item: {e}")
-                 mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+                 mark_hostname_issue(
+                     hostname, "search", str(e) if "e" in dir() else "Error occurred"
+                 )
                  continue

-             releases.append({
-                 "details": {
-                     "title": title,
-                     "hostname": hostname.lower(),
-                     "imdb_id": imdb_id,
-                     "link": link,
-                     "mirror": mirror,
-                     "size": size,
-                     "date": published,
-                     "source": source
-                 },
-                 "type": "protected"
-             })
+             releases.append(
+                 {
+                     "details": {
+                         "title": title,
+                         "hostname": hostname.lower(),
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size,
+                         "date": published,
+                         "source": source,
+                     },
+                     "type": "protected",
+                 }
+             )

      except Exception as e:
          info(f"Error loading {hostname.upper()} search page: {e}")
-         mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, "search", str(e) if "e" in dir() else "Error occurred"
+         )

      elapsed = time.time() - start_time
      debug(f"Search time: {elapsed:.2f}s ({hostname})")

quasarr/search/sources/dw.py

@@ -10,26 +10,50 @@ from base64 import urlsafe_b64encode
  import requests
  from bs4 import BeautifulSoup

- from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
- from quasarr.providers.log import info, debug
+ from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+ from quasarr.providers.log import debug, info

  hostname = "dw"
  supported_mirrors = ["1fichier", "rapidgator", "ddownload", "katfile"]


  def convert_to_rss_date(date_str):
-     german_months = ["Januar", "Februar", "März", "April", "Mai", "Juni",
-                      "Juli", "August", "September", "Oktober", "November", "Dezember"]
-     english_months = ["January", "February", "March", "April", "May", "June",
-                       "July", "August", "September", "October", "November", "December"]
+     german_months = [
+         "Januar",
+         "Februar",
+         "März",
+         "April",
+         "Mai",
+         "Juni",
+         "Juli",
+         "August",
+         "September",
+         "Oktober",
+         "November",
+         "Dezember",
+     ]
+     english_months = [
+         "January",
+         "February",
+         "March",
+         "April",
+         "May",
+         "June",
+         "July",
+         "August",
+         "September",
+         "October",
+         "November",
+         "December",
+     ]

      for german, english in zip(german_months, english_months):
          if german in date_str:
              date_str = date_str.replace(german, english)
              break

-     parsed_date = datetime.datetime.strptime(date_str, '%d. %B %Y / %H:%M')
-     rss_date = parsed_date.strftime('%a, %d %b %Y %H:%M:%S %z')
+     parsed_date = datetime.datetime.strptime(date_str, "%d. %B %Y / %H:%M")
+     rss_date = parsed_date.strftime("%a, %d %b %Y %H:%M:%S %z")

      return rss_date

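The reformatted convert_to_rss_date keeps its behaviour: German month names are mapped to English before strptime. A quick check with an invented input whose shape follows from the "%d. %B %Y / %H:%M" pattern:

import datetime

german_months = ["Januar", "Februar", "März", "April", "Mai", "Juni",
                 "Juli", "August", "September", "Oktober", "November", "Dezember"]
english_months = ["January", "February", "March", "April", "May", "June",
                  "July", "August", "September", "October", "November", "December"]

date_str = "14. März 2024 / 19:12"  # invented sample matching the strptime pattern
for german, english in zip(german_months, english_months):
    if german in date_str:
        date_str = date_str.replace(german, english)
        break

parsed = datetime.datetime.strptime(date_str, "%d. %B %Y / %H:%M")
print(parsed.strftime("%a, %d %b %Y %H:%M:%S %z"))
# "Thu, 14 Mar 2024 19:12:00 " (%z renders empty because the datetime is naive)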
@@ -59,7 +83,9 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
      password = dw

      if not "arr" in request_from.lower():
-         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         debug(
+             f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+         )
          return releases

      if "Radarr" in request_from:
@@ -68,20 +94,22 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
          feed_type = "videos/serien/"

      if mirror and mirror not in supported_mirrors:
-         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
-               ' Skipping search!')
+         debug(
+             f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+             " Skipping search!"
+         )
          return releases

-     url = f'https://{dw}/{feed_type}'
+     url = f"https://{dw}/{feed_type}"
      headers = {
-         'User-Agent': shared_state.values["user_agent"],
+         "User-Agent": shared_state.values["user_agent"],
      }

      try:
          r = requests.get(url, headers=headers, timeout=30)
          r.raise_for_status()
          feed = BeautifulSoup(r.content, "html.parser")
-         articles = feed.find_all('h4')
+         articles = feed.find_all("h4")

          for article in articles:
              try:
@@ -89,7 +117,7 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
                  title = article.a.text.strip()

                  try:
-                     imdb_id = re.search(r'tt\d+', str(article)).group()
+                     imdb_id = re.search(r"tt\d+", str(article)).group()
                  except:
                      imdb_id = None

@@ -97,33 +125,44 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
                  size_item = extract_size(size_info)
                  mb = shared_state.convert_to_mb(size_item)
                  size = mb * 1024 * 1024
-                 date = article.parent.parent.find("span", {"class": "date updated"}).text.strip()
+                 date = article.parent.parent.find(
+                     "span", {"class": "date updated"}
+                 ).text.strip()
                  published = convert_to_rss_date(date)
                  payload = urlsafe_b64encode(
-                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode("utf-8")
+                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                         "utf-8"
+                     )
+                 ).decode("utf-8")
                  link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
              except Exception as e:
                  info(f"Error parsing {hostname.upper()} feed: {e}")
-                 mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+                 mark_hostname_issue(
+                     hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+                 )
                  continue

-             releases.append({
-                 "details": {
-                     "title": title,
-                     "hostname": hostname.lower(),
-                     "imdb_id": imdb_id,
-                     "link": link,
-                     "mirror": mirror,
-                     "size": size,
-                     "date": published,
-                     "source": source
-                 },
-                 "type": "protected"
-             })
+             releases.append(
+                 {
+                     "details": {
+                         "title": title,
+                         "hostname": hostname.lower(),
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size,
+                         "date": published,
+                         "source": source,
+                     },
+                     "type": "protected",
+                 }
+             )

      except Exception as e:
          info(f"Error loading {hostname.upper()} feed: {e}")
-         mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+         )

      elapsed_time = time.time() - start_time
      debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
@@ -133,13 +172,23 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
      return releases


- def dw_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+ def dw_search(
+     shared_state,
+     start_time,
+     request_from,
+     search_string,
+     mirror=None,
+     season=None,
+     episode=None,
+ ):
      releases = []
      dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = dw

      if not "arr" in request_from.lower():
-         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         debug(
+             f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+         )
          return releases

      if "Radarr" in request_from:
@@ -148,23 +197,27 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
          search_type = "videocategory=serien"

      if mirror and mirror not in ["1fichier", "rapidgator", "ddownload", "katfile"]:
-         debug(f'Mirror "{mirror}" not not supported by {hostname.upper()}. Skipping search!')
+         debug(
+             f'Mirror "{mirror}" not not supported by {hostname.upper()}. Skipping search!'
+         )
          return releases

-     url = f'https://{dw}/?s={search_string}&{search_type}'
+     url = f"https://{dw}/?s={search_string}&{search_type}"
      headers = {
-         'User-Agent': shared_state.values["user_agent"],
+         "User-Agent": shared_state.values["user_agent"],
      }

      try:
          r = requests.get(url, headers=headers, timeout=10)
          r.raise_for_status()
          search = BeautifulSoup(r.content, "html.parser")
-         results = search.find_all('h4')
+         results = search.find_all("h4")

      except Exception as e:
          info(f"Error loading {hostname.upper()} search feed: {e}")
-         mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, "search", str(e) if "e" in dir() else "Error occurred"
+         )
          return releases

      imdb_id = shared_state.is_imdb_id(search_string)
@@ -174,16 +227,14 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
          try:
              title = result.a.text.strip()

-             if not shared_state.is_valid_release(title,
-                                                  request_from,
-                                                  search_string,
-                                                  season,
-                                                  episode):
+             if not shared_state.is_valid_release(
+                 title, request_from, search_string, season, episode
+             ):
                  continue

              if not imdb_id:
                  try:
-                     imdb_id = re.search(r'tt\d+', str(result)).group()
+                     imdb_id = re.search(r"tt\d+", str(result)).group()
                  except:
                      imdb_id = None

@@ -192,29 +243,38 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
              size_item = extract_size(size_info)
              mb = shared_state.convert_to_mb(size_item)
              size = mb * 1024 * 1024
-             date = result.parent.parent.find("span", {"class": "date updated"}).text.strip()
+             date = result.parent.parent.find(
+                 "span", {"class": "date updated"}
+             ).text.strip()
              published = convert_to_rss_date(date)
              payload = urlsafe_b64encode(
-                 f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode("utf-8")
+                 f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                     "utf-8"
+                 )
+             ).decode("utf-8")
              link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
          except Exception as e:
              info(f"Error parsing {hostname.upper()} search: {e}")
-             mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+             mark_hostname_issue(
+                 hostname, "search", str(e) if "e" in dir() else "Error occurred"
+             )
              continue

-         releases.append({
-             "details": {
-                 "title": title,
-                 "hostname": hostname.lower(),
-                 "imdb_id": imdb_id,
-                 "link": link,
-                 "mirror": mirror,
-                 "size": size,
-                 "date": published,
-                 "source": source
-             },
-             "type": "protected"
-         })
+         releases.append(
+             {
+                 "details": {
+                     "title": title,
+                     "hostname": hostname.lower(),
+                     "imdb_id": imdb_id,
+                     "link": link,
+                     "mirror": mirror,
+                     "size": size,
+                     "date": published,
+                     "source": source,
+                 },
+                 "type": "protected",
+             }
+         )

      elapsed_time = time.time() - start_time
      debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")