quasarr 2.4.8__py3-none-any.whl → 2.4.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (76)
  1. quasarr/__init__.py +134 -70
  2. quasarr/api/__init__.py +40 -31
  3. quasarr/api/arr/__init__.py +116 -108
  4. quasarr/api/captcha/__init__.py +262 -137
  5. quasarr/api/config/__init__.py +76 -46
  6. quasarr/api/packages/__init__.py +138 -102
  7. quasarr/api/sponsors_helper/__init__.py +29 -16
  8. quasarr/api/statistics/__init__.py +19 -19
  9. quasarr/downloads/__init__.py +165 -72
  10. quasarr/downloads/linkcrypters/al.py +35 -18
  11. quasarr/downloads/linkcrypters/filecrypt.py +107 -52
  12. quasarr/downloads/linkcrypters/hide.py +5 -6
  13. quasarr/downloads/packages/__init__.py +342 -177
  14. quasarr/downloads/sources/al.py +191 -100
  15. quasarr/downloads/sources/by.py +31 -13
  16. quasarr/downloads/sources/dd.py +27 -14
  17. quasarr/downloads/sources/dj.py +1 -3
  18. quasarr/downloads/sources/dl.py +126 -71
  19. quasarr/downloads/sources/dt.py +11 -5
  20. quasarr/downloads/sources/dw.py +28 -14
  21. quasarr/downloads/sources/he.py +32 -24
  22. quasarr/downloads/sources/mb.py +19 -9
  23. quasarr/downloads/sources/nk.py +14 -10
  24. quasarr/downloads/sources/nx.py +8 -18
  25. quasarr/downloads/sources/sf.py +45 -20
  26. quasarr/downloads/sources/sj.py +1 -3
  27. quasarr/downloads/sources/sl.py +9 -5
  28. quasarr/downloads/sources/wd.py +32 -12
  29. quasarr/downloads/sources/wx.py +35 -21
  30. quasarr/providers/auth.py +42 -37
  31. quasarr/providers/cloudflare.py +28 -30
  32. quasarr/providers/hostname_issues.py +2 -1
  33. quasarr/providers/html_images.py +2 -2
  34. quasarr/providers/html_templates.py +22 -14
  35. quasarr/providers/imdb_metadata.py +149 -80
  36. quasarr/providers/jd_cache.py +131 -39
  37. quasarr/providers/log.py +1 -1
  38. quasarr/providers/myjd_api.py +260 -196
  39. quasarr/providers/notifications.py +53 -41
  40. quasarr/providers/obfuscated.py +9 -4
  41. quasarr/providers/sessions/al.py +71 -55
  42. quasarr/providers/sessions/dd.py +21 -14
  43. quasarr/providers/sessions/dl.py +30 -19
  44. quasarr/providers/sessions/nx.py +23 -14
  45. quasarr/providers/shared_state.py +292 -141
  46. quasarr/providers/statistics.py +75 -43
  47. quasarr/providers/utils.py +33 -27
  48. quasarr/providers/version.py +45 -14
  49. quasarr/providers/web_server.py +10 -5
  50. quasarr/search/__init__.py +30 -18
  51. quasarr/search/sources/al.py +124 -73
  52. quasarr/search/sources/by.py +110 -59
  53. quasarr/search/sources/dd.py +57 -35
  54. quasarr/search/sources/dj.py +69 -48
  55. quasarr/search/sources/dl.py +159 -100
  56. quasarr/search/sources/dt.py +110 -74
  57. quasarr/search/sources/dw.py +121 -61
  58. quasarr/search/sources/fx.py +108 -62
  59. quasarr/search/sources/he.py +78 -49
  60. quasarr/search/sources/mb.py +96 -48
  61. quasarr/search/sources/nk.py +80 -50
  62. quasarr/search/sources/nx.py +91 -62
  63. quasarr/search/sources/sf.py +171 -106
  64. quasarr/search/sources/sj.py +69 -48
  65. quasarr/search/sources/sl.py +115 -71
  66. quasarr/search/sources/wd.py +67 -44
  67. quasarr/search/sources/wx.py +188 -123
  68. quasarr/storage/config.py +65 -52
  69. quasarr/storage/setup.py +238 -140
  70. quasarr/storage/sqlite_database.py +10 -4
  71. {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/METADATA +4 -3
  72. quasarr-2.4.10.dist-info/RECORD +81 -0
  73. quasarr-2.4.8.dist-info/RECORD +0 -81
  74. {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/WHEEL +0 -0
  75. {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/entry_points.txt +0 -0
  76. {quasarr-2.4.8.dist-info → quasarr-2.4.10.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/fx.py

@@ -9,8 +9,8 @@ from base64 import urlsafe_b64encode
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
-from quasarr.providers.log import info, debug
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+from quasarr.providers.log import debug, info
 
 hostname = "fx"
 supported_mirrors = ["rapidgator"]
@@ -32,18 +32,22 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
     fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
 
     if not "arr" in request_from.lower():
-        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     if mirror and mirror not in supported_mirrors:
-        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
-              ' Skipping search!')
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+            " Skipping search!"
+        )
         return releases
 
     password = fx.split(".")[0]
-    url = f'https://{fx}/'
+    url = f"https://{fx}/"
     headers = {
-        'User-Agent': shared_state.values["user_agent"],
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
@@ -53,7 +57,9 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
         items = feed.find_all("article")
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     if items:
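
A note on the error strings used throughout both files: inside an "except Exception as e:" handler the name "e" is always bound, so the '"e" in dir()' guard never takes its fallback branch. A minimal standalone sketch (not quasarr code) illustrating this:

def demo():
    try:
        raise ValueError("boom")
    except Exception as e:
        # dir() lists the local names here, and "e" is always among them,
        # so the "Error occurred" branch is unreachable.
        return str(e) if "e" in dir() else "Error occurred"

assert demo() == "boom"
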
@@ -61,8 +67,10 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
             try:
                 article = BeautifulSoup(str(item), "html.parser")
                 try:
-                    source = article.find('h2', class_='entry-title').a["href"]
-                    titles = article.find_all("a", href=re.compile("(filecrypt|safe." + fx + ")"))
+                    source = article.find("h2", class_="entry-title").a["href"]
+                    titles = article.find_all(
+                        "a", href=re.compile("(filecrypt|safe." + fx + ")")
+                    )
                 except:
                     continue
                 i = 0
@@ -72,19 +80,27 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
                     try:
                         imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
-                        imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
+                        imdb_id = re.search(r"tt\d+", str(imdb_link)).group()
                     except:
                         imdb_id = None
 
                     try:
-                        size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
-                            i].next.next.text.replace("|", "").strip()
+                        size_info = (
+                            article.find_all(
+                                "strong",
+                                text=re.compile(r"(size|größe)", re.IGNORECASE),
+                            )[i]
+                            .next.next.text.replace("|", "")
+                            .strip()
+                        )
                         size_item = extract_size(size_info)
                         mb = shared_state.convert_to_mb(size_item)
                         size = mb * 1024 * 1024
                         payload = urlsafe_b64encode(
-                            f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
-                            "utf-8")
+                            f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                                "utf-8"
+                            )
+                        ).decode("utf-8")
                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                     except:
                         continue
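
For context on the payload built in this hunk: the release metadata is pipe-joined and URL-safe base64 encoded so it can travel as a query parameter to Quasarr's /download endpoint. A minimal sketch of the round trip, with hypothetical field values:

from base64 import urlsafe_b64decode, urlsafe_b64encode

# Hypothetical values standing in for the parsed feed fields.
fields = ["Some.Movie.2024.1080p.WEB.x264-GRP", "https://example.com/folder",
          "None", "4500", "pw", "tt1234567", "fx"]
payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")

# The receiving endpoint can reverse the encoding and split on the pipes;
# this format assumes no field ever contains a literal "|".
decoded = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
assert decoded == fields
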
@@ -96,23 +112,27 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
                     except:
                         continue
 
-                    releases.append({
-                        "details": {
-                            "title": title,
-                            "hostname": hostname.lower(),
-                            "imdb_id": imdb_id,
-                            "link": link,
-                            "mirror": mirror,
-                            "size": size,
-                            "date": published,
-                            "source": source
-                        },
-                        "type": "protected"
-                    })
+                    releases.append(
+                        {
+                            "details": {
+                                "title": title,
+                                "hostname": hostname.lower(),
+                                "imdb_id": imdb_id,
+                                "link": link,
+                                "mirror": mirror,
+                                "size": size,
+                                "date": published,
+                                "source": source,
+                            },
+                            "type": "protected",
+                        }
+                    )
 
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed: {e}")
-                mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+                mark_hostname_issue(
+                    hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+                )
 
     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
@@ -122,34 +142,48 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
     return releases
 
 
-def fx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+def fx_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = fx.split(".")[0]
 
     if not "arr" in request_from.lower():
-        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     if mirror and mirror not in supported_mirrors:
-        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
-              ' Skipping search!')
+        debug(
+            f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+            " Skipping search!"
+        )
         return releases
 
-    url = f'https://{fx}/?s={search_string}'
+    url = f"https://{fx}/?s={search_string}"
     headers = {
-        'User-Agent': shared_state.values["user_agent"],
+        "User-Agent": shared_state.values["user_agent"],
     }
 
     try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        search = BeautifulSoup(r.content, "html.parser")
-        results = search.find('h2', class_='entry-title')
+        results = search.find("h2", class_="entry-title")
 
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
-        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     if results:
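
One behavioral detail left unchanged by the reformat: fx_search still interpolates search_string directly into the URL, while the he.py hunks further down pass the query via requests' params argument, which percent-encodes it. A hedged sketch of the params-based equivalent, with placeholder values:

import requests

fx = "example.com"            # hypothetical hostname value
search_string = "some title"  # raw, unencoded user query
headers = {"User-Agent": "Quasarr"}

# requests builds and percent-encodes the ?s= query string itself,
# so spaces and umlauts need no manual quoting.
r = requests.get(f"https://{fx}/", params={"s": search_string},
                 headers=headers, timeout=10)
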
@@ -162,7 +196,9 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None
             items = feed.find_all("article")
         except Exception as e:
             info(f"Error loading {hostname.upper()} feed: {e}")
-            mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+            mark_hostname_issue(
+                hostname, "search", str(e) if "e" in dir() else "Error occurred"
+            )
             return releases
 
         for item in items:
@@ -177,28 +213,34 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None
                     link = title["href"]
                     title = shared_state.sanitize_title(title.text)
 
-                    if not shared_state.is_valid_release(title,
-                                                         request_from,
-                                                         search_string,
-                                                         season,
-                                                         episode):
+                    if not shared_state.is_valid_release(
+                        title, request_from, search_string, season, episode
+                    ):
                         continue
 
                     try:
                         imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
-                        imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
+                        imdb_id = re.search(r"tt\d+", str(imdb_link)).group()
                     except:
                         imdb_id = None
 
                     try:
-                        size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
-                            i].next.next.text.replace("|", "").strip()
+                        size_info = (
+                            article.find_all(
+                                "strong",
+                                text=re.compile(r"(size|größe)", re.IGNORECASE),
+                            )[i]
+                            .next.next.text.replace("|", "")
+                            .strip()
+                        )
                         size_item = extract_size(size_info)
                         mb = shared_state.convert_to_mb(size_item)
                         size = mb * 1024 * 1024
                         payload = urlsafe_b64encode(
-                            f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
-                            "utf-8")
+                            f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode(
+                                "utf-8"
+                            )
+                        ).decode("utf-8")
                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                     except:
                         continue
@@ -210,23 +252,27 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None
                     except:
                         continue
 
-                    releases.append({
-                        "details": {
-                            "title": title,
-                            "hostname": hostname.lower(),
-                            "imdb_id": imdb_id,
-                            "link": link,
-                            "mirror": mirror,
-                            "size": size,
-                            "date": published,
-                            "source": result_source
-                        },
-                        "type": "protected"
-                    })
+                    releases.append(
+                        {
+                            "details": {
+                                "title": title,
+                                "hostname": hostname.lower(),
+                                "imdb_id": imdb_id,
+                                "link": link,
+                                "mirror": mirror,
+                                "size": size,
+                                "date": published,
+                                "source": result_source,
+                            },
+                            "type": "protected",
+                        }
+                    )
 
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} search: {e}")
-                mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+                mark_hostname_issue(
+                    hostname, "search", str(e) if "e" in dir() else "Error occurred"
+                )
 
     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
quasarr/search/sources/he.py

@@ -11,9 +11,9 @@ from html import unescape
 import requests
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title, get_year
-from quasarr.providers.log import info, debug
+from quasarr.providers.log import debug, info
 
 hostname = "he"
 supported_mirrors = ["rapidgator", "nitroflare"]
@@ -21,35 +21,37 @@ supported_mirrors = ["rapidgator", "nitroflare"]
 
 def parse_posted_ago(txt):
     try:
-        m = re.search(r"(\d+)\s*(sec|min|hour|day|week|month|year)s?", txt, re.IGNORECASE)
+        m = re.search(
+            r"(\d+)\s*(sec|min|hour|day|week|month|year)s?", txt, re.IGNORECASE
+        )
         if not m:
-            return ''
+            return ""
         value = int(m.group(1))
         unit = m.group(2).lower()
         now = datetime.utcnow()
-        if unit.startswith('sec'):
+        if unit.startswith("sec"):
             delta = timedelta(seconds=value)
-        elif unit.startswith('min'):
+        elif unit.startswith("min"):
             delta = timedelta(minutes=value)
-        elif unit.startswith('hour'):
+        elif unit.startswith("hour"):
             delta = timedelta(hours=value)
-        elif unit.startswith('day'):
+        elif unit.startswith("day"):
             delta = timedelta(days=value)
-        elif unit.startswith('week'):
+        elif unit.startswith("week"):
             delta = timedelta(weeks=value)
-        elif unit.startswith('month'):
+        elif unit.startswith("month"):
             delta = timedelta(days=30 * value)
         else:
             delta = timedelta(days=365 * value)
         return (datetime.utcnow() - delta).strftime("%a, %d %b %Y %H:%M:%S +0000")
     except Exception:
-        return ''
+        return ""
 
 
 def extract_size(text: str) -> dict:
     match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
     if match:
-        size = match.group(1).replace(',', '.')
+        size = match.group(1).replace(",", ".")
         unit = match.group(2)
         return {"size": size, "sizeunit": unit}
     return {"size": "0", "sizeunit": "MB"}
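
To make the two helpers above concrete, a short usage sketch (inputs are hypothetical; the expected results follow from the code in this hunk, assuming both functions are in scope):

extract_size("4,7 GB")        # -> {"size": "4.7", "sizeunit": "GB"} (comma normalized)
extract_size("no size here")  # -> {"size": "0", "sizeunit": "MB"} (fallback)

parse_posted_ago("Posted 3 hours ago")
# -> UTC timestamp three hours in the past, formatted like
#    "Mon, 01 Jan 2024 09:00:00 +0000"
parse_posted_ago("sometime")  # -> "" (no match)
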
@@ -59,12 +61,22 @@ def he_feed(*args, **kwargs):
     return he_search(*args, **kwargs)
 
 
-def he_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+def he_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string="",
+    mirror=None,
+    season=None,
+    episode=None,
+):
     releases = []
     host = shared_state.values["config"]("Hostnames").get(hostname)
 
     if not "arr" in request_from.lower():
-        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        debug(
+            f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+        )
         return releases
 
     if "radarr" in request_from.lower():
@@ -80,7 +92,7 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
     if search_string != "":
         imdb_id = shared_state.is_imdb_id(search_string)
         if imdb_id:
-            local_title = get_localized_title(shared_state, imdb_id, 'en')
+            local_title = get_localized_title(shared_state, imdb_id, "en")
             if not local_title:
                 info(f"{hostname}: no title for IMDb {imdb_id}")
                 return releases
@@ -108,7 +120,7 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
     if episode:
         source_search += f"E{int(episode):02d}"
 
-    url = f'https://{host}/tag/{tag}/'
+    url = f"https://{host}/tag/{tag}/"
 
     headers = {"User-Agent": shared_state.values["user_agent"]}
     params = {"s": source_search}
@@ -116,11 +128,13 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
     try:
         r = requests.get(url, headers=headers, params=params, timeout=timeout)
         r.raise_for_status()
-        soup = BeautifulSoup(r.content, 'html.parser')
-        results = soup.find_all('div', class_='item')
+        soup = BeautifulSoup(r.content, "html.parser")
+        results = soup.find_all("div", class_="item")
     except Exception as e:
         info(f"{hostname}: {search_type} load error: {e}")
-        mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
+        mark_hostname_issue(
+            hostname, search_type, str(e) if "e" in dir() else "Error occurred"
+        )
         return releases
 
     if not results:
@@ -128,19 +142,19 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
 
     for result in results:
         try:
-            data = result.find('div', class_='data')
+            data = result.find("div", class_="data")
             if not data:
                 continue
 
-            headline = data.find('h5')
+            headline = data.find("h5")
             if not headline:
                 continue
 
-            a = headline.find('a', href=True)
+            a = headline.find("a", href=True)
             if not a:
                 continue
 
-            source = a['href'].strip()
+            source = a["href"].strip()
 
             head_title = a.get_text(strip=True)
             if not head_title:
@@ -149,7 +163,9 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
             head_split = head_title.split(" – ")
             title = head_split[0].strip()
 
-            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+            if not shared_state.is_valid_release(
+                title, request_from, search_string, season, episode
+            ):
                 continue
 
             size_item = extract_size(head_split[1].strip())
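
The headline parsing above depends on the separator being " – " (space, en dash, space), not an ASCII hyphen. A hypothetical example of what the split yields:

head_title = "Some.Movie.2024.1080p.WEB.h264-GRP – 4,2 GB"
head_split = head_title.split(" – ")
title = head_split[0].strip()  # "Some.Movie.2024.1080p.WEB.h264-GRP"
# extract_size(head_split[1].strip()) -> {"size": "4.2", "sizeunit": "GB"}
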
@@ -158,12 +174,12 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
             size = mb * 1024 * 1024
 
             published = None
-            p_meta = data.find('p', class_='meta')
+            p_meta = data.find("p", class_="meta")
             if p_meta:
                 posted_span = None
-                for sp in p_meta.find_all('span'):
-                    txt = sp.get_text(' ', strip=True)
-                    if txt.lower().startswith('posted') or 'ago' in txt.lower():
+                for sp in p_meta.find_all("span"):
+                    txt = sp.get_text(" ", strip=True)
+                    if txt.lower().startswith("posted") or "ago" in txt.lower():
                         posted_span = txt
                         break
 
@@ -176,15 +192,21 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
             release_imdb_id = None
             try:
                 r = requests.get(source, headers=headers, timeout=10)
-                soup = BeautifulSoup(r.content, 'html.parser')
+                soup = BeautifulSoup(r.content, "html.parser")
             except Exception as e:
-                mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
+                mark_hostname_issue(
+                    hostname, search_type, str(e) if "e" in dir() else "Error occurred"
+                )
             try:
-                imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
+                imdb_link = soup.find(
+                    "a", href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE)
+                )
                 if imdb_link:
-                    release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
+                    release_imdb_id = re.search(r"tt\d+", imdb_link["href"]).group()
                     if imdb_id and release_imdb_id != imdb_id:
-                        debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
+                        debug(
+                            f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}"
+                        )
                         continue
                 else:
                     debug(f"{hostname}: imdb link not found for title {title}")
@@ -194,22 +216,29 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
 
             password = None
             payload = urlsafe_b64encode(
-                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode("utf-8")).decode()
-            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
-
-            releases.append({
-                "details": {
-                    "title": title,
-                    "hostname": hostname,
-                    "imdb_id": release_imdb_id,
-                    "link": link,
-                    "mirror": mirror,
-                    "size": size,
-                    "date": published,
-                    "source": source
-                },
-                "type": "protected"
-            })
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode(
+                    "utf-8"
+                )
+            ).decode()
+            link = (
+                f"{shared_state.values['internal_address']}/download/?payload={payload}"
+            )
+
+            releases.append(
+                {
+                    "details": {
+                        "title": title,
+                        "hostname": hostname,
+                        "imdb_id": release_imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": size,
+                        "date": published,
+                        "source": source,
+                    },
+                    "type": "protected",
+                }
+            )
         except Exception as e:
             debug(f"{hostname}: error parsing search result: {e}")
             continue