quasarr 1.4.1__py3-none-any.whl → 1.20.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr might be problematic. Click here for more details.

Files changed (67) hide show
  1. quasarr/__init__.py +157 -67
  2. quasarr/api/__init__.py +126 -43
  3. quasarr/api/arr/__init__.py +197 -78
  4. quasarr/api/captcha/__init__.py +885 -39
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +84 -22
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +236 -487
  9. quasarr/downloads/linkcrypters/al.py +237 -0
  10. quasarr/downloads/linkcrypters/filecrypt.py +178 -31
  11. quasarr/downloads/linkcrypters/hide.py +123 -0
  12. quasarr/downloads/packages/__init__.py +461 -0
  13. quasarr/downloads/sources/al.py +697 -0
  14. quasarr/downloads/sources/by.py +106 -0
  15. quasarr/downloads/sources/dd.py +6 -78
  16. quasarr/downloads/sources/dj.py +7 -0
  17. quasarr/downloads/sources/dt.py +1 -1
  18. quasarr/downloads/sources/dw.py +2 -2
  19. quasarr/downloads/sources/he.py +112 -0
  20. quasarr/downloads/sources/mb.py +47 -0
  21. quasarr/downloads/sources/nk.py +51 -0
  22. quasarr/downloads/sources/nx.py +36 -81
  23. quasarr/downloads/sources/sf.py +27 -4
  24. quasarr/downloads/sources/sj.py +7 -0
  25. quasarr/downloads/sources/sl.py +90 -0
  26. quasarr/downloads/sources/wd.py +110 -0
  27. quasarr/providers/cloudflare.py +204 -0
  28. quasarr/providers/html_images.py +20 -0
  29. quasarr/providers/html_templates.py +48 -39
  30. quasarr/providers/imdb_metadata.py +15 -2
  31. quasarr/providers/myjd_api.py +34 -5
  32. quasarr/providers/notifications.py +30 -5
  33. quasarr/providers/obfuscated.py +35 -0
  34. quasarr/providers/sessions/__init__.py +0 -0
  35. quasarr/providers/sessions/al.py +286 -0
  36. quasarr/providers/sessions/dd.py +78 -0
  37. quasarr/providers/sessions/nx.py +76 -0
  38. quasarr/providers/shared_state.py +347 -20
  39. quasarr/providers/statistics.py +154 -0
  40. quasarr/providers/version.py +1 -1
  41. quasarr/search/__init__.py +112 -36
  42. quasarr/search/sources/al.py +448 -0
  43. quasarr/search/sources/by.py +203 -0
  44. quasarr/search/sources/dd.py +17 -6
  45. quasarr/search/sources/dj.py +213 -0
  46. quasarr/search/sources/dt.py +37 -7
  47. quasarr/search/sources/dw.py +27 -47
  48. quasarr/search/sources/fx.py +27 -29
  49. quasarr/search/sources/he.py +196 -0
  50. quasarr/search/sources/mb.py +195 -0
  51. quasarr/search/sources/nk.py +188 -0
  52. quasarr/search/sources/nx.py +22 -6
  53. quasarr/search/sources/sf.py +143 -151
  54. quasarr/search/sources/sj.py +213 -0
  55. quasarr/search/sources/sl.py +246 -0
  56. quasarr/search/sources/wd.py +208 -0
  57. quasarr/storage/config.py +20 -4
  58. quasarr/storage/setup.py +216 -51
  59. quasarr-1.20.4.dist-info/METADATA +304 -0
  60. quasarr-1.20.4.dist-info/RECORD +72 -0
  61. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/WHEEL +1 -1
  62. quasarr/providers/tvmaze_metadata.py +0 -23
  63. quasarr-1.4.1.dist-info/METADATA +0 -174
  64. quasarr-1.4.1.dist-info/RECORD +0 -43
  65. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/entry_points.txt +0 -0
  66. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/licenses/LICENSE +0 -0
  67. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/top_level.txt +0 -0
@@ -15,18 +15,6 @@ hostname = "fx"
15
15
  supported_mirrors = ["rapidgator"]
16
16
 
17
17
 
18
- def sanitize_title(title):
19
- umlaut_map = {
20
- "Ä": "Ae", "ä": "ae",
21
- "Ö": "Oe", "ö": "oe",
22
- "Ü": "Ue", "ü": "ue",
23
- "ß": "ss"
24
- }
25
- for umlaut, replacement in umlaut_map.items():
26
- title = title.replace(umlaut, replacement)
27
- return title.encode("ascii", errors="ignore").decode().replace("/", "").replace(" ", ".").strip()
28
-
29
-
30
18
  def extract_size(text):
31
19
  match = re.match(r"(\d+)\s*([A-Za-z]+)", text)
32
20
  if match:
@@ -37,11 +25,16 @@ def extract_size(text):
37
25
  raise ValueError(f"Invalid size format: {text}")
38
26
 
39
27
 
40
- def fx_feed(shared_state, start_time, mirror=None):
28
+ def fx_feed(shared_state, start_time, request_from, mirror=None):
41
29
  releases = []
42
30
 
43
31
  fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
44
32
 
33
+ if not "arr" in request_from.lower():
34
+ debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
35
+ return releases
36
+
37
+
45
38
  if mirror and mirror not in supported_mirrors:
46
39
  debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
47
40
  ' Skipping search!')
@@ -73,7 +66,7 @@ def fx_feed(shared_state, start_time, mirror=None):
73
66
  i = 0
74
67
  for title in titles:
75
68
  link = title["href"]
76
- title = sanitize_title(title.text)
69
+ title = shared_state.sanitize_title(title.text)
77
70
 
78
71
  try:
79
72
  imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
@@ -118,22 +111,26 @@ def fx_feed(shared_state, start_time, mirror=None):
118
111
  info(f"Error parsing {hostname.upper()} feed: {e}")
119
112
 
120
113
  elapsed_time = time.time() - start_time
121
- debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
114
+ debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
122
115
 
123
116
  return releases
124
117
 
125
118
 
126
- def fx_search(shared_state, start_time, search_string, mirror=None):
119
+ def fx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
127
120
  releases = []
128
-
129
121
  fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
122
+ password = fx.split(".")[0]
123
+
124
+ if not "arr" in request_from.lower():
125
+ debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
126
+ return releases
127
+
130
128
 
131
129
  if mirror and mirror not in supported_mirrors:
132
130
  debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
133
131
  ' Skipping search!')
134
132
  return releases
135
133
 
136
- password = fx.split(".")[0]
137
134
  url = f'https://{fx}/?s={search_string}'
138
135
  headers = {
139
136
  'User-Agent': shared_state.values["user_agent"],
@@ -148,8 +145,6 @@ def fx_search(shared_state, start_time, search_string, mirror=None):
148
145
  info(f"Error loading {hostname.upper()} feed: {e}")
149
146
  return releases
150
147
 
151
- imdb_id = shared_state.is_imdb_id(search_string)
152
-
153
148
  if results:
154
149
  for result in results:
155
150
  try:
@@ -171,17 +166,20 @@ def fx_search(shared_state, start_time, search_string, mirror=None):
171
166
  i = 0
172
167
  for title in titles:
173
168
  link = title["href"]
174
- title = sanitize_title(title.text)
169
+ title = shared_state.sanitize_title(title.text)
175
170
 
176
- if not imdb_id and not shared_state.search_string_in_sanitized_title(search_string, title):
171
+ if not shared_state.is_valid_release(title,
172
+ request_from,
173
+ search_string,
174
+ season,
175
+ episode):
177
176
  continue
178
177
 
179
- if not imdb_id:
180
- try:
181
- imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
182
- imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
183
- except:
184
- imdb_id = None
178
+ try:
179
+ imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
180
+ imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
181
+ except:
182
+ imdb_id = None
185
183
 
186
184
  try:
187
185
  size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
@@ -220,6 +218,6 @@ def fx_search(shared_state, start_time, search_string, mirror=None):
220
218
  info(f"Error parsing {hostname.upper()} search: {e}")
221
219
 
222
220
  elapsed_time = time.time() - start_time
223
- debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
221
+ debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
224
222
 
225
223
  return releases
@@ -0,0 +1,196 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import re
6
+ import time
7
+ from base64 import urlsafe_b64encode
8
+ from datetime import datetime, timedelta
9
+ from html import unescape
10
+
11
+ import requests
12
+ from bs4 import BeautifulSoup
13
+
14
+ from quasarr.providers.imdb_metadata import get_localized_title
15
+ from quasarr.providers.log import info, debug
16
+
17
# Site identifier used for config lookup, logging, and release metadata.
hostname = "he"
# One-click hosters this source is allowed to offer.
supported_mirrors = ["rapidgator", "nitroflare"]
19
+
20
+
21
def parse_posted_ago(txt):
    """Convert a relative age string like "3 hours ago" into an RFC-2822 date.

    Months are approximated as 30 days and years as 365 days.

    :param txt: free text containing a "<number> <unit>" pair
    :return: UTC timestamp string "%a, %d %b %Y %H:%M:%S +0000", or '' when
             no recognizable age token is found or parsing fails (best-effort).
    """
    try:
        m = re.search(r"(\d+)\s*(sec|min|hour|day|week|month|year)s?", txt, re.IGNORECASE)
        if not m:
            return ''
        value = int(m.group(1))
        unit = m.group(2).lower()
        # Capture "now" once and reuse it below; the original assigned this
        # variable but then called utcnow() a second time, leaving it unused.
        now = datetime.utcnow()
        if unit.startswith('sec'):
            delta = timedelta(seconds=value)
        elif unit.startswith('min'):
            delta = timedelta(minutes=value)
        elif unit.startswith('hour'):
            delta = timedelta(hours=value)
        elif unit.startswith('day'):
            delta = timedelta(days=value)
        elif unit.startswith('week'):
            delta = timedelta(weeks=value)
        elif unit.startswith('month'):
            delta = timedelta(days=30 * value)
        else:
            delta = timedelta(days=365 * value)
        return (now - delta).strftime("%a, %d %b %Y %H:%M:%S +0000")
    except Exception:
        return ''
46
+
47
+
48
def extract_size(text: str) -> dict:
    """Pull the first "<number> <unit>" token out of *text*.

    Decimal commas are normalized to dots. Falls back to "0 MB" when no
    size token is present.
    """
    found = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
    if not found:
        return {"size": "0", "sizeunit": "MB"}
    amount, unit = found.groups()
    return {"size": amount.replace(',', '.'), "sizeunit": unit}
55
+
56
+
57
def he_feed(*args, **kwargs):
    """Feed endpoint: the site has no separate feed, so delegate to he_search
    (an empty search_string there lists the newest posts of the tag)."""
    return he_search(*args, **kwargs)
59
+
60
+
61
def he_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
    """Search (or list the feed of) the "HE" site and return protected releases.

    Behavior:
      * Only *arr clients are served; anything else returns an empty list.
      * With an empty ``search_string`` the newest posts of the tag matching
        the client (movies for Radarr, tv-shows otherwise) are listed.
      * A non-empty ``search_string`` must be an IMDb id; it is resolved to
        the English title the site is queried with. Plain-text searches are
        not supported and return no results.

    Each result's detail page is fetched to determine the release's IMDb id;
    results contradicting the requested id are dropped.

    :param shared_state: shared runtime state (config, user agent, helpers)
    :param start_time: ``time.time()`` reference for the elapsed-time log
    :param request_from: requesting client identifier (e.g. "Radarr")
    :param search_string: IMDb id ("tt...") or "" for feed mode
    :param mirror: optional hoster filter, validated against supported_mirrors
    :param season: forwarded to ``shared_state.is_valid_release``
    :param episode: forwarded to ``shared_state.is_valid_release``
    :return: list of {"details": {...}, "type": "protected"} dicts
    """
    releases = []
    host = shared_state.values["config"]("Hostnames").get(hostname)

    if not "arr" in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    # Radarr gets the movie tag, every other *arr client the TV tag.
    if "radarr" in request_from.lower():
        tag = "movies"
    else:
        tag = "tv-shows"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by {hostname}.')
        return releases

    source_search = ""
    if search_string != "":
        imdb_id = shared_state.is_imdb_id(search_string)
        if imdb_id:
            local_title = get_localized_title(shared_state, imdb_id, 'en')
            if not local_title:
                info(f"{hostname}: no title for IMDb {imdb_id}")
                return releases
            source_search = local_title
        else:
            # Free-text searches are not supported by this source.
            return releases
        source_search = unescape(source_search)
    else:
        imdb_id = None

    url = f'https://{host}/tag/{tag}/'

    headers = {"User-Agent": shared_state.values["user_agent"]}
    params = {"s": source_search}

    try:
        response = requests.get(url, headers=headers, params=params, timeout=10)
        listing_soup = BeautifulSoup(response.content, 'html.parser')
        results = listing_soup.find_all('div', class_='item')
    except Exception as e:
        info(f"{hostname}: search load error: {e}")
        return releases

    if not results:
        return releases

    for result in results:
        try:
            data = result.find('div', class_='data')
            if not data:
                continue

            headline = data.find('h5')
            if not headline:
                continue

            a = headline.find('a', href=True)
            if not a:
                continue

            source = a['href'].strip()

            head_title = a.get_text(strip=True)
            if not head_title:
                continue

            # Headline format: "<release title> – <size>".
            head_split = head_title.split(" – ")
            title = head_split[0].strip()

            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
                continue

            size_item = extract_size(head_split[1].strip())
            mb = shared_state.convert_to_mb(size_item)

            size = mb * 1024 * 1024

            published = None
            p_meta = data.find('p', class_='meta')
            if p_meta:
                posted_span = None
                for sp in p_meta.find_all('span'):
                    txt = sp.get_text(' ', strip=True)
                    if txt.lower().startswith('posted') or 'ago' in txt.lower():
                        posted_span = txt
                        break

                if posted_span:
                    published = parse_posted_ago(posted_span)

            # Results without any "posted ... ago" span are dropped entirely.
            # NOTE: parse_posted_ago returns '' on parse failure, which
            # deliberately passes this check (the release keeps an empty date).
            if published is None:
                continue

            release_imdb_id = None
            try:
                # Fetch the detail page to pin down the release's IMDb id.
                # Dedicated names here: the original reassigned `r`/`soup`,
                # shadowing the listing-page response and soup above.
                detail_response = requests.get(source, headers=headers, timeout=10)
                detail_soup = BeautifulSoup(detail_response.content, 'html.parser')
                imdb_link = detail_soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
                if imdb_link:
                    release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
                    if imdb_id and release_imdb_id != imdb_id:
                        debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
                        continue
                else:
                    debug(f"{hostname}: imdb link not found for title {title}")
            except Exception as e:
                # Include the exception so failures are diagnosable from the
                # log (the original captured `e` but never logged it).
                debug(f"{hostname}: failed to determine imdb_id for title {title}: {e}")
                continue

            password = None
            payload = urlsafe_b64encode(
                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": release_imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })
        except Exception as e:
            debug(f"{hostname}: error parsing search result: {e}")
            continue

    elapsed = time.time() - start_time
    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
    return releases
@@ -0,0 +1,195 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import html
6
+ import re
7
+ import time
8
+ from base64 import urlsafe_b64encode
9
+ from datetime import datetime, timedelta
10
+ from urllib.parse import quote_plus
11
+
12
+ import requests
13
+ from bs4 import BeautifulSoup
14
+
15
+ from quasarr.providers.imdb_metadata import get_localized_title
16
+ from quasarr.providers.log import info, debug
17
+
18
# Site identifier used for config lookup, logging, and release metadata.
hostname = "mb"
# One-click hosters this source is allowed to offer.
supported_mirrors = ["rapidgator", "ddownload"]
# Release-name heuristics used when filtering search results:
XXX_REGEX = re.compile(r"\.xxx\.", re.I)  # adult-content marker in release names
RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)  # e.g. 720p / 1080p / 2160p
CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)  # common video codec tokens
IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)")  # captures the IMDb title id

# map German month names to numbers
GERMAN_MONTHS = {
    'Januar': '01', 'Februar': '02', 'März': '03', 'April': '04', 'Mai': '05', 'Juni': '06',
    'Juli': '07', 'August': '08', 'September': '09', 'Oktober': '10', 'November': '11', 'Dezember': '12'
}
30
+
31
+
32
def convert_to_rss_date(date_str):
    """Turn a "DD.MM.YYYY - HH:MM" timestamp into an RFC-2822 date string.

    Raises ValueError when *date_str* does not match the expected format.
    """
    moment = datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
    return moment.strftime("%a, %d %b %Y %H:%M:%S +0000")
35
+
36
+
37
def extract_size(text):
    """Split a leading "<number> <unit>" size string into its parts.

    Raises ValueError when *text* does not start with a size token.
    """
    parsed = re.match(r"(\d+(?:\.\d+)?)\s*([A-Za-z]+)", text)
    if parsed:
        return {"size": parsed.group(1), "sizeunit": parsed.group(2)}
    raise ValueError(f"Invalid size format: {text!r}")
42
+
43
+
44
def _parse_posts(soup, shared_state, password, mirror_filter,
                 is_search=False, request_from=None, search_string=None,
                 season=None, episode=None):
    """Extract release dicts from a parsed "MB" listing or search page.

    Walks every ``div.post`` element and builds one "protected" release per
    post that survives filtering. Two modes:

    * feed mode (``is_search=False``): posts are kept only when at least one
      "Download" link points to a supported mirror (and, if ``mirror_filter``
      is set, to that mirror specifically);
    * search mode (``is_search=True``): posts are validated against
      ``shared_state.is_valid_release`` plus release-name heuristics
      (drop .XXX. unless searched for, require a resolution/codec token,
      require a space-free scene-style title). Mirrors cannot be inspected
      in this mode, so only ``mirror_filter`` membership is checked.

    :param soup: BeautifulSoup document of the listing/search page
    :param shared_state: shared runtime state (validators, converters, config)
    :param password: archive password embedded in the download payload
    :param mirror_filter: optional mirror the caller insists on
    :param is_search: switches between feed filtering and search filtering
    :param request_from: requesting client (search mode only)
    :param search_string: query text (search mode only)
    :param season: forwarded to is_valid_release (search mode only)
    :param episode: forwarded to is_valid_release (search mode only)
    :return: list of {"details": {...}, "type": "protected"} dicts
    """
    releases = []
    # Fallback publish date when a post carries no parseable date.
    # NOTE(review): this is "%Y-%m-%d %H:%M:%S" while parsed dates use the
    # RFC-2822 "%a, %d %b %Y ..." format — confirm consumers accept both.
    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')

    for post in soup.select("div.post"):
        try:
            # title & source: first <h1><a> of the post.
            h1 = post.find("h1")
            a = h1.find("a")
            source = a["href"].strip()
            title = a.get_text(strip=True)

            # parse date like "Montag, 1. Januar 2024 12:00" (German month names).
            date_p = post.find("p", class_="date_x")
            date_txt = date_p.get_text(strip=True) if date_p else None
            published = one_hour_ago
            if date_txt:
                m_date = re.search(r'(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})', date_txt)
                if m_date:
                    day, mon_name, year, hm = m_date.groups()
                    # Unknown month names silently fall back to January.
                    mon = GERMAN_MONTHS.get(mon_name, '01')
                    dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
                    published = dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")

            if is_search:
                if not shared_state.is_valid_release(title,
                                                     request_from,
                                                     search_string,
                                                     season,
                                                     episode):
                    continue

                # drop .XXX. unless user explicitly searched xxx
                if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
                    continue
                # require resolution/codec
                if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
                    continue
                # require no spaces in title
                if " " in title:
                    continue

                # can't check for mirrors in search context
                if mirror_filter and mirror_filter not in supported_mirrors:
                    continue
            else:
                # Feed mode: collect the hoster names of all "Download" links
                # and require at least one supported (and requested) mirror.
                mirror_candidates = []
                for strong in post.find_all('strong', string=re.compile(r'^Download', re.I)):
                    link_tag = strong.find_next_sibling('a')
                    if link_tag and link_tag.get_text(strip=True):
                        host = link_tag.get_text(strip=True).split('.')[0].lower()
                        mirror_candidates.append(host)
                valid = [m for m in mirror_candidates if m in supported_mirrors]
                if not valid or (mirror_filter and mirror_filter not in valid):
                    continue

            # extract IMDb ID from any imdb.com/title/tt... link in the post
            imdb_id = None
            for tag in post.find_all('a', href=True):
                m = IMDB_REGEX.search(tag['href'])
                if m:
                    imdb_id = m.group(1)
                    break

            # size extraction from a "Größe: <n> GB/MB/KB" text fragment;
            # posts without it report size 0.
            mb = size_bytes = 0
            size_match = re.search(r"Größe:\s*([\d\.]+)\s*([GMK]B)", post.get_text())
            if size_match:
                sz = {"size": size_match.group(1), "sizeunit": size_match.group(2)}
                mb = shared_state.convert_to_mb(sz)
                size_bytes = mb * 1024 * 1024

            # Encode everything the download handler needs into one token.
            payload = urlsafe_b64encode(
                f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
            ).decode()
            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": imdb_id,
                    "link": link,
                    "mirror": mirror_filter,
                    "size": size_bytes,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })
        except Exception as e:
            # Skip malformed posts instead of aborting the whole page.
            debug(f"Error parsing {hostname.upper()} post: {e}")
            continue
    return releases
140
+
141
+
142
def mb_feed(shared_state, start_time, request_from, mirror=None):
    """Fetch the newest releases from the "MB" category feed.

    Radarr clients browse the movie category ("neuerscheinungen"), every
    other *arr client the series category. Non-*arr callers get an empty
    list, as do load/parse errors.

    :param shared_state: shared runtime state (config, user agent, helpers)
    :param start_time: ``time.time()`` reference for the elapsed-time log
    :param request_from: requesting client identifier (e.g. "Radarr")
    :param mirror: optional hoster filter, applied per post in _parse_posts
    :return: list of release dicts (possibly empty)
    """
    mb = shared_state.values["config"]("Hostnames").get(hostname)

    if not "arr" in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return []

    # The site uses its own hostname as the archive password.
    password = mb
    # Case-insensitive client check, consistent with the *arr guard above
    # (the original tested the literal "Radarr" and would misroute a
    # lowercase client name to the series category).
    section = "neuerscheinungen" if "radarr" in request_from.lower() else "serie"
    url = f"https://{mb}/category/{section}/"
    headers = {'User-Agent': shared_state.values["user_agent"]}
    try:
        html_doc = requests.get(url, headers=headers, timeout=10).content
        soup = BeautifulSoup(html_doc, "html.parser")
        releases = _parse_posts(soup, shared_state, password, mirror_filter=mirror)
    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")
        releases = []
    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases
162
+
163
+
164
def mb_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    """Run a search against "MB" and return matching releases.

    An IMDb id is first resolved to its localized (German) title, which then
    serves as the site's search query. Non-*arr callers and load errors
    yield an empty list.
    """
    mb = shared_state.values["config"]("Hostnames").get(hostname)

    if not "arr" in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return []

    # The site uses its own hostname as the archive password.
    password = mb

    imdb_id = shared_state.is_imdb_id(search_string)
    if imdb_id:
        localized = get_localized_title(shared_state, imdb_id, 'de')
        if not localized:
            info(f"Could not extract title from IMDb-ID {imdb_id}")
            return []
        search_string = html.unescape(localized)

    query = quote_plus(search_string)
    search_url = f"https://{mb}/?s={query}&id=20&post_type=post"
    request_headers = {'User-Agent': shared_state.values["user_agent"]}

    releases = []
    try:
        page_content = requests.get(search_url, headers=request_headers, timeout=10).content
        parsed_page = BeautifulSoup(page_content, "html.parser")
        releases = _parse_posts(
            parsed_page, shared_state, password, mirror_filter=mirror,
            is_search=True, request_from=request_from,
            search_string=search_string, season=season, episode=episode
        )
    except Exception as e:
        info(f"Error loading {hostname.upper()} search: {e}")
        releases = []

    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases