quasarr 2.4.11__py3-none-any.whl → 2.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -0,0 +1,515 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import re
+ import time
+ import warnings
+ from base64 import urlsafe_b64encode
+ from datetime import datetime
+
+ import requests
+ from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning
+
+ from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
+
+ warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)
+ from quasarr.providers.log import debug, info
+
+ hostname = "hs"
+ supported_mirrors = ["rapidgator", "ddownload", "katfile"]
+
+ FILECRYPT_REGEX = re.compile(r"filecrypt\.(?:cc|co|to)/container/", re.I)
+ SIZE_REGEX = re.compile(r"Größe[:\s]*(\d+(?:[.,]\d+)?)\s*(MB|GB|TB)", re.I)
+ IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)", re.I)
+ DATE_REGEX = re.compile(r"(\d{2}\.\d{2}\.\d{2}),?\s*(\d{1,2}:\d{2})")
+ # Pattern to extract individual episode release names from text
+ # Matches: Title.S02E03.Info-GROUP (group name starts after hyphen)
+ EPISODE_EXTRACT_REGEX = re.compile(
+     r"([A-Za-z][A-Za-z0-9.]+\.S\d{2}E\d{2}[A-Za-z0-9.]*-[A-Za-z][A-Za-z0-9]*)", re.I
+ )
+ # Pattern to clean trailing common words that may be attached to group names
+ # e.g., -WAYNEAvg -> -WAYNE, -GROUPBitrate -> -GROUP
+ TRAILING_GARBAGE_PATTERN = re.compile(
+     r"(Avg|Bitrate|Size|Größe|Video|Audio|Duration|Release|Info).*$", re.I
+ )
+ # Pattern to extract average bitrate (e.g., "Avg. Bitrate: 10,6 Mb/s" or "6 040 kb/s")
+ # Note: Numbers may contain spaces as thousand separators (e.g., "6 040")
+ BITRATE_REGEX = re.compile(
+     r"(?:Avg\.?\s*)?Bitrate[:\s]*([\d\s]+(?:[.,]\d+)?)\s*(kb/s|Mb/s|mb/s)", re.I
+ )
+ # Pattern to extract episode duration (e.g., "Dauer: 60 Min. pro Folge")
+ EPISODE_DURATION_REGEX = re.compile(r"Dauer[:\s]*(\d+)\s*Min\.?\s*pro\s*Folge", re.I)
+
+
+ def convert_to_rss_date(date_str):
+     """
+     HS date format from search: 'dd.mm.yy, HH:MM' e.g. '05.07.25, 17:23'
+     """
+     match = DATE_REGEX.search(date_str)
+     if match:
+         date_part = match.group(1)
+         time_part = match.group(2)
+         dt_obj = datetime.strptime(f"{date_part} {time_part}", "%d.%m.%y %H:%M")
+         return dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")
+     return ""
+
+
+ def convert_rss_pubdate(pubdate_str):
+     """
+     RSS feed pubDate format: 'Mon, 26 Jan 2026 18:53:59 +0000'
+     """
+     try:
+         # Already in RSS format, just clean it up
+         dt_obj = datetime.strptime(pubdate_str.strip(), "%a, %d %b %Y %H:%M:%S %z")
+         return dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")
+     except Exception:
+         return pubdate_str
+
+
+ def extract_size_from_text(text):
+     """Extract size in MB from text like 'Größe: 40883 MB'"""
+     match = SIZE_REGEX.search(text)
+     if match:
+         size_val = float(match.group(1).replace(",", "."))
+         unit = match.group(2).upper()
+         if unit == "GB":
+             return int(size_val * 1024)
+         elif unit == "TB":
+             return int(size_val * 1024 * 1024)
+         return int(size_val)
+     return 0
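Note: everything is normalized to whole MB; a quick sketch with invented values (the first sample is from the docstring):

    >>> extract_size_from_text("Größe: 40883 MB")
    40883
    >>> extract_size_from_text("Größe: 2,5 GB")  # comma decimal -> 2.5 * 1024
    2560
    >>> extract_size_from_text("no size here")
    0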
+
+
+ def extract_episode_size_mb(text):
+     """
+     Calculate episode size from average bitrate and per-episode duration.
+
+     Returns size in MB if both bitrate and per-episode duration are found,
+     otherwise returns None (caller should use total size).
+
+     Formula: size_mb = bitrate_mbps * duration_min * 60 / 8
+     """
+     # Check if this is per-episode duration (contains "pro Folge")
+     duration_match = EPISODE_DURATION_REGEX.search(text)
+     if not duration_match:
+         return None
+
+     duration_min = int(duration_match.group(1))
+
+     # Extract bitrate
+     bitrate_match = BITRATE_REGEX.search(text)
+     if not bitrate_match:
+         return None
+
+     # Parse bitrate value - handle various formats:
+     # - "6 040" (space as thousand separator)
+     # - "84,284" (comma as thousand separator - 3 digits after comma)
+     # - "20,8" or "10,6" (comma as decimal separator - not 3 digits after comma)
+     bitrate_str = bitrate_match.group(1).strip()
+
+     # Remove spaces (thousand separator)
+     bitrate_str = bitrate_str.replace(" ", "")
+
+     # Handle comma: thousand separator if exactly 3 digits follow, otherwise decimal
+     if "," in bitrate_str:
+         parts = bitrate_str.split(",")
+         if len(parts) == 2 and len(parts[1]) == 3 and parts[1].isdigit():
+             # Comma is thousand separator (e.g., "84,284" -> "84284")
+             bitrate_str = bitrate_str.replace(",", "")
+         else:
+             # Comma is decimal separator (e.g., "20,8" -> "20.8")
+             bitrate_str = bitrate_str.replace(",", ".")
+
+     bitrate_val = float(bitrate_str)
+     bitrate_unit = bitrate_match.group(2).lower()
+
+     # Convert to Mb/s
+     if bitrate_unit == "kb/s":
+         bitrate_mbps = bitrate_val / 1000
+     else:  # mb/s
+         bitrate_mbps = bitrate_val
+
+     # Calculate size: bitrate (Mb/s) * duration (s) / 8 = MB
+     episode_size_mb = int(bitrate_mbps * duration_min * 60 / 8)
+     return episode_size_mb
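Note: a worked example of the formula, combining the sample strings from the regex comments above: 10.6 Mb/s × 60 min × 60 s/min ÷ 8 bit/byte = 4770 MB.

    >>> extract_episode_size_mb("Dauer: 60 Min. pro Folge, Avg. Bitrate: 10,6 Mb/s")
    4770

Without a "pro Folge" duration the function returns None and the caller falls back to the total pack size.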
+
+
+ def normalize_mirror_name(name):
+     """Normalize mirror names - ddlto/ddl.to -> ddownload"""
+     name_lower = name.lower().strip()
+     if "ddlto" in name_lower or "ddl.to" in name_lower or "ddownload" in name_lower:
+         return "ddownload"
+     if "rapidgator" in name_lower:
+         return "rapidgator"
+     if "katfile" in name_lower:
+         return "katfile"
+     return name_lower
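Note: matching is substring-based, so labels with TLDs or surrounding whitespace still map cleanly (sample inputs invented for illustration):

    >>> normalize_mirror_name("DDL.to")
    'ddownload'
    >>> normalize_mirror_name(" Rapidgator.net ")
    'rapidgator'
    >>> normalize_mirror_name("Turbobit")  # unknown mirrors pass through lowercased
    'turbobit'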
+
+
+ def build_search_url(base_url, search_term):
+     """Build the ASP search URL with all required parameters"""
+     params = {
+         "s": search_term,
+         "asp_active": "1",
+         "p_asid": "1",
+         "p_asp_data": "1",
+         "current_page_id": "162309",
+         "qtranslate_lang": "0",
+         "filters_changed": "0",
+         "filters_initial": "1",
+         "asp_gen[]": ["title", "content", "excerpt"],
+         "customset[]": "post",
+         "termset[category][]": ["10", "13", "14", "15"],
+         "termset[formate][]": [
+             "41",
+             "42",
+             "43",
+             "44",
+             "45",
+             "46",
+             "47",
+             "48",
+             "49",
+             "50",
+             "51",
+             "52",
+             "53",
+             "54",
+             "55",
+         ],
+     }
+
+     # Build URL manually to handle multiple values for same key
+     parts = []
+     for key, value in params.items():
+         if isinstance(value, list):
+             for v in value:
+                 parts.append(f"{key}={v}")
+         else:
+             parts.append(f"{key}={value}")
+
+     return f"{base_url}/?{'&'.join(parts)}"
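Note: parameter values are concatenated verbatim with no URL-encoding, which works here because hs_search below only ever passes an IMDb ID as the search term. A quick sketch against a placeholder host:

    >>> url = build_search_url("https://example.tld", "tt1234567")
    >>> url.startswith("https://example.tld/?s=tt1234567&asp_active=1&p_asid=1")
    True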
+
+
+ def _parse_search_results(
+     soup,
+     shared_state,
+     hd_host,
+     password,
+     mirror_filter,
+     request_from,
+     search_string,
+     season,
+     episode,
+ ):
+     """Parse search results page and extract releases with filecrypt links.
+
+     Also extracts individual episode titles from season packs when available.
+     For episodes, calculates size from bitrate × duration instead of using total pack size.
+     """
+     releases = []
+
+     # Find all result entries - they appear as sections with date/title headers
+     # Pattern: "dd.mm.yy, HH:MM · [Title](url)"
+     for article in soup.find_all(
+         ["article", "div"],
+         class_=lambda x: x and ("post" in str(x).lower() or "result" in str(x).lower()),
+     ):
+         try:
+             # Find the title link
+             title_link = article.find(
+                 "a",
+                 href=lambda h: h
+                 and hd_host in h
+                 and ("/filme/" in h or "/serien/" in h),
+             )
+             if not title_link:
+                 continue
+
+             main_title = title_link.get_text(strip=True)
+             if not main_title or len(main_title) < 5:
+                 continue
+
+             # Replace spaces with dots for release name format
+             main_title = main_title.replace(" ", ".")
+
+             source = title_link["href"]
+
+             # Extract size from article content
+             article_text = article.get_text()
+             total_mb = extract_size_from_text(article_text)
+             total_size_bytes = int(total_mb * 1024 * 1024) if total_mb else 0
+
+             # Calculate episode size from bitrate and duration (if available)
+             episode_mb = extract_episode_size_mb(article_text)
+             episode_size_bytes = int(episode_mb * 1024 * 1024) if episode_mb else None
+
+             # Extract date
+             published = convert_to_rss_date(article_text)
+
+             # Extract IMDb ID if present
+             imdb_match = IMDB_REGEX.search(str(article))
+             imdb_id = imdb_match.group(1) if imdb_match else None
+
+             # Collect all titles to create releases for (episodes first, main title last)
+             episode_titles = []
+
+             # Extract individual episode titles from article content
+             # Episodes may appear on separate lines or concatenated without separators
+             # Use findall to get all matches, then clean up any trailing garbage
+             for ep_match in EPISODE_EXTRACT_REGEX.findall(article_text):
+                 ep_title = ep_match.strip()
+                 # Clean trailing common words that may be attached to group name
+                 # e.g., "Title.S02E01-WAYNEAvg" -> "Title.S02E01-WAYNE"
+                 ep_title = TRAILING_GARBAGE_PATTERN.sub("", ep_title)
+                 if ep_title and len(ep_title) > 10:
+                     episode_titles.append(ep_title)
+
+             # Remove duplicate episodes while preserving order
+             seen = set()
+             unique_episodes = []
+             for t in episode_titles:
+                 t_lower = t.lower()
+                 if t_lower not in seen:
+                     seen.add(t_lower)
+                     unique_episodes.append(t)
+
+             # Create releases for individual episodes (use calculated episode size)
+             for title in unique_episodes:
+                 # Validate release against search criteria
+                 if not shared_state.is_valid_release(
+                     title, request_from, search_string, season, episode
+                 ):
+                     continue
+
+                 # Use calculated episode size if available, otherwise fall back to total
+                 ep_mb = episode_mb if episode_mb else total_mb
+                 ep_size = episode_size_bytes if episode_size_bytes else total_size_bytes
+
+                 payload = urlsafe_b64encode(
+                     f"{title}|{source}|{mirror_filter}|{ep_mb}|{password}|{imdb_id}|{hostname}".encode()
+                 ).decode()
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                 releases.append(
+                     {
+                         "details": {
+                             "title": title,
+                             "hostname": hostname,
+                             "imdb_id": imdb_id,
+                             "link": link,
+                             "mirror": mirror_filter,
+                             "size": ep_size,
+                             "date": published,
+                             "source": source,
+                         },
+                         "type": "protected",
+                     }
+                 )
+
+             # Also add the main title (season pack) with full size - if not duplicate
+             if main_title.lower() not in seen:
+                 if shared_state.is_valid_release(
+                     main_title, request_from, search_string, season, episode
+                 ):
+                     payload = urlsafe_b64encode(
+                         f"{main_title}|{source}|{mirror_filter}|{total_mb}|{password}|{imdb_id}|{hostname}".encode()
+                     ).decode()
+                     link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                     releases.append(
+                         {
+                             "details": {
+                                 "title": main_title,
+                                 "hostname": hostname,
+                                 "imdb_id": imdb_id,
+                                 "link": link,
+                                 "mirror": mirror_filter,
+                                 "size": total_size_bytes,
+                                 "date": published,
+                                 "source": source,
+                             },
+                             "type": "protected",
+                         }
+                     )
+
+         except Exception as e:
+             debug(f"Error parsing {hostname.upper()} search result: {e}")
+             continue
+
+     return releases
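Note: the payload is just seven pipe-separated fields, urlsafe-base64-encoded. A sketch of how the download endpoint could unpack it (assuming no field contains a literal '|'; also note that a missing mirror or IMDb ID arrives as the literal string "None", since the f-string stringifies None):

    from base64 import urlsafe_b64decode

    fields = urlsafe_b64decode(payload).decode().split("|")
    title, source, mirror, size_mb, password, imdb_id, host = fields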
+
+
+ def hs_feed(shared_state, start_time, request_from, mirror=None):
+     """Return recent releases from HS feed"""
+     releases = []
+     hs = shared_state.values["config"]("Hostnames").get(hostname)
+     password = hs
+
+     if not hs:
+         return releases
+
+     # HS only supports movies and series
+     if "lazylibrarian" in request_from.lower():
+         debug(
+             f'Skipping {request_from} feed on "{hostname.upper()}" (unsupported media type)!'
+         )
+         return releases
+
+     if mirror and mirror.lower() not in supported_mirrors:
+         debug(
+             f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping feed!'
+         )
+         return releases
+
+     base_url = f"https://{hs}"
+     feed_url = f"{base_url}/feed/"
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+
+     try:
+         r = requests.get(feed_url, headers=headers, timeout=30)
+         r.raise_for_status()
+
+         # Parse RSS - use html.parser to avoid lxml dependency
+         soup = BeautifulSoup(r.content, "html.parser")
+         items = soup.find_all("item")
+
+         for item in items:
+             try:
+                 title_elem = item.find("title")
+                 link_elem = item.find("link")
+                 pubdate_elem = item.find("pubdate")  # html.parser lowercases tags
+
+                 if not title_elem or not link_elem:
+                     continue
+
+                 title = title_elem.get_text(strip=True)
+                 # html.parser treats <link> as void element, URL is in next_sibling
+                 source = link_elem.get_text(strip=True)
+                 if not source and link_elem.next_sibling:
+                     source = link_elem.next_sibling.strip()
+
+                 if not source:
+                     continue
+
+                 # Replace spaces with dots (titles may already have dots)
+                 title = title.replace(" ", ".")
+
+                 published = ""
+                 if pubdate_elem:
+                     published = convert_rss_pubdate(pubdate_elem.get_text(strip=True))
+
+                 # Feed doesn't include size, set to 0
+                 mb = 0
+                 size_bytes = 0
+                 imdb_id = None
+
+                 payload = urlsafe_b64encode(
+                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode()
+                 ).decode()
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                 releases.append(
+                     {
+                         "details": {
+                             "title": title,
+                             "hostname": hostname,
+                             "imdb_id": imdb_id,
+                             "link": link,
+                             "mirror": mirror,
+                             "size": size_bytes,
+                             "date": published,
+                             "source": source,
+                         },
+                         "type": "protected",
+                     }
+                 )
+
+             except Exception as e:
+                 debug(f"Error parsing {hostname.upper()} feed item: {e}")
+                 continue
+
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} feed: {e}")
+         mark_hostname_issue(hostname, "feed", str(e))
+         return releases
+
+     elapsed_time = time.time() - start_time
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+     if releases:
+         clear_hostname_issue(hostname)
+     return releases
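Note: the next_sibling fallback above exists because html.parser treats <link> as an HTML void element, so the URL inside an RSS <link> tag is parsed as a following text node rather than as the tag's content. A minimal demonstration (placeholder URL):

    from bs4 import BeautifulSoup

    item = "<item><link>https://example.tld/release</link></item>"
    link = BeautifulSoup(item, "html.parser").find("link")
    print(repr(link.get_text(strip=True)))  # '' - the tag is closed immediately
    print(link.next_sibling.strip())        # https://example.tld/release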
+
+
+ def hs_search(
+     shared_state,
+     start_time,
+     request_from,
+     search_string,
+     mirror=None,
+     season=None,
+     episode=None,
+ ):
+     """Search HS for releases by IMDb ID"""
+     releases = []
+     hs = shared_state.values["config"]("Hostnames").get(hostname)
+     password = hs
+
+     if not hs:
+         return releases
+
+     # HS only supports movies and series
+     if "lazylibrarian" in request_from.lower():
+         debug(
+             f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+         )
+         return releases
+
+     if mirror and mirror.lower() not in supported_mirrors:
+         debug(
+             f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!'
+         )
+         return releases
+
+     # HS supports direct IMDb ID search
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if not imdb_id:
+         debug(
+             f'"{hostname.upper()}" only supports IMDb ID search, got: {search_string}'
+         )
+         return releases
+
+     base_url = f"https://{hs}"
+     search_url = build_search_url(base_url, imdb_id)
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+
+     try:
+         r = requests.get(search_url, headers=headers, timeout=30)
+         r.raise_for_status()
+
+         soup = BeautifulSoup(r.content, "html.parser")
+         releases = _parse_search_results(
+             soup,
+             shared_state,
+             hs,
+             password,
+             mirror,
+             request_from,
+             search_string,
+             season,
+             episode,
+         )
+
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} search: {e}")
+         mark_hostname_issue(hostname, "search", str(e))
+         return releases
+
+     elapsed_time = time.time() - start_time
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+     if releases:
+         clear_hostname_issue(hostname)
+     return releases
@@ -2,7 +2,6 @@
  # Quasarr
  # Project by https://github.com/rix1337
 
- import html
  import re
  import time
  from base64 import urlsafe_b64encode
@@ -13,7 +12,6 @@ import requests
  from bs4 import BeautifulSoup
 
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
  from quasarr.providers.log import debug, info
 
  hostname = "mb"
@@ -217,11 +215,7 @@ def mb_search(
      password = mb
      imdb_id = shared_state.is_imdb_id(search_string)
      if imdb_id:
-         title = get_localized_title(shared_state, imdb_id, "de")
-         if not title:
-             info(f"Could not extract title from IMDb-ID {imdb_id}")
-             return []
-         search_string = html.unescape(title)
+         search_string = imdb_id
 
      q = quote_plus(search_string)
      url = f"https://{mb}/?s={q}&id=20&post_type=post"
@@ -9,7 +9,7 @@ from base64 import urlsafe_b64encode
  import requests
 
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import debug, info
 
  hostname = "nx"
@@ -150,6 +150,9 @@ def nx_search(
              info(f"Could not extract title from IMDb-ID {imdb_id}")
              return releases
          search_string = html.unescape(search_string)
+         if not season:
+             if year := get_year(imdb_id):
+                 search_string += f" {year}"
 
      url = f"https://{nx}/api/frontend/search/{search_string}"
      headers = {
@@ -13,7 +13,7 @@ import requests
  from bs4 import BeautifulSoup
 
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import debug, info
 
  hostname = "wd"
@@ -209,6 +209,9 @@ def wd_search(
              info(f"Could not extract title from IMDb-ID {imdb_id}")
              return releases
          search_string = html.unescape(search_string)
+         if not season:
+             if year := get_year(imdb_id):
+                 search_string += f" {year}"
 
      q = quote_plus(search_string)
      url = f"https://{wd}/search?q={q}"
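Note: nx_search and wd_search gain the same guard, presumably to narrow movie results by release year. A condensed sketch of the added behavior (assuming get_year returns a year such as 2026, or None when unknown):

    if not season:                       # movie-style search, no season requested
        if year := get_year(imdb_id):    # walrus: bind and test in one step
            search_string += f" {year}"  # e.g. "Some Title" -> "Some Title 2026"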
@@ -13,7 +13,7 @@ import requests
  from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning
 
  from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import debug, info
 
  warnings.filterwarnings(
@@ -190,7 +190,7 @@ def wx_search(
          "selectedGenres": "",
          "types": "movie,series,anime",
          "genres": "",
-         "years": "",
+         "years": year if (year := get_year(imdb_id)) else "",
          "ratings": "",
          "page": 1,
          "sortBy": "latest",
@@ -244,13 +244,15 @@ def wx_search(
              else:
                  detail_item = detail_data
 
-             item_imdb_id = imdb_id
-             if not item_imdb_id:
-                 item_imdb_id = detail_item.get("imdb_id") or detail_item.get(
-                     "imdbid"
+             item_imdb_id = detail_item.get("imdb_id") or detail_item.get("imdbid")
+             if not item_imdb_id and "options" in detail_item:
+                 item_imdb_id = detail_item["options"].get("imdb_id")
+
+             if item_imdb_id and imdb_id and item_imdb_id != imdb_id:
+                 debug(
+                     f"{hostname.upper()}: IMDb-ID mismatch ({imdb_id} != {item_imdb_id}), skipping item"
                  )
-             if not item_imdb_id and "options" in detail_item:
-                 item_imdb_id = detail_item["options"].get("imdb_id")
+                 continue
 
              source = f"https://{host}/detail/{uid}"
quasarr/storage/config.py CHANGED
@@ -38,6 +38,7 @@ class Config(object):
          ("dw", "secret", ""),
          ("fx", "secret", ""),
          ("he", "secret", ""),
+         ("hs", "secret", ""),
          ("mb", "secret", ""),
          ("nk", "secret", ""),
          ("nx", "secret", ""),