quasarr 2.2.0-py3-none-any.whl → 2.3.0-py3-none-any.whl

This diff covers the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

quasarr/__init__.py CHANGED
@@ -100,38 +100,12 @@ def run():
  shared_state.update("database", DataBase)
  supported_hostnames = extract_allowed_keys(Config._DEFAULT_CONFIG, 'Hostnames')
  shared_state.update("sites", [key.upper() for key in supported_hostnames])
- shared_state.update("user_agent", "")  # will be set by FlareSolverr or fallback
+ # Set fallback user agent immediately so it's available while background check runs
+ shared_state.update("user_agent", FALLBACK_USER_AGENT)
  shared_state.update("helper_active", False)

  print(f'Config path: "{config_path}"')

- # Check if FlareSolverr was previously skipped
- skip_flaresolverr_db = DataBase("skip_flaresolverr")
- flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
-
- flaresolverr_url = Config('FlareSolverr').get('url')
- if not flaresolverr_url and not flaresolverr_skipped:
-     flaresolverr_config(shared_state)
-     # Re-check after config - user may have skipped
-     flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
-     flaresolverr_url = Config('FlareSolverr').get('url')
-
- if flaresolverr_skipped:
-     info('FlareSolverr setup skipped by user preference')
-     info('Some sites (AL) will not work without FlareSolverr. Configure it later in the web UI.')
-     # Set fallback user agent
-     shared_state.update("user_agent", FALLBACK_USER_AGENT)
-     print(f'User Agent (fallback): "{FALLBACK_USER_AGENT}"')
- elif flaresolverr_url:
-     print(f'Flaresolverr URL: "{flaresolverr_url}"')
-     flaresolverr_check = check_flaresolverr(shared_state, flaresolverr_url)
-     if flaresolverr_check:
-         print(f'User Agent: "{shared_state.values["user_agent"]}"')
-     else:
-         info('FlareSolverr check failed - using fallback user agent')
-         shared_state.update("user_agent", FALLBACK_USER_AGENT)
-         print(f'User Agent (fallback): "{FALLBACK_USER_AGENT}"')
-
  print("\n===== Hostnames =====")
  try:
      if arguments.hostnames:
@@ -181,7 +155,7 @@ def run():

  # Check credentials for login-required hostnames
  skip_login_db = DataBase("skip_login")
- login_required_sites = ['al', 'dd', 'nx', 'dl']
+ login_required_sites = ['al', 'dd', 'dl', 'nx']

  for site in login_required_sites:
      hostname = Config('Hostnames').get(site)
@@ -239,6 +213,13 @@ def run():
  info(f'CAPTCHA-Solution required for {package_count} package{'s' if package_count > 1 else ''} at: '
       f'"{shared_state.values["external_address"]}/captcha"!')

+ flaresolverr = multiprocessing.Process(
+     target=flaresolverr_checker,
+     args=(shared_state_dict, shared_state_lock),
+     daemon=True
+ )
+ flaresolverr.start()
+
  jdownloader = multiprocessing.Process(
      target=jdownloader_connection,
      args=(shared_state_dict, shared_state_lock),
@@ -259,6 +240,34 @@ def run():
  sys.exit(0)


+ def flaresolverr_checker(shared_state_dict, shared_state_lock):
+     try:
+         shared_state.set_state(shared_state_dict, shared_state_lock)
+
+         # Check if FlareSolverr was previously skipped
+         skip_flaresolverr_db = DataBase("skip_flaresolverr")
+         flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
+
+         flaresolverr_url = Config('FlareSolverr').get('url')
+         if not flaresolverr_url and not flaresolverr_skipped:
+             flaresolverr_config(shared_state)
+             # Re-check after config - user may have skipped
+             flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
+             flaresolverr_url = Config('FlareSolverr').get('url')
+
+         if flaresolverr_skipped:
+             info('FlareSolverr setup skipped by user preference')
+             info('Some sites (AL) will not work without FlareSolverr. Configure it later in the web UI.')
+         elif flaresolverr_url:
+             print(f'Flaresolverr URL: "{flaresolverr_url}"')
+             flaresolverr_check = check_flaresolverr(shared_state, flaresolverr_url)
+             if flaresolverr_check:
+                 print(f'Using same User-Agent as FlareSolverr: "{shared_state.values["user_agent"]}"')
+
+     except KeyboardInterrupt:
+         pass
+
+
  def update_checker(shared_state_dict, shared_state_lock):
      try:
          shared_state.set_state(shared_state_dict, shared_state_lock)
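
Taken together, these hunks take the FlareSolverr probe off the blocking startup path: a usable fallback User-Agent is set synchronously, and a daemon process refines it once the check finishes. A minimal, self-contained sketch of that pattern follows (plain Python, not part of the diff; all names here are illustrative, not Quasarr's own):

import multiprocessing
import time

FALLBACK_USER_AGENT = "Mozilla/5.0 (fallback)"


def background_probe(state, lock):
    """Stands in for check_flaresolverr(); may take seconds to complete."""
    time.sleep(2)
    with lock:
        state["user_agent"] = "Mozilla/5.0 (probed)"


if __name__ == "__main__":
    manager = multiprocessing.Manager()
    state = manager.dict(user_agent=FALLBACK_USER_AGENT)  # usable immediately
    lock = manager.Lock()
    probe = multiprocessing.Process(target=background_probe, args=(state, lock), daemon=True)
    probe.start()
    print(state["user_agent"])  # fallback value, no waiting
    probe.join()                # Quasarr's main loop simply keeps running instead
    print(state["user_agent"])  # refined value once the probe returns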
quasarr/providers/imdb_metadata.py CHANGED
@@ -5,7 +5,7 @@
  import html
  import re
  from datetime import datetime, timedelta
- from json import loads
+ from json import loads, dumps
  from urllib.parse import quote

  import requests
@@ -14,20 +14,168 @@ from bs4 import BeautifulSoup
  from quasarr.providers.log import info, debug


+ def _get_db(table_name):
+     """Lazy import to avoid circular dependency."""
+     from quasarr.storage.sqlite_database import DataBase
+     return DataBase(table_name)
+
+
+ class IMDbAPI:
+     """Handles interactions with api.imdbapi.dev"""
+     BASE_URL = "https://api.imdbapi.dev"
+
+     @staticmethod
+     def get_title(imdb_id):
+         try:
+             response = requests.get(f"{IMDbAPI.BASE_URL}/titles/{imdb_id}", timeout=30)
+             response.raise_for_status()
+             return response.json()
+         except Exception as e:
+             info(f"Error loading imdbapi.dev for {imdb_id}: {e}")
+             return None
+
+     @staticmethod
+     def get_akas(imdb_id):
+         try:
+             response = requests.get(f"{IMDbAPI.BASE_URL}/titles/{imdb_id}/akas", timeout=30)
+             response.raise_for_status()
+             return response.json().get("akas", [])
+         except Exception as e:
+             info(f"Error loading localized titles from IMDbAPI.dev for {imdb_id}: {e}")
+             return []
+
+     @staticmethod
+     def search_titles(query):
+         try:
+             response = requests.get(f"{IMDbAPI.BASE_URL}/search/titles?query={quote(query)}&limit=5", timeout=30)
+             response.raise_for_status()
+             return response.json().get("titles", [])
+         except Exception as e:
+             debug(f"Request on IMDbAPI failed: {e}")
+             return []
+
+
+ class IMDbWeb:
+     """Handles fallback interactions by scraping imdb.com"""
+     BASE_URL = "https://www.imdb.com"
+
+     @staticmethod
+     def get_poster(imdb_id, user_agent):
+         headers = {'User-Agent': user_agent}
+         try:
+             request = requests.get(f"{IMDbWeb.BASE_URL}/title/{imdb_id}/", headers=headers, timeout=10).text
+             soup = BeautifulSoup(request, "html.parser")
+             poster_set = soup.find('div', class_='ipc-poster').div.img["srcset"]
+             poster_links = [x for x in poster_set.split(" ") if len(x) > 10]
+             return poster_links[-1]
+         except Exception as e:
+             debug(f"Could not get poster title for {imdb_id} from IMDb: {e}")
+             return None
+
+     @staticmethod
+     def get_localized_title(imdb_id, language, user_agent):
+         headers = {
+             'Accept-Language': language,
+             'User-Agent': user_agent
+         }
+         try:
+             response = requests.get(f"{IMDbWeb.BASE_URL}/title/{imdb_id}/", headers=headers, timeout=10)
+             response.raise_for_status()
+
+             match = re.search(r'<title>(.*?) \(.*?</title>', response.text)
+             if not match:
+                 match = re.search(r'<title>(.*?) - IMDb</title>', response.text)
+
+             if match:
+                 return match.group(1)
+         except Exception as e:
+             info(f"Error loading IMDb metadata for {imdb_id}: {e}")
+
+         return None
+
+     @staticmethod
+     def search_titles(query, ttype, language, user_agent):
+         headers = {
+             'Accept-Language': language,
+             'User-Agent': user_agent
+         }
+         try:
+             results = requests.get(f"{IMDbWeb.BASE_URL}/find/?q={quote(query)}&s=tt&ttype={ttype}&ref_=fn_{ttype}",
+                                    headers=headers, timeout=10)
+
+             if results.status_code == 200:
+                 soup = BeautifulSoup(results.text, "html.parser")
+                 props = soup.find("script", text=re.compile("props"))
+                 if props:
+                     details = loads(props.string)
+                     return details['props']['pageProps']['titleResults']['results']
+             else:
+                 debug(f"Request on IMDb failed: {results.status_code}")
+         except Exception as e:
+             debug(f"IMDb scraping fallback failed: {e}")
+
+         return []
+
+
+ class TitleCleaner:
+     @staticmethod
+     def sanitize(title):
+         if not title:
+             return ""
+         sanitized_title = html.unescape(title)
+         sanitized_title = re.sub(r"[^a-zA-Z0-9äöüÄÖÜß&-']", ' ', sanitized_title).strip()
+         sanitized_title = sanitized_title.replace(" - ", "-")
+         sanitized_title = re.sub(r'\s{2,}', ' ', sanitized_title)
+         return sanitized_title
+
+     @staticmethod
+     def clean(title):
+         try:
+             # Regex to find the title part before common release tags
+             # Stops at:
+             # - Year (19xx or 20xx) preceded by a separator
+             # - Language tags (.German, .GERMAN)
+             # - Resolution (.1080p, .720p, etc.)
+             # - Season info (.S01)
+             pattern = r"(.*?)(?:[\.\s](?!19|20)\d{2}|[\.\s]German|[\.\s]GERMAN|[\.\s]\d{3,4}p|[\.\s]S(?:\d{1,3}))"
+             match = re.search(pattern, title)
+             if match:
+                 extracted_title = match.group(1)
+             else:
+                 extracted_title = title
+
+             # Remove specific tags that might appear in the title part
+             tags_to_remove = [
+                 r'[\.\s]UNRATED.*', r'[\.\s]Unrated.*', r'[\.\s]Uncut.*', r'[\.\s]UNCUT.*',
+                 r'[\.\s]Directors[\.\s]Cut.*', r'[\.\s]Final[\.\s]Cut.*', r'[\.\s]DC.*',
+                 r'[\.\s]REMASTERED.*', r'[\.\s]EXTENDED.*', r'[\.\s]Extended.*',
+                 r'[\.\s]Theatrical.*', r'[\.\s]THEATRICAL.*'
+             ]
+
+             clean_title = extracted_title
+             for tag in tags_to_remove:
+                 clean_title = re.sub(tag, "", clean_title, flags=re.IGNORECASE)
+
+             clean_title = clean_title.replace(".", " ").strip()
+             clean_title = re.sub(r'\s+', ' ', clean_title)  # Remove multiple spaces
+             clean_title = clean_title.replace(" ", "+")
+
+             return clean_title
+         except Exception as e:
+             debug(f"Error cleaning title '{title}': {e}")
+             return title
+
+
  def get_poster_link(shared_state, imdb_id):
+     imdb_metadata = get_imdb_metadata(imdb_id)
+     if imdb_metadata:
+         poster_link = imdb_metadata.get("poster_link")
+         if poster_link:
+             return poster_link
+
      poster_link = None
      if imdb_id:
-         headers = {'User-Agent': shared_state.values["user_agent"]}
-         request = requests.get(f"https://www.imdb.com/title/{imdb_id}/", headers=headers, timeout=10).text
-         soup = BeautifulSoup(request, "html.parser")
-         try:
-             poster_set = soup.find('div', class_='ipc-poster').div.img[
-                 "srcset"]  # contains links to posters in ascending resolution
-             poster_links = [x for x in poster_set.split(" ") if
-                             len(x) > 10]  # extract all poster links ignoring resolution info
-             poster_link = poster_links[-1]  # get the highest resolution poster
-         except:
-             pass
+         poster_link = IMDbWeb.get_poster(imdb_id, shared_state.values["user_agent"])

      if not poster_link:
          debug(f"Could not get poster title for {imdb_id} from IMDb")
@@ -35,87 +183,151 @@ def get_poster_link(shared_state, imdb_id):
      return poster_link


- def get_localized_title(shared_state, imdb_id, language='de'):
-     localized_title = None
+ def get_imdb_metadata(imdb_id):
+     db = _get_db("imdb_metadata")
+     now = datetime.now().timestamp()

-     headers = {
-         'Accept-Language': language,
-         'User-Agent': shared_state.values["user_agent"]
+     # Try to load from DB
+     cached_metadata = None
+     try:
+         cached_data = db.retrieve(imdb_id)
+         if cached_data:
+             cached_metadata = loads(cached_data)
+             # If valid, update TTL and return
+             if cached_metadata.get("ttl") and cached_metadata["ttl"] > now:
+                 cached_metadata["ttl"] = now + timedelta(days=30).total_seconds()
+                 db.update_store(imdb_id, dumps(cached_metadata))
+                 return cached_metadata
+     except Exception as e:
+         debug(f"Error retrieving IMDb metadata from DB for {imdb_id}: {e}")
+
+     # Initialize new metadata structure
+     imdb_metadata = {
+         "title": None,
+         "year": None,
+         "poster_link": None,
+         "localized": {},
+         "ttl": 0
      }

+     # Fetch from API
+     response_json = IMDbAPI.get_title(imdb_id)
+
+     if not response_json:
+         # API failed. If we have stale cached data, return it as fallback
+         if cached_metadata:
+             debug(f"IMDb API failed for {imdb_id}, returning stale cached data.")
+             return cached_metadata
+         return imdb_metadata
+
+     # Process API response
+     imdb_metadata["title"] = TitleCleaner.sanitize(response_json.get("primaryTitle", ""))
+     imdb_metadata["year"] = response_json.get("startYear")
+     imdb_metadata["ttl"] = now + timedelta(days=30).total_seconds()
+
      try:
-         response = requests.get(f"https://www.imdb.com/title/{imdb_id}/", headers=headers, timeout=10)
+         imdb_metadata["poster_link"] = response_json.get("primaryImage").get("url")
      except Exception as e:
-         info(f"Error loading IMDb metadata for {imdb_id}: {e}")
-         return localized_title
+         debug(f"Could not find poster link for {imdb_id} from imdbapi.dev: {e}")
+         # Shorten TTL if data is incomplete
+         imdb_metadata["ttl"] = now + timedelta(days=1).total_seconds()

-     try:
-         match = re.findall(r'<title>(.*?) \(.*?</title>', response.text)
-         localized_title = match[0]
-     except:
-         try:
-             match = re.findall(r'<title>(.*?) - IMDb</title>', response.text)
-             localized_title = match[0]
-         except:
-             pass
+     akas = IMDbAPI.get_akas(imdb_id)
+     if akas:
+         for aka in akas:
+             if aka.get("language"):
+                 continue  # skip entries with specific language tags
+             if aka.get("country", {}).get("code", "").lower() == "de":
+                 imdb_metadata["localized"]["de"] = TitleCleaner.sanitize(aka.get("text"))
+                 break
+     else:
+         # Shorten TTL if AKAs failed
+         imdb_metadata["ttl"] = now + timedelta(days=1).total_seconds()

-     if not localized_title:
-         debug(f"Could not get localized title for {imdb_id} in {language} from IMDb")
+     db.update_store(imdb_id, dumps(imdb_metadata))
+     return imdb_metadata

-     localized_title = html.unescape(localized_title)
-     localized_title = re.sub(r"[^a-zA-Z0-9äöüÄÖÜß&-']", ' ', localized_title).strip()
-     localized_title = localized_title.replace(" - ", "-")
-     localized_title = re.sub(r'\s{2,}', ' ', localized_title)

-     return localized_title
+ def get_year(imdb_id):
+     imdb_metadata = get_imdb_metadata(imdb_id)
+     if imdb_metadata:
+         return imdb_metadata.get("year")
+     return None


- def get_clean_title(title):
-     try:
-         extracted_title = re.findall(r"(.*?)(?:.(?!19|20)\d{2}|\.German|.GERMAN|\.\d{3,4}p|\.S(?:\d{1,3}))", title)[0]
-         leftover_tags_removed = re.sub(
-             r'(|.UNRATED.*|.Unrated.*|.Uncut.*|.UNCUT.*)(|.Directors.Cut.*|.Final.Cut.*|.DC.*|.REMASTERED.*|.EXTENDED.*|.Extended.*|.Theatrical.*|.THEATRICAL.*)',
-             "", extracted_title)
-         clean_title = leftover_tags_removed.replace(".", " ").strip().replace(" ", "+")
+ def get_localized_title(shared_state, imdb_id, language='de'):
+     imdb_metadata = get_imdb_metadata(imdb_id)
+     if imdb_metadata:
+         localized_title = imdb_metadata.get("localized").get(language)
+         if localized_title:
+             return localized_title
+         return imdb_metadata.get("title")

-     except:
-         clean_title = title
-     return clean_title
+     localized_title = IMDbWeb.get_localized_title(imdb_id, language, shared_state.values["user_agent"])
+
+     if not localized_title:
+         debug(f"Could not get localized title for {imdb_id} in {language} from IMDb")
+     else:
+         localized_title = TitleCleaner.sanitize(localized_title)
+     return localized_title


  def get_imdb_id_from_title(shared_state, title, language="de"):
      imdb_id = None

      if re.search(r"S\d{1,3}(E\d{1,3})?", title, re.IGNORECASE):
-         ttype = "tv"
+         ttype_api = "TV_SERIES"
+         ttype_web = "tv"
      else:
-         ttype = "ft"
+         ttype_api = "MOVIE"
+         ttype_web = "ft"
+
+     title = TitleCleaner.clean(title)

-     title = get_clean_title(title)
+     # Check Search Cache (DB)
+     db = _get_db("imdb_searches")
+     try:
+         cached_data = db.retrieve(title)
+         if cached_data:
+             data = loads(cached_data)
+             # Check TTL (48 hours)
+             if data.get("timestamp") and datetime.fromtimestamp(data["timestamp"]) > datetime.now() - timedelta(
+                     hours=48):
+                 return data.get("imdb_id")
+     except Exception as e:
+         debug(f"Error retrieving search cache for {title}: {e}")

-     threshold = 60 * 60 * 48  # 48 hours
-     context = "recents_imdb"
-     recently_searched = shared_state.get_recently_searched(shared_state, context, threshold)
-     if title in recently_searched:
-         title_item = recently_searched[title]
-         if title_item["timestamp"] > datetime.now() - timedelta(seconds=threshold):
-             return title_item["imdb_id"]
+     # Try IMDbAPI.dev first
+     search_results = IMDbAPI.search_titles(title)
+     if search_results:
+         for result in search_results:
+             found_title = result.get("primaryTitle")
+             found_id = result.get("id")
+             found_type = result.get("type")

-     headers = {
-         'Accept-Language': language,
-         'User-Agent': shared_state.values["user_agent"]
-     }
+             # Basic type filtering if possible from result data
+             if ttype_api == "TV_SERIES" and found_type not in ["tvSeries", "tvMiniSeries"]:
+                 continue
+             if ttype_api == "MOVIE" and found_type not in ["movie", "tvMovie"]:
+                 continue

-     results = requests.get(f"https://www.imdb.com/find/?q={quote(title)}&s=tt&ttype={ttype}&ref_=fn_{ttype}",
-                            headers=headers, timeout=10)
+             if shared_state.search_string_in_sanitized_title(title, found_title):
+                 imdb_id = found_id
+                 break

-     if results.status_code == 200:
-         soup = BeautifulSoup(results.text, "html.parser")
-         props = soup.find("script", text=re.compile("props"))
-         details = loads(props.string)
-         search_results = details['props']['pageProps']['titleResults']['results']
+         # If no exact match found with type filtering, try relaxed matching
+         if not imdb_id:
+             for result in search_results:
+                 found_title = result.get("primaryTitle")
+                 found_id = result.get("id")
+                 if shared_state.search_string_in_sanitized_title(title, found_title):
+                     imdb_id = found_id
+                     break

-         if len(search_results) > 0:
+     # Fallback to IMDb scraping if API failed or returned no results
+     if not imdb_id:
+         search_results = IMDbWeb.search_titles(title, ttype_web, language, shared_state.values["user_agent"])
+         if search_results:
              for result in search_results:
                  try:
                      found_title = result["listItem"]["titleText"]
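
The caching rules in get_imdb_metadata are worth spelling out: complete records live for 30 days, incomplete ones for one day, every fresh hit slides the 30-day window forward, and a failed API call serves stale data rather than nothing. The same pattern in a generic, runnable sketch (store layout and names are illustrative):

from datetime import timedelta
from json import dumps, loads
from time import time


def cached_fetch(store, key, fetch):
    """store: any dict-like mapping of str to JSON strings."""
    now = time()
    raw = store.get(key)
    if raw:
        entry = loads(raw)
        if entry.get("ttl", 0) > now:                         # fresh cache hit
            entry["ttl"] = now + timedelta(days=30).total_seconds()
            store[key] = dumps(entry)                         # sliding expiry
            return entry
    fresh = fetch(key)
    if fresh is None:                                         # upstream failed
        return loads(raw) if raw else None                    # stale beats nothing
    days = 30 if fresh.get("complete") else 1                 # short TTL for partial data
    fresh["ttl"] = now + timedelta(days=days).total_seconds()
    store[key] = dumps(fresh)
    return fresh


store = {}
cached_fetch(store, "tt0133093", lambda k: {"title": "The Matrix", "complete": True})
print(cached_fetch(store, "tt0133093", lambda k: None))  # answered from cache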
@@ -127,14 +339,15 @@ def get_imdb_id_from_title(shared_state, title, language="de"):
                  if shared_state.search_string_in_sanitized_title(title, found_title):
                      imdb_id = found_id
                      break
-     else:
-         debug(f"Request on IMDb failed: {results.status_code}")

-     recently_searched[title] = {
-         "imdb_id": imdb_id,
-         "timestamp": datetime.now()
-     }
-     shared_state.update(context, recently_searched)
+     # Update Search Cache
+     try:
+         db.update_store(title, dumps({
+             "imdb_id": imdb_id,
+             "timestamp": datetime.now().timestamp()
+         }))
+     except Exception as e:
+         debug(f"Error updating search cache for {title}: {e}")

      if not imdb_id:
          debug(f"No IMDb-ID found for {title}")
quasarr/providers/version.py CHANGED
@@ -8,7 +8,7 @@ import requests


  def get_version():
-     return "2.2.0"
+     return "2.3.0"


  def get_latest_version():
quasarr/search/__init__.py CHANGED
@@ -5,6 +5,7 @@
  import time
  from concurrent.futures import ThreadPoolExecutor, as_completed

+ from quasarr.providers.imdb_metadata import get_imdb_metadata
  from quasarr.providers.log import info, debug
  from quasarr.search.sources.al import al_feed, al_search
  from quasarr.search.sources.by import by_feed, by_search
@@ -31,6 +32,10 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
      if imdb_id and not imdb_id.startswith('tt'):
          imdb_id = f'tt{imdb_id}'

+     # Pre-populate IMDb metadata cache to avoid API hammering by search threads
+     if imdb_id:
+         get_imdb_metadata(imdb_id)
+
      docs_search = "lazylibrarian" in request_from.lower()

      al = shared_state.values["config"]("Hostnames").get("al")
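
The pre-population call matters because the per-source search functions run in a thread pool and several of them resolve the same IMDb ID; warming the cache once up front turns those lookups into local SQLite reads. An illustrative sketch of the fan-out (the source functions are stand-ins, not Quasarr's real signatures):

from concurrent.futures import ThreadPoolExecutor, as_completed

from quasarr.providers.imdb_metadata import get_imdb_metadata


def fan_out(shared_state, sources, imdb_id):
    get_imdb_metadata(imdb_id)  # one network round-trip; result lands in SQLite
    releases = []
    with ThreadPoolExecutor() as pool:
        # each source may call get_localized_title()/get_year(), and thus
        # get_imdb_metadata(), again -- now answered from the cache
        futures = [pool.submit(search, shared_state, imdb_id) for search in sources]
        for future in as_completed(futures):
            releases.extend(future.result())
    return releases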
quasarr/search/sources/he.py CHANGED
@@ -12,7 +12,7 @@ import requests
  from bs4 import BeautifulSoup

  from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import info, debug

  hostname = "he"
@@ -84,6 +84,9 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
      if not local_title:
          info(f"{hostname}: no title for IMDb {imdb_id}")
          return releases
+     year = get_year(imdb_id)
+     if year:
+         local_title += f" {year}"
      source_search = local_title
  else:
      return releases
@@ -173,6 +176,9 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
  try:
      r = requests.get(source, headers=headers, timeout=10)
      soup = BeautifulSoup(r.content, 'html.parser')
+ except Exception as e:
+     mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
+ try:
      imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
      if imdb_link:
          release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
quasarr/search/sources/nk.py CHANGED
@@ -13,7 +13,7 @@ import requests
  from bs4 import BeautifulSoup

  from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import info, debug

  hostname = "nk"
@@ -75,6 +75,9 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
      if not local_title:
          info(f"{hostname}: no title for IMDb {imdb_id}")
          return releases
+     year = get_year(imdb_id)
+     if year:
+         local_title += f" {year}"
      source_search = local_title
  else:
      return releases
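
he.py and nk.py now share the same preparation step: resolve the localized title, then append the release year so remakes and same-named titles do not collide in site-side search. Sketched as one helper (hypothetical, not a function in the package):

from quasarr.providers.imdb_metadata import get_localized_title, get_year


def build_search_term(shared_state, imdb_id, language="de"):
    title = get_localized_title(shared_state, imdb_id, language)
    if not title:
        return None  # callers bail out here, as both sources do
    year = get_year(imdb_id)
    return f"{title} {year}" if year else title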
quasarr-2.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: quasarr
- Version: 2.2.0
+ Version: 2.3.0
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
  Home-page: https://github.com/rix1337/Quasarr
  Author: rix1337
quasarr-2.3.0.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- quasarr/__init__.py,sha256=cEtxN2AuwKvrxpIvAR7UL997VtYQ4iN3Eo3ZnP-WjZQ,14682
+ quasarr/__init__.py,sha256=eGJ6-vv6yQBq04TyeZ2j7EW0i-UW21Khbqoj5JJktoo,14759
  quasarr/api/__init__.py,sha256=KLnFSe5l3MrVgrbu6-7GlE2PqouVyizqiRZfQkBtge0,19587
  quasarr/api/arr/__init__.py,sha256=eEop8A5t936uT5azn4qz0bq1DMX84_Ja16wyleGFhyM,18495
  quasarr/api/captcha/__init__.py,sha256=Mqg2HhWMaUc07cVaEYHAbf-YvnxkiYVbkWT-g92J-2k,72960
@@ -35,7 +35,7 @@ quasarr/providers/cloudflare.py,sha256=oUDR7OQ8E-8vCtagZLnIS2ZZV3ERffhxmW0njKKbt
  quasarr/providers/hostname_issues.py,sha256=9PJFIosLB-bMTmgWlR5-sYAmcyps7TDoSYjoL9cw9TE,1460
  quasarr/providers/html_images.py,sha256=rrovPNl-FTTKKA-4HCPEhsYpq5b20VDrsB7t4RrQf3w,15531
  quasarr/providers/html_templates.py,sha256=IGWwt78bP2oJx4VzOP6w9zp7KVXgDY6Qz5ySL9cLGWI,15815
- quasarr/providers/imdb_metadata.py,sha256=10L4kZkt6Fg0HGdNcc6KCtIQHRYEqdarLyaMVN6mT8w,4843
+ quasarr/providers/imdb_metadata.py,sha256=Rq43t1-uFEYZ8Iq197I6FZjYwDkSuymIjEYxWwlbfmE,12572
  quasarr/providers/jd_cache.py,sha256=mSvMrs3UwTn3sd9yGSJKGT-qwYeyYKC_l8whpXTVn7s,13530
  quasarr/providers/log.py,sha256=_g5RwtfuksARXnvryhsngzoJyFcNzj6suqd3ndqZM0Y,313
  quasarr/providers/myjd_api.py,sha256=Z3PEiO3c3UfDSr4Up5rgwTAnjloWHb-H1RkJ6BLKZv8,34140
@@ -44,14 +44,14 @@ quasarr/providers/obfuscated.py,sha256=EYm_7SfdJd9ae_m4HZgY9ruDXC5J9hb4KEV_WAnk-
  quasarr/providers/shared_state.py,sha256=5a_ZbGqTvt4-OqBt2a1WtR9I5J_Ky7IlkEY8EGtKVu8,30646
  quasarr/providers/statistics.py,sha256=cEQixYnDMDqtm5wWe40E_2ucyo4mD0n3SrfelhQi1L8,6452
  quasarr/providers/utils.py,sha256=mcUPbcXMsLmrYv0CTZO5a9aOt2-JLyL3SZxu6N8OyjU,12075
- quasarr/providers/version.py,sha256=iakqDG1xdl-OhipfMJ9jdOG8du1BnS6rllqOId1-LAo,4003
+ quasarr/providers/version.py,sha256=yMqY-dgXR5J5LGPMOg1B63SCVU62c-fsK76fQMP1cYo,4003
  quasarr/providers/web_server.py,sha256=AYd0KRxdDWMBr87BP8wlSMuL4zZo0I_rY-vHBai6Pfg,1688
  quasarr/providers/sessions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  quasarr/providers/sessions/al.py,sha256=AQ59vVU7uQSuwZLNppNsZAFvpow3zcxQ29dirPbyYc4,13432
  quasarr/providers/sessions/dd.py,sha256=ty9dnDFVJs-tFNcTS5QT9_wP82cKQGnCvb6v5In3Mog,3324
  quasarr/providers/sessions/dl.py,sha256=yTJlD84ItotViA1d-m0RwrbEJlL-VK-0nGw_4kfNLe0,5923
  quasarr/providers/sessions/nx.py,sha256=ZuWuqfb_rPJVom0c1dsXefXPXdzAIYqnQZapOPaUYUI,3421
- quasarr/search/__init__.py,sha256=V59LIiC75mQvasDdTjiWZRbPD1jXO1lhXlKeNVX0iOc,5726
+ quasarr/search/__init__.py,sha256=1Z4dEfbbTiUSs139S5_5VivnXszQjQrLn9AQPX87OyU,5920
  quasarr/search/sources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  quasarr/search/sources/al.py,sha256=-2-yRGubyE7bw4-ntGtZ04_LkbnEXhTidfKzKfmeVws,17745
  quasarr/search/sources/by.py,sha256=vNKMEmFpWxpZS9shh5M8gzrOlyyoOc8CveFv6piJ9FM,8344
@@ -61,9 +61,9 @@ quasarr/search/sources/dl.py,sha256=L4GK58Mp46dAZzmwtMB4ia1w0SSpp3z3eFvrmT-5278,
  quasarr/search/sources/dt.py,sha256=hvOqPKQRw5joSaTb9mpdPZXL4xpU167SFmLg8yhsPwM,10227
  quasarr/search/sources/dw.py,sha256=hna1ueKjdi9uqRQJ7UPenT0ym7igQgWGrv_--yGChVs,8215
  quasarr/search/sources/fx.py,sha256=xZUrv7dJSSmeLR2xnRQsRZAk9Q0-fDfQLNjz4wdBTqo,9452
- quasarr/search/sources/he.py,sha256=SoH6X-PsnaOUiQL3yaUbWkI-DDjnyQCMSAwAmv-vpAc,7063
+ quasarr/search/sources/he.py,sha256=eBzOtJRNpJuc7YQmMFMMQ7SLp81sLwA3Jdk_fKYPNRE,7330
  quasarr/search/sources/mb.py,sha256=Hq1zupo27FzYSQUio03HPG0wP4jYwOXl6cqgdOpjlzQ,8178
- quasarr/search/sources/nk.py,sha256=trb5rTQL_j9br6yBsdSFUp-V4L8_lFYEYpQ4qcB-JlE,6989
+ quasarr/search/sources/nk.py,sha256=MZXW6QK78-NBzoHf-bw3B_-TwpGkbyVSOd9bHT2qISo,7099
  quasarr/search/sources/nx.py,sha256=UXUSYEL4zwYVwCri359I26GYN8CDuCKokpOOR21YEns,7602
  quasarr/search/sources/sf.py,sha256=9k9K8_tYVarpW8n20HA2qAplBL14mIQCsorJO-ZxN6g,15811
  quasarr/search/sources/sj.py,sha256=LW2dVDfZ90mDdrQ6ZYtXb0eOjV3cCh6kEW7lTra1c5M,7608
@@ -74,9 +74,9 @@ quasarr/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  quasarr/storage/config.py,sha256=SSTgIce2FVYoVTK_6OCU3msknhxuLA3EC4Kcrrf_dxQ,6378
  quasarr/storage/setup.py,sha256=Cbo0phZbC6JP2wx_qER3vpaLSTDLbKEfdXj6KoAMkWw,47403
  quasarr/storage/sqlite_database.py,sha256=yMqFQfKf0k7YS-6Z3_7pj4z1GwWSXJ8uvF4IydXsuTE,3554
- quasarr-2.2.0.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
- quasarr-2.2.0.dist-info/METADATA,sha256=dkJt9lLP1HUd-JSYhswlcYSsHYPz9wlU44gBXFlu7NA,15024
- quasarr-2.2.0.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
- quasarr-2.2.0.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
- quasarr-2.2.0.dist-info/top_level.txt,sha256=dipJdaRda5ruTZkoGfZU60bY4l9dtPlmOWwxK_oGSF0,8
- quasarr-2.2.0.dist-info/RECORD,,
+ quasarr-2.3.0.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
+ quasarr-2.3.0.dist-info/METADATA,sha256=ZmpqDWmp3YiLqw7huxVjmGCkUIvdPxSdyQ29icEe9bY,15024
+ quasarr-2.3.0.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+ quasarr-2.3.0.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
+ quasarr-2.3.0.dist-info/top_level.txt,sha256=dipJdaRda5ruTZkoGfZU60bY4l9dtPlmOWwxK_oGSF0,8
+ quasarr-2.3.0.dist-info/RECORD,,