quasarr 2.3.1-py3-none-any.whl → 2.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr might be problematic. See the registry's release advisory for more details.

quasarr/__init__.py CHANGED
@@ -169,6 +169,14 @@ def run():
169
169
  else:
170
170
  hostname_credentials_config(shared_state, site.upper(), hostname)
171
171
 
172
+ # Check FlareSolverr configuration
173
+ skip_flaresolverr_db = DataBase("skip_flaresolverr")
174
+ flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
175
+ flaresolverr_url = Config('FlareSolverr').get('url')
176
+
177
+ if not flaresolverr_url and not flaresolverr_skipped:
178
+ flaresolverr_config(shared_state)
179
+
172
180
  config = Config('JDownloader')
173
181
  user = config.get('user')
174
182
  password = config.get('password')
@@ -249,23 +257,33 @@ def flaresolverr_checker(shared_state_dict, shared_state_lock):
249
257
  flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
250
258
 
251
259
  flaresolverr_url = Config('FlareSolverr').get('url')
260
+
261
+ # If FlareSolverr is not configured and not skipped, it means it's the first run
262
+ # and the user needs to be prompted via the WebUI.
263
+ # This background process should NOT block or prompt the user.
264
+ # It should only check and log the status.
252
265
  if not flaresolverr_url and not flaresolverr_skipped:
253
- flaresolverr_config(shared_state)
254
- # Re-check after config - user may have skipped
255
- flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
256
- flaresolverr_url = Config('FlareSolverr').get('url')
266
+ info('FlareSolverr URL not configured. Please configure it via the WebUI.')
267
+ info('Some sites (AL) will not work without FlareSolverr.')
268
+ return # Exit the checker, it will be re-checked if user configures it later
257
269
 
258
270
  if flaresolverr_skipped:
259
271
  info('FlareSolverr setup skipped by user preference')
260
272
  info('Some sites (AL) will not work without FlareSolverr. Configure it later in the web UI.')
261
273
  elif flaresolverr_url:
262
- print(f'Flaresolverr URL: "{flaresolverr_url}"')
274
+ info(f'Checking FlareSolverr at URL: "{flaresolverr_url}"')
263
275
  flaresolverr_check = check_flaresolverr(shared_state, flaresolverr_url)
264
276
  if flaresolverr_check:
265
- print(f'Using same User-Agent as FlareSolverr: "{shared_state.values["user_agent"]}"')
277
+ info(f'FlareSolverr connection successful. Using User-Agent: "{shared_state.values["user_agent"]}"')
278
+ else:
279
+ info('FlareSolverr check failed - using fallback user agent')
280
+ # Fallback user agent is already set in main process, but we log it
281
+ info(f'User Agent (fallback): "{FALLBACK_USER_AGENT}"')
266
282
 
267
283
  except KeyboardInterrupt:
268
284
  pass
285
+ except Exception as e:
286
+ info(f"An unexpected error occurred in FlareSolverr checker: {e}")
269
287
 
270
288
 
271
289
  def update_checker(shared_state_dict, shared_state_lock):
@@ -288,8 +288,8 @@ def setup_config(app, shared_state):
288
288
  "FlareSolverr URL saved successfully! A restart is recommended.")
289
289
  else:
290
290
  return render_fail(f"FlareSolverr returned unexpected status: {json_data.get('status')}")
291
- except requests.RequestException as e:
292
- return render_fail(f"Could not reach FlareSolverr: {str(e)}")
291
+ except requests.RequestException:
292
+ return render_fail(f"Could not reach FlareSolverr!")
293
293
 
294
294
  return render_fail("Could not reach FlareSolverr at that URL (expected HTTP 200).")
295
295
 
@@ -33,16 +33,16 @@ def setup_sponsors_helper_routes(app):
33
33
  if not protected:
34
34
  return abort(404, "No encrypted packages found")
35
35
 
36
- # Find the first package without a "session" key
36
+ # Find the first package that hasn't been disabled
37
37
  selected_package = None
38
38
  for package in protected:
39
39
  data = json.loads(package[1])
40
- if "session" not in data:
40
+ if "disabled" not in data:
41
41
  selected_package = (package[0], data)
42
42
  break
43
43
 
44
44
  if not selected_package:
45
- return abort(404, "No valid packages without session found")
45
+ return abort(404, "No valid packages found")
46
46
 
47
47
  package_id, data = selected_package
48
48
  title = data["title"]
@@ -67,9 +67,9 @@ def setup_sponsors_helper_routes(app):
67
67
  except Exception as e:
68
68
  return abort(500, str(e))
69
69
 
70
- @app.post("/sponsors_helper/api/to_download/")
70
+ @app.post("/sponsors_helper/api/download/")
71
71
  @require_api_key
72
- def to_download_api():
72
+ def download_api():
73
73
  try:
74
74
  data = request.json
75
75
  title = data.get('name')
@@ -97,51 +97,39 @@ def setup_sponsors_helper_routes(app):
97
97
  StatsHelper(shared_state).increment_failed_decryptions_automatic()
98
98
  return abort(500, "Failed")
99
99
 
100
- @app.post("/sponsors_helper/api/to_replace/")
100
+ @app.post("/sponsors_helper/api/disable/")
101
101
  @require_api_key
102
- def to_replace_api():
102
+ def disable_api():
103
103
  try:
104
104
  data = request.json
105
- name = data.get('name')
106
105
  package_id = data.get('package_id')
107
- password = data.get('password')
108
- replace_url = data.get('replace_url')
109
- mirror = data.get('mirror')
110
- session = data.get('session')
111
-
112
- if not all([name, package_id, replace_url, mirror, session]):
113
- info("Missing required replacement data")
114
- return {"error": "Missing required replacement data"}, 400
115
-
116
- if password is None:
117
- password = ""
118
-
119
- blob = json.dumps(
120
- {
121
- "title": name,
122
- "links": [replace_url, mirror],
123
- "size_mb": 0,
124
- "password": password,
125
- "mirror": mirror,
126
- "session": session
127
- })
128
106
 
129
- shared_state.get_db("protected").update_store(package_id, blob)
107
+ if not package_id:
108
+ return {"error": "Missing package_id"}, 400
109
+
110
+ StatsHelper(shared_state).increment_failed_decryptions_automatic()
130
111
 
131
- info(f"Another CAPTCHA solution is required for {mirror} link: {replace_url}")
112
+ blob = shared_state.get_db("protected").retrieve(package_id)
113
+ package_data = json.loads(blob)
114
+ title = package_data.get('title')
115
+
116
+ package_data["disabled"] = True
117
+
118
+ shared_state.get_db("protected").update_store(package_id, json.dumps(package_data))
119
+
120
+ info(f"Disabled package {title}")
132
121
 
133
122
  StatsHelper(shared_state).increment_captcha_decryptions_automatic()
134
123
 
135
- return f"Replacement link stored for {name}"
124
+ return f"Package {title} disabled"
136
125
 
137
126
  except Exception as e:
138
- StatsHelper(shared_state).increment_failed_decryptions_automatic()
139
- info(f"Error handling replacement: {e}")
127
+ info(f"Error handling disable: {e}")
140
128
  return {"error": str(e)}, 500
141
129
 
142
- @app.delete("/sponsors_helper/api/to_failed/")
130
+ @app.delete("/sponsors_helper/api/fail/")
143
131
  @require_api_key
144
- def move_to_failed_api():
132
+ def fail_api():
145
133
  try:
146
134
  StatsHelper(shared_state).increment_failed_decryptions_automatic()
147
135
 
@@ -165,7 +153,7 @@ def setup_sponsors_helper_routes(app):
165
153
 
166
154
  @app.put("/sponsors_helper/api/set_sponsor_status/")
167
155
  @require_api_key
168
- def activate_sponsor_status():
156
+ def set_sponsor_status_api():
169
157
  try:
170
158
  data = request.body.read().decode("utf-8")
171
159
  payload = json.loads(data)
@@ -20,8 +20,57 @@ def _get_db(table_name):
20
20
  return DataBase(table_name)
21
21
 
22
22
 
23
+ def _get_config(section):
24
+ """Lazy import to avoid circular dependency."""
25
+ from quasarr.storage.config import Config
26
+ return Config(section)
27
+
28
+
29
+ class TitleCleaner:
30
+ @staticmethod
31
+ def sanitize(title):
32
+ if not title:
33
+ return ""
34
+ sanitized_title = html.unescape(title)
35
+ sanitized_title = re.sub(r"[^a-zA-Z0-9äöüÄÖÜß&-']", ' ', sanitized_title).strip()
36
+ sanitized_title = sanitized_title.replace(" - ", "-")
37
+ sanitized_title = re.sub(r'\s{2,}', ' ', sanitized_title)
38
+ return sanitized_title
39
+
40
+ @staticmethod
41
+ def clean(title):
42
+ try:
43
+ # Regex to find the title part before common release tags
44
+ pattern = r"(.*?)(?:[\.\s](?!19|20)\d{2}|[\.\s]German|[\.\s]GERMAN|[\.\s]\d{3,4}p|[\.\s]S(?:\d{1,3}))"
45
+ match = re.search(pattern, title)
46
+ if match:
47
+ extracted_title = match.group(1)
48
+ else:
49
+ extracted_title = title
50
+
51
+ tags_to_remove = [
52
+ r'[\.\s]UNRATED.*', r'[\.\s]Unrated.*', r'[\.\s]Uncut.*', r'[\.\s]UNCUT.*',
53
+ r'[\.\s]Directors[\.\s]Cut.*', r'[\.\s]Final[\.\s]Cut.*', r'[\.\s]DC.*',
54
+ r'[\.\s]REMASTERED.*', r'[\.\s]EXTENDED.*', r'[\.\s]Extended.*',
55
+ r'[\.\s]Theatrical.*', r'[\.\s]THEATRICAL.*'
56
+ ]
57
+
58
+ clean_title = extracted_title
59
+ for tag in tags_to_remove:
60
+ clean_title = re.sub(tag, "", clean_title, flags=re.IGNORECASE)
61
+
62
+ clean_title = clean_title.replace(".", " ").strip()
63
+ clean_title = re.sub(r'\s+', ' ', clean_title)
64
+ clean_title = clean_title.replace(" ", "+")
65
+
66
+ return clean_title
67
+ except Exception as e:
68
+ debug(f"Error cleaning title '{title}': {e}")
69
+ return title
70
+
71
+
23
72
  class IMDbAPI:
24
- """Handles interactions with api.imdbapi.dev"""
73
+ """Tier 1: api.imdbapi.dev - Primary, fast, comprehensive."""
25
74
  BASE_URL = "https://api.imdbapi.dev"
26
75
 
27
76
  @staticmethod
@@ -31,7 +80,7 @@ class IMDbAPI:
31
80
  response.raise_for_status()
32
81
  return response.json()
33
82
  except Exception as e:
34
- info(f"Error loading imdbapi.dev for {imdb_id}: {e}")
83
+ info(f"IMDbAPI get_title failed for {imdb_id}: {e}")
35
84
  return None
36
85
 
37
86
  @staticmethod
@@ -41,7 +90,7 @@ class IMDbAPI:
41
90
  response.raise_for_status()
42
91
  return response.json().get("akas", [])
43
92
  except Exception as e:
44
- info(f"Error loading localized titles from IMDbAPI.dev for {imdb_id}: {e}")
93
+ info(f"IMDbAPI get_akas failed for {imdb_id}: {e}")
45
94
  return []
46
95
 
47
96
  @staticmethod
@@ -51,155 +100,348 @@ class IMDbAPI:
51
100
  response.raise_for_status()
52
101
  return response.json().get("titles", [])
53
102
  except Exception as e:
54
- debug(f"Request on IMDbAPI failed: {e}")
103
+ debug(f"IMDbAPI search_titles failed: {e}")
55
104
  return []
56
105
 
57
106
 
58
- class IMDbWeb:
59
- """Handles fallback interactions by scraping imdb.com"""
60
- BASE_URL = "https://www.imdb.com"
107
+ class IMDbCDN:
108
+ """Tier 2: v2.sg.media-imdb.com - Fast fallback for English data."""
109
+ CDN_URL = "https://v2.sg.media-imdb.com/suggestion"
61
110
 
62
111
  @staticmethod
63
- def get_poster(imdb_id, user_agent):
64
- headers = {'User-Agent': user_agent}
112
+ def _get_cdn_data(imdb_id, language, user_agent):
65
113
  try:
66
- request = requests.get(f"{IMDbWeb.BASE_URL}/title/{imdb_id}/", headers=headers, timeout=10).text
67
- soup = BeautifulSoup(request, "html.parser")
68
- poster_set = soup.find('div', class_='ipc-poster').div.img["srcset"]
69
- poster_links = [x for x in poster_set.split(" ") if len(x) > 10]
70
- return poster_links[-1]
71
- except Exception as e:
72
- debug(f"Could not get poster title for {imdb_id} from IMDb: {e}")
73
- return None
114
+ if not imdb_id or len(imdb_id) < 2:
115
+ return None
74
116
 
75
- @staticmethod
76
- def get_localized_title(imdb_id, language, user_agent):
77
- headers = {
78
- 'Accept-Language': language,
79
- 'User-Agent': user_agent
80
- }
81
- try:
82
- response = requests.get(f"{IMDbWeb.BASE_URL}/title/{imdb_id}/", headers=headers, timeout=10)
117
+ headers = {
118
+ 'Accept-Language': f'{language},en;q=0.9',
119
+ 'User-Agent': user_agent,
120
+ 'Accept': 'application/json'
121
+ }
122
+
123
+ first_char = imdb_id[0].lower()
124
+ url = f"{IMDbCDN.CDN_URL}/{first_char}/{imdb_id}.json"
125
+
126
+ response = requests.get(url, headers=headers, timeout=5)
83
127
  response.raise_for_status()
84
128
 
85
- match = re.search(r'<title>(.*?) \(.*?</title>', response.text)
86
- if not match:
87
- match = re.search(r'<title>(.*?) - IMDb</title>', response.text)
129
+ data = response.json()
130
+
131
+ if "d" in data and len(data["d"]) > 0:
132
+ for entry in data["d"]:
133
+ if entry.get("id") == imdb_id:
134
+ return entry
135
+ return data["d"][0]
88
136
 
89
- if match:
90
- return match.group(1)
91
137
  except Exception as e:
92
- info(f"Error loading IMDb metadata for {imdb_id}: {e}")
138
+ debug(f"IMDbCDN request failed for {imdb_id}: {e}")
139
+
140
+ return None
93
141
 
142
+ @staticmethod
143
+ def get_poster(imdb_id, user_agent):
144
+ data = IMDbCDN._get_cdn_data(imdb_id, 'en', user_agent)
145
+ if data:
146
+ image_node = data.get("i")
147
+ if image_node and "imageUrl" in image_node:
148
+ return image_node["imageUrl"]
149
+ return None
150
+
151
+ @staticmethod
152
+ def get_title(imdb_id, user_agent):
153
+ """Returns the English title from CDN."""
154
+ data = IMDbCDN._get_cdn_data(imdb_id, 'en', user_agent)
155
+ if data and "l" in data:
156
+ return data["l"]
94
157
  return None
95
158
 
96
159
  @staticmethod
97
160
  def search_titles(query, ttype, language, user_agent):
98
- headers = {
99
- 'Accept-Language': language,
100
- 'User-Agent': user_agent
101
- }
102
161
  try:
103
- results = requests.get(f"{IMDbWeb.BASE_URL}/find/?q={quote(query)}&s=tt&ttype={ttype}&ref_=fn_{ttype}",
104
- headers=headers, timeout=10)
162
+ clean_query = quote(query.lower().replace(" ", "_"))
163
+ if not clean_query: return []
164
+
165
+ headers = {
166
+ 'Accept-Language': f'{language},en;q=0.9',
167
+ 'User-Agent': user_agent
168
+ }
169
+
170
+ first_char = clean_query[0]
171
+ url = f"{IMDbCDN.CDN_URL}/{first_char}/{clean_query}.json"
172
+
173
+ response = requests.get(url, headers=headers, timeout=5)
174
+
175
+ if response.status_code == 200:
176
+ data = response.json()
177
+ results = []
178
+ if "d" in data:
179
+ for item in data["d"]:
180
+ results.append({
181
+ 'id': item.get('id'),
182
+ 'titleNameText': item.get('l'),
183
+ 'titleReleaseText': item.get('y')
184
+ })
185
+ return results
105
186
 
106
- if results.status_code == 200:
107
- soup = BeautifulSoup(results.text, "html.parser")
108
- props = soup.find("script", text=re.compile("props"))
109
- if props:
110
- details = loads(props.string)
111
- return details['props']['pageProps']['titleResults']['results']
112
- else:
113
- debug(f"Request on IMDb failed: {results.status_code}")
114
187
  except Exception as e:
115
- debug(f"IMDb scraping fallback failed: {e}")
188
+ from quasarr.providers.log import debug
189
+ debug(f"IMDb CDN search failed: {e}")
116
190
 
117
191
  return []
118
192
 
119
193
 
120
- class TitleCleaner:
121
- @staticmethod
122
- def sanitize(title):
123
- if not title:
124
- return ""
125
- sanitized_title = html.unescape(title)
126
- sanitized_title = re.sub(r"[^a-zA-Z0-9äöüÄÖÜß&-']", ' ', sanitized_title).strip()
127
- sanitized_title = sanitized_title.replace(" - ", "-")
128
- sanitized_title = re.sub(r'\s{2,}', ' ', sanitized_title)
129
- return sanitized_title
194
+ class IMDbFlareSolverr:
195
+ """Tier 3: FlareSolverr - Robust fallback using browser automation."""
196
+ WEB_URL = "https://www.imdb.com"
130
197
 
131
198
  @staticmethod
132
- def clean(title):
199
+ def _request(url):
200
+ flaresolverr_url = _get_config('FlareSolverr').get('url')
201
+ flaresolverr_skipped = _get_db("skip_flaresolverr").retrieve("skipped")
202
+
203
+ if not flaresolverr_url or flaresolverr_skipped:
204
+ return None
205
+
133
206
  try:
134
- # Regex to find the title part before common release tags
135
- # Stops at:
136
- # - Year (19xx or 20xx) preceded by a separator
137
- # - Language tags (.German, .GERMAN)
138
- # - Resolution (.1080p, .720p, etc.)
139
- # - Season info (.S01)
140
- pattern = r"(.*?)(?:[\.\s](?!19|20)\d{2}|[\.\s]German|[\.\s]GERMAN|[\.\s]\d{3,4}p|[\.\s]S(?:\d{1,3}))"
141
- match = re.search(pattern, title)
142
- if match:
143
- extracted_title = match.group(1)
144
- else:
145
- extracted_title = title
207
+ post_data = {
208
+ "cmd": "request.get",
209
+ "url": url,
210
+ "maxTimeout": 60000,
211
+ }
212
+
213
+ response = requests.post(flaresolverr_url, json=post_data, headers={"Content-Type": "application/json"},
214
+ timeout=60)
215
+ if response.status_code == 200:
216
+ json_response = response.json()
217
+ if json_response.get("status") == "ok":
218
+ return json_response.get("solution", {}).get("response", "")
219
+ except Exception as e:
220
+ debug(f"FlareSolverr request failed for {url}: {e}")
146
221
 
147
- # Remove specific tags that might appear in the title part
148
- tags_to_remove = [
149
- r'[\.\s]UNRATED.*', r'[\.\s]Unrated.*', r'[\.\s]Uncut.*', r'[\.\s]UNCUT.*',
150
- r'[\.\s]Directors[\.\s]Cut.*', r'[\.\s]Final[\.\s]Cut.*', r'[\.\s]DC.*',
151
- r'[\.\s]REMASTERED.*', r'[\.\s]EXTENDED.*', r'[\.\s]Extended.*',
152
- r'[\.\s]Theatrical.*', r'[\.\s]THEATRICAL.*'
153
- ]
222
+ return None
154
223
 
155
- clean_title = extracted_title
156
- for tag in tags_to_remove:
157
- clean_title = re.sub(tag, "", clean_title, flags=re.IGNORECASE)
224
+ @staticmethod
225
+ def get_poster(imdb_id):
226
+ html_content = IMDbFlareSolverr._request(f"{IMDbFlareSolverr.WEB_URL}/title/{imdb_id}/")
227
+ if html_content:
228
+ try:
229
+ soup = BeautifulSoup(html_content, "html.parser")
230
+ poster_div = soup.find('div', class_='ipc-poster')
231
+ if poster_div and poster_div.div and poster_div.div.img:
232
+ poster_set = poster_div.div.img.get("srcset")
233
+ if poster_set:
234
+ poster_links = [x for x in poster_set.split(" ") if len(x) > 10]
235
+ return poster_links[-1]
236
+ except Exception as e:
237
+ debug(f"FlareSolverr poster parsing failed: {e}")
238
+ return None
158
239
 
159
- clean_title = clean_title.replace(".", " ").strip()
160
- clean_title = re.sub(r'\s+', ' ', clean_title) # Remove multiple spaces
161
- clean_title = clean_title.replace(" ", "+")
240
+ @staticmethod
241
+ def get_localized_title(imdb_id, language):
242
+ # FlareSolverr doesn't reliably support headers for localization.
243
+ # Instead, we scrape the release info page which lists AKAs.
244
+ url = f"{IMDbFlareSolverr.WEB_URL}/title/{imdb_id}/releaseinfo"
245
+ html_content = IMDbFlareSolverr._request(url)
246
+
247
+ if html_content:
248
+ try:
249
+ soup = BeautifulSoup(html_content, "html.parser")
250
+
251
+ # Map language codes to country names commonly used in IMDb AKAs
252
+ country_map = {
253
+ 'de': ['Germany', 'Austria', 'Switzerland', 'West Germany'],
254
+ 'fr': ['France', 'Canada', 'Belgium'],
255
+ 'es': ['Spain', 'Mexico', 'Argentina'],
256
+ 'it': ['Italy'],
257
+ 'pt': ['Portugal', 'Brazil'],
258
+ 'ru': ['Russia', 'Soviet Union'],
259
+ 'ja': ['Japan'],
260
+ 'hi': ['India']
261
+ }
262
+
263
+ target_countries = country_map.get(language, [])
264
+
265
+ # Find the AKAs list
266
+ # The structure is a list of items with country names and titles
267
+ items = soup.find_all("li", class_="ipc-metadata-list__item")
268
+
269
+ for item in items:
270
+ label_span = item.find("span", class_="ipc-metadata-list-item__label")
271
+ if not label_span:
272
+ # Sometimes it's an anchor if it's a link
273
+ label_span = item.find("a", class_="ipc-metadata-list-item__label")
274
+
275
+ if label_span:
276
+ country = label_span.get_text(strip=True)
277
+ # Check if this country matches our target language
278
+ if any(c in country for c in target_countries):
279
+ # Found a matching country, get the title
280
+ title_span = item.find("span", class_="ipc-metadata-list-item__list-content-item")
281
+ if title_span:
282
+ return title_span.get_text(strip=True)
283
+
284
+ except Exception as e:
285
+ debug(f"FlareSolverr localized title parsing failed: {e}")
162
286
 
163
- return clean_title
164
- except Exception as e:
165
- debug(f"Error cleaning title '{title}': {e}")
166
- return title
287
+ return None
288
+
289
+ @staticmethod
290
+ def search_titles(query, ttype):
291
+ url = f"{IMDbFlareSolverr.WEB_URL}/find/?q={quote(query)}&s=tt&ttype={ttype}&ref_=fn_{ttype}"
292
+ html_content = IMDbFlareSolverr._request(url)
293
+
294
+ if html_content:
295
+ try:
296
+ soup = BeautifulSoup(html_content, "html.parser")
297
+ props = soup.find("script", text=re.compile("props"))
298
+ if props:
299
+ details = loads(props.string)
300
+ results = details['props']['pageProps']['titleResults']['results']
301
+ mapped_results = []
302
+ for result in results:
303
+ try:
304
+ mapped_results.append({
305
+ 'id': result["listItem"]["titleId"],
306
+ 'titleNameText': result["listItem"]["titleText"],
307
+ 'titleReleaseText': result["listItem"].get("releaseYear")
308
+ })
309
+ except KeyError:
310
+ mapped_results.append({
311
+ 'id': result.get('id'),
312
+ 'titleNameText': result.get("titleNameText"),
313
+ 'titleReleaseText': result.get("titleReleaseText")
314
+ })
315
+ return mapped_results
316
+
317
+ results = []
318
+ items = soup.find_all("li", class_="ipc-metadata-list-summary-item")
319
+ for item in items:
320
+ a_tag = item.find("a", class_="ipc-metadata-list-summary-item__t")
321
+ if a_tag:
322
+ href = a_tag.get("href", "")
323
+ id_match = re.search(r"(tt\d+)", href)
324
+ if id_match:
325
+ results.append({
326
+ 'id': id_match.group(1),
327
+ 'titleNameText': a_tag.get_text(strip=True),
328
+ 'titleReleaseText': ""
329
+ })
330
+ return results
331
+
332
+ except Exception as e:
333
+ debug(f"FlareSolverr search parsing failed: {e}")
334
+ return []
335
+
336
+
337
+ # =============================================================================
338
+ # Main Functions (Chain of Responsibility)
339
+ # =============================================================================
340
+
341
+ def _update_cache(imdb_id, key, value, language=None):
342
+ db = _get_db("imdb_metadata")
343
+ try:
344
+ cached_data = db.retrieve(imdb_id)
345
+ if cached_data:
346
+ metadata = loads(cached_data)
347
+ else:
348
+ metadata = {
349
+ "title": None,
350
+ "year": None,
351
+ "poster_link": None,
352
+ "localized": {},
353
+ "ttl": 0
354
+ }
355
+
356
+ if key == "localized" and language:
357
+ if "localized" not in metadata or not isinstance(metadata["localized"], dict):
358
+ metadata["localized"] = {}
359
+ metadata["localized"][language] = value
360
+ else:
361
+ metadata[key] = value
362
+
363
+ now = datetime.now().timestamp()
364
+ days = 7 if metadata.get("title") and metadata.get("year") else 1
365
+ metadata["ttl"] = now + timedelta(days=days).total_seconds()
366
+
367
+ db.update_store(imdb_id, dumps(metadata))
368
+ except Exception as e:
369
+ debug(f"Error updating IMDb metadata cache for {imdb_id}: {e}")
167
370
 
168
371
 
169
372
  def get_poster_link(shared_state, imdb_id):
373
+ # 0. Check Cache (via get_imdb_metadata)
170
374
  imdb_metadata = get_imdb_metadata(imdb_id)
171
- if imdb_metadata:
172
- poster_link = imdb_metadata.get("poster_link")
173
- if poster_link:
174
- return poster_link
375
+ if imdb_metadata and imdb_metadata.get("poster_link"):
376
+ return imdb_metadata.get("poster_link")
175
377
 
176
- poster_link = None
177
- if imdb_id:
178
- poster_link = IMDbWeb.get_poster(imdb_id, shared_state.values["user_agent"])
378
+ user_agent = shared_state.values["user_agent"]
179
379
 
180
- if not poster_link:
181
- debug(f"Could not get poster title for {imdb_id} from IMDb")
380
+ poster = IMDbCDN.get_poster(imdb_id, user_agent)
381
+ if poster:
382
+ _update_cache(imdb_id, "poster_link", poster)
383
+ return poster
182
384
 
183
- return poster_link
385
+ poster = IMDbFlareSolverr.get_poster(imdb_id)
386
+ if poster:
387
+ _update_cache(imdb_id, "poster_link", poster)
388
+ return poster
389
+
390
+ debug(f"Could not get poster title for {imdb_id}")
391
+ return None
392
+
393
+
394
+ def get_localized_title(shared_state, imdb_id, language='de'):
395
+ # 0. Check Cache (via get_imdb_metadata)
396
+ imdb_metadata = get_imdb_metadata(imdb_id)
397
+ if imdb_metadata:
398
+ localized = imdb_metadata.get("localized", {}).get(language)
399
+ if localized: return localized
400
+ if language == 'en' and imdb_metadata.get("title"):
401
+ return imdb_metadata.get("title")
402
+
403
+ user_agent = shared_state.values["user_agent"]
404
+
405
+ if language == 'en':
406
+ title = IMDbCDN.get_title(imdb_id, user_agent)
407
+ if title:
408
+ sanitized_title = TitleCleaner.sanitize(title)
409
+ _update_cache(imdb_id, "title", sanitized_title)
410
+ return sanitized_title
411
+
412
+ title = IMDbFlareSolverr.get_localized_title(imdb_id, language)
413
+ if title:
414
+ sanitized_title = TitleCleaner.sanitize(title)
415
+ _update_cache(imdb_id, "localized", sanitized_title, language)
416
+ return sanitized_title
417
+
418
+ # Final fallback: Try CDN for English title if localization failed
419
+ title = IMDbCDN.get_title(imdb_id, user_agent)
420
+ if title:
421
+ sanitized_title = TitleCleaner.sanitize(title)
422
+ _update_cache(imdb_id, "title", sanitized_title)
423
+ return sanitized_title
424
+
425
+ debug(f"Could not get localized title for {imdb_id} in {language}")
426
+ return None
184
427
 
185
428
 
186
429
  def get_imdb_metadata(imdb_id):
187
430
  db = _get_db("imdb_metadata")
188
431
  now = datetime.now().timestamp()
189
-
190
- # Try to load from DB
191
432
  cached_metadata = None
433
+
434
+ # 0. Check Cache
192
435
  try:
193
436
  cached_data = db.retrieve(imdb_id)
194
437
  if cached_data:
195
438
  cached_metadata = loads(cached_data)
196
- # If valid, update TTL and return
197
439
  if cached_metadata.get("ttl") and cached_metadata["ttl"] > now:
198
440
  return cached_metadata
199
441
  except Exception as e:
200
442
  debug(f"Error retrieving IMDb metadata from DB for {imdb_id}: {e}")
443
+ cached_metadata = None
201
444
 
202
- # Initialize new metadata structure
203
445
  imdb_metadata = {
204
446
  "title": None,
205
447
  "year": None,
@@ -208,66 +450,44 @@ def get_imdb_metadata(imdb_id):
208
450
  "ttl": 0
209
451
  }
210
452
 
211
- # Fetch from API
453
+ # 1. Try API
212
454
  response_json = IMDbAPI.get_title(imdb_id)
213
455
 
214
- if not response_json:
215
- # API failed. If we have stale cached data, return it as fallback
216
- if cached_metadata:
217
- debug(f"IMDb API failed for {imdb_id}, returning stale cached data.")
218
- return cached_metadata
219
- return imdb_metadata
220
-
221
- # Process API response
222
- imdb_metadata["title"] = TitleCleaner.sanitize(response_json.get("primaryTitle", ""))
223
- imdb_metadata["year"] = response_json.get("startYear")
224
- imdb_metadata["ttl"] = now + timedelta(days=7).total_seconds()
225
-
226
- try:
227
- imdb_metadata["poster_link"] = response_json.get("primaryImage").get("url")
228
- except Exception as e:
229
- debug(f"Could not find poster link for {imdb_id} from imdbapi.dev: {e}")
230
- # Shorten TTL if data is incomplete
231
- imdb_metadata["ttl"] = now + timedelta(days=1).total_seconds()
232
-
233
- akas = IMDbAPI.get_akas(imdb_id)
234
- if akas:
235
- for aka in akas:
236
- if aka.get("language"):
237
- continue # skip entries with specific language tags
238
- if aka.get("country", {}).get("code", "").lower() == "de":
239
- imdb_metadata["localized"]["de"] = TitleCleaner.sanitize(aka.get("text"))
240
- break
241
- else:
242
- # Shorten TTL if AKAs failed
243
- imdb_metadata["ttl"] = now + timedelta(days=1).total_seconds()
456
+ if response_json:
457
+ imdb_metadata["title"] = TitleCleaner.sanitize(response_json.get("primaryTitle", ""))
458
+ imdb_metadata["year"] = response_json.get("startYear")
459
+
460
+ days = 7 if imdb_metadata.get("title") and imdb_metadata.get("year") else 1
461
+ imdb_metadata["ttl"] = now + timedelta(days=days).total_seconds()
244
462
 
245
- db.update_store(imdb_id, dumps(imdb_metadata))
246
- return imdb_metadata
247
-
248
-
249
- def get_year(imdb_id):
250
- imdb_metadata = get_imdb_metadata(imdb_id)
251
- if imdb_metadata:
252
- return imdb_metadata.get("year")
253
- return None
463
+ try:
464
+ imdb_metadata["poster_link"] = response_json.get("primaryImage").get("url")
465
+ except:
466
+ pass
467
+
468
+ akas = IMDbAPI.get_akas(imdb_id)
469
+ if akas:
470
+ for aka in akas:
471
+ if aka.get("language"): continue
472
+ if aka.get("country", {}).get("code", "").lower() == "de":
473
+ imdb_metadata["localized"]["de"] = TitleCleaner.sanitize(aka.get("text"))
474
+ break
254
475
 
476
+ db.update_store(imdb_id, dumps(imdb_metadata))
477
+ return imdb_metadata
255
478
 
256
- def get_localized_title(shared_state, imdb_id, language='de'):
257
- imdb_metadata = get_imdb_metadata(imdb_id)
258
- if imdb_metadata:
259
- localized_title = imdb_metadata.get("localized").get(language)
260
- if localized_title:
261
- return localized_title
262
- return imdb_metadata.get("title")
479
+ # API Failed. If we have stale cache, return it.
480
+ if cached_metadata:
481
+ return cached_metadata
263
482
 
264
- localized_title = IMDbWeb.get_localized_title(imdb_id, language, shared_state.values["user_agent"])
483
+ # 2. Fallback: Try CDN for basic info (English title, Year, Poster)
484
+ # We can't get localized titles from CDN, but we can get the rest.
485
+ # We need a user agent, but this function doesn't receive shared_state.
486
+ # We'll skip CDN fallback here to avoid circular deps or complexity,
487
+ # as get_poster_link and get_localized_title handle their own fallbacks.
488
+ # But to populate the DB, we could try. For now, return empty/partial if API fails.
265
489
 
266
- if not localized_title:
267
- debug(f"Could not get localized title for {imdb_id} in {language} from IMDb")
268
- else:
269
- localized_title = TitleCleaner.sanitize(localized_title)
270
- return localized_title
490
+ return imdb_metadata
271
491
 
272
492
 
273
493
  def get_imdb_id_from_title(shared_state, title, language="de"):
@@ -282,72 +502,76 @@ def get_imdb_id_from_title(shared_state, title, language="de"):
282
502
 
283
503
  title = TitleCleaner.clean(title)
284
504
 
285
- # Check Search Cache (DB)
505
+ # 0. Check Search Cache
286
506
  db = _get_db("imdb_searches")
287
507
  try:
288
508
  cached_data = db.retrieve(title)
289
509
  if cached_data:
290
510
  data = loads(cached_data)
291
- # Check TTL (48 hours)
292
511
  if data.get("timestamp") and datetime.fromtimestamp(data["timestamp"]) > datetime.now() - timedelta(
293
512
  hours=48):
294
513
  return data.get("imdb_id")
295
- except Exception as e:
296
- debug(f"Error retrieving search cache for {title}: {e}")
514
+ except Exception:
515
+ pass
297
516
 
298
- # Try IMDbAPI.dev first
517
+ user_agent = shared_state.values["user_agent"]
518
+
519
+ # 1. Try API
299
520
  search_results = IMDbAPI.search_titles(title)
300
521
  if search_results:
301
- for result in search_results:
302
- found_title = result.get("primaryTitle")
303
- found_id = result.get("id")
304
- found_type = result.get("type")
522
+ imdb_id = _match_result(shared_state, title, search_results, ttype_api, is_api=True)
305
523
 
306
- # Basic type filtering if possible from result data
307
- if ttype_api == "TV_SERIES" and found_type not in ["tvSeries", "tvMiniSeries"]:
308
- continue
309
- if ttype_api == "MOVIE" and found_type not in ["movie", "tvMovie"]:
310
- continue
311
-
312
- if shared_state.search_string_in_sanitized_title(title, found_title):
313
- imdb_id = found_id
314
- break
315
-
316
- # If no exact match found with type filtering, try relaxed matching
317
- if not imdb_id:
318
- for result in search_results:
319
- found_title = result.get("primaryTitle")
320
- found_id = result.get("id")
321
- if shared_state.search_string_in_sanitized_title(title, found_title):
322
- imdb_id = found_id
323
- break
524
+ # 2. Try CDN (Fallback)
525
+ if not imdb_id:
526
+ search_results = IMDbCDN.search_titles(title, ttype_web, language, user_agent)
527
+ if search_results:
528
+ imdb_id = _match_result(shared_state, title, search_results, ttype_api, is_api=False)
324
529
 
325
- # Fallback to IMDb scraping if API failed or returned no results
530
+ # 3. Try FlareSolverr (Last Resort)
326
531
  if not imdb_id:
327
- search_results = IMDbWeb.search_titles(title, ttype_web, language, shared_state.values["user_agent"])
532
+ search_results = IMDbFlareSolverr.search_titles(title, ttype_web)
328
533
  if search_results:
329
- for result in search_results:
330
- try:
331
- found_title = result["listItem"]["titleText"]
332
- found_id = result["listItem"]["titleId"]
333
- except KeyError:
334
- found_title = result["titleNameText"]
335
- found_id = result['id']
336
-
337
- if shared_state.search_string_in_sanitized_title(title, found_title):
338
- imdb_id = found_id
339
- break
534
+ imdb_id = _match_result(shared_state, title, search_results, ttype_api, is_api=False)
340
535
 
341
- # Update Search Cache
536
+ # Update Cache
342
537
  try:
343
538
  db.update_store(title, dumps({
344
539
  "imdb_id": imdb_id,
345
540
  "timestamp": datetime.now().timestamp()
346
541
  }))
347
- except Exception as e:
348
- debug(f"Error updating search cache for {title}: {e}")
542
+ except Exception:
543
+ pass
349
544
 
350
545
  if not imdb_id:
351
546
  debug(f"No IMDb-ID found for {title}")
352
547
 
353
548
  return imdb_id
549
+
550
+
551
+ def _match_result(shared_state, title, results, ttype_api, is_api=False):
552
+ for result in results:
553
+ found_title = result.get("primaryTitle") if is_api else result.get("titleNameText")
554
+ found_id = result.get("id")
555
+
556
+ if is_api:
557
+ found_type = result.get("type")
558
+ if ttype_api == "TV_SERIES" and found_type not in ["tvSeries", "tvMiniSeries"]: continue
559
+ if ttype_api == "MOVIE" and found_type not in ["movie", "tvMovie"]: continue
560
+
561
+ if shared_state.search_string_in_sanitized_title(title, found_title):
562
+ return found_id
563
+
564
+ for result in results:
565
+ found_title = result.get("primaryTitle") if is_api else result.get("titleNameText")
566
+ found_id = result.get("id")
567
+ if shared_state.search_string_in_sanitized_title(title, found_title):
568
+ return found_id
569
+
570
+ return None
571
+
572
+
573
+ def get_year(imdb_id):
574
+ imdb_metadata = get_imdb_metadata(imdb_id)
575
+ if imdb_metadata:
576
+ return imdb_metadata.get("year")
577
+ return None
@@ -8,7 +8,7 @@ import requests
8
8
 
9
9
 
10
10
  def get_version():
11
- return "2.3.1"
11
+ return "2.3.3"
12
12
 
13
13
 
14
14
  def get_latest_version():
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: quasarr
3
- Version: 2.3.1
3
+ Version: 2.3.3
4
4
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
5
5
  Home-page: https://github.com/rix1337/Quasarr
6
6
  Author: rix1337
@@ -1,10 +1,10 @@
1
- quasarr/__init__.py,sha256=eGJ6-vv6yQBq04TyeZ2j7EW0i-UW21Khbqoj5JJktoo,14759
1
+ quasarr/__init__.py,sha256=QRu_dlfLdToYkeO96bHaA8Kp0GmSL0IZc9ceRSxKWS0,15766
2
2
  quasarr/api/__init__.py,sha256=KLnFSe5l3MrVgrbu6-7GlE2PqouVyizqiRZfQkBtge0,19587
3
3
  quasarr/api/arr/__init__.py,sha256=eEop8A5t936uT5azn4qz0bq1DMX84_Ja16wyleGFhyM,18495
4
4
  quasarr/api/captcha/__init__.py,sha256=Mqg2HhWMaUc07cVaEYHAbf-YvnxkiYVbkWT-g92J-2k,72960
5
- quasarr/api/config/__init__.py,sha256=kIGCHtKTUovOHe9xMEdz-6_psCmx6aFoyrTP-jJah0s,14187
5
+ quasarr/api/config/__init__.py,sha256=q-7vK5YULrSDgTicho--bNK8aAhcbzCdhhNwEwUEwWg,14173
6
6
  quasarr/api/packages/__init__.py,sha256=ox0vzuXByag49RUEwYPWtMacsXl_iksvubHgDmG5RWQ,25192
7
- quasarr/api/sponsors_helper/__init__.py,sha256=vZIFGkc5HTRozjvi47tqxz6XpwDe8sDXVyeydc9k0Y0,6708
7
+ quasarr/api/sponsors_helper/__init__.py,sha256=1NREbllGaWFfFtnwixR936qBJmlAkqCupHtWrphpe5A,6137
8
8
  quasarr/api/statistics/__init__.py,sha256=0Os2rbqQ8ZN3R0XAavGVHlacKsAjp7GYjEIJCwvnsl8,7063
9
9
  quasarr/downloads/__init__.py,sha256=ikoHK5C8veDiU4M3eoDaUjFl0pYPSa91_7h65qEFiUM,16435
10
10
  quasarr/downloads/linkcrypters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -35,7 +35,7 @@ quasarr/providers/cloudflare.py,sha256=oUDR7OQ8E-8vCtagZLnIS2ZZV3ERffhxmW0njKKbt
35
35
  quasarr/providers/hostname_issues.py,sha256=9PJFIosLB-bMTmgWlR5-sYAmcyps7TDoSYjoL9cw9TE,1460
36
36
  quasarr/providers/html_images.py,sha256=rrovPNl-FTTKKA-4HCPEhsYpq5b20VDrsB7t4RrQf3w,15531
37
37
  quasarr/providers/html_templates.py,sha256=IGWwt78bP2oJx4VzOP6w9zp7KVXgDY6Qz5ySL9cLGWI,15815
38
- quasarr/providers/imdb_metadata.py,sha256=SzSBNIwkXhtdYb4mIZuEKjzEORivQMFzfZ81bmPu9mQ,12424
38
+ quasarr/providers/imdb_metadata.py,sha256=a_kn9lw5cj5ZbxtrRBQKyF78ctMgHJJTW0DF2DONWOY,20771
39
39
  quasarr/providers/jd_cache.py,sha256=mSvMrs3UwTn3sd9yGSJKGT-qwYeyYKC_l8whpXTVn7s,13530
40
40
  quasarr/providers/log.py,sha256=_g5RwtfuksARXnvryhsngzoJyFcNzj6suqd3ndqZM0Y,313
41
41
  quasarr/providers/myjd_api.py,sha256=Z3PEiO3c3UfDSr4Up5rgwTAnjloWHb-H1RkJ6BLKZv8,34140
@@ -44,7 +44,7 @@ quasarr/providers/obfuscated.py,sha256=EYm_7SfdJd9ae_m4HZgY9ruDXC5J9hb4KEV_WAnk-
44
44
  quasarr/providers/shared_state.py,sha256=5a_ZbGqTvt4-OqBt2a1WtR9I5J_Ky7IlkEY8EGtKVu8,30646
45
45
  quasarr/providers/statistics.py,sha256=cEQixYnDMDqtm5wWe40E_2ucyo4mD0n3SrfelhQi1L8,6452
46
46
  quasarr/providers/utils.py,sha256=mcUPbcXMsLmrYv0CTZO5a9aOt2-JLyL3SZxu6N8OyjU,12075
47
- quasarr/providers/version.py,sha256=YBZUEkS1b2l_SBRvs7Uewcvwa3W2kphVx1dzjGi0INs,4003
47
+ quasarr/providers/version.py,sha256=7ylGTRMzi1TRneo7pwdAqP2unQYxH49tCK0kqbRUFyU,4003
48
48
  quasarr/providers/web_server.py,sha256=AYd0KRxdDWMBr87BP8wlSMuL4zZo0I_rY-vHBai6Pfg,1688
49
49
  quasarr/providers/sessions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
50
50
  quasarr/providers/sessions/al.py,sha256=AQ59vVU7uQSuwZLNppNsZAFvpow3zcxQ29dirPbyYc4,13432
@@ -74,9 +74,9 @@ quasarr/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
74
74
  quasarr/storage/config.py,sha256=SSTgIce2FVYoVTK_6OCU3msknhxuLA3EC4Kcrrf_dxQ,6378
75
75
  quasarr/storage/setup.py,sha256=Cbo0phZbC6JP2wx_qER3vpaLSTDLbKEfdXj6KoAMkWw,47403
76
76
  quasarr/storage/sqlite_database.py,sha256=yMqFQfKf0k7YS-6Z3_7pj4z1GwWSXJ8uvF4IydXsuTE,3554
77
- quasarr-2.3.1.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
78
- quasarr-2.3.1.dist-info/METADATA,sha256=H3UXdXSHs5DONX6kG-3RXSTZnjzSZY0Ci9PBxEbU3Ww,15024
79
- quasarr-2.3.1.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
80
- quasarr-2.3.1.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
81
- quasarr-2.3.1.dist-info/top_level.txt,sha256=dipJdaRda5ruTZkoGfZU60bY4l9dtPlmOWwxK_oGSF0,8
82
- quasarr-2.3.1.dist-info/RECORD,,
77
+ quasarr-2.3.3.dist-info/licenses/LICENSE,sha256=QQFCAfDgt7lSA8oSWDHIZ9aTjFbZaBJdjnGOHkuhK7k,1060
78
+ quasarr-2.3.3.dist-info/METADATA,sha256=n02_HYJ7qpsk6QSoc7Qq6zIT3QQAsq7tlzBg15MNE1g,15024
79
+ quasarr-2.3.3.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
80
+ quasarr-2.3.3.dist-info/entry_points.txt,sha256=gXi8mUKsIqKVvn-bOc8E5f04sK_KoMCC-ty6b2Hf-jc,40
81
+ quasarr-2.3.3.dist-info/top_level.txt,sha256=dipJdaRda5ruTZkoGfZU60bY4l9dtPlmOWwxK_oGSF0,8
82
+ quasarr-2.3.3.dist-info/RECORD,,