quasarr 2.2.0__tar.gz → 2.3.0__tar.gz

This diff shows the content changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

This version of quasarr has been flagged as potentially problematic.

Files changed (89)
  1. {quasarr-2.2.0 → quasarr-2.3.0}/PKG-INFO +1 -1
  2. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/__init__.py +38 -29
  3. quasarr-2.3.0/quasarr/providers/imdb_metadata.py +355 -0
  4. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/version.py +1 -1
  5. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/__init__.py +5 -0
  6. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/he.py +7 -1
  7. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/nk.py +4 -1
  8. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/PKG-INFO +1 -1
  9. quasarr-2.2.0/quasarr/providers/imdb_metadata.py +0 -142
  10. {quasarr-2.2.0 → quasarr-2.3.0}/LICENSE +0 -0
  11. {quasarr-2.2.0 → quasarr-2.3.0}/README.md +0 -0
  12. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/__init__.py +0 -0
  13. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/arr/__init__.py +0 -0
  14. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/captcha/__init__.py +0 -0
  15. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/config/__init__.py +0 -0
  16. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/packages/__init__.py +0 -0
  17. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/sponsors_helper/__init__.py +0 -0
  18. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/api/statistics/__init__.py +0 -0
  19. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/__init__.py +0 -0
  20. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/linkcrypters/__init__.py +0 -0
  21. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/linkcrypters/al.py +0 -0
  22. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/linkcrypters/filecrypt.py +0 -0
  23. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/linkcrypters/hide.py +0 -0
  24. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/packages/__init__.py +0 -0
  25. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/__init__.py +0 -0
  26. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/al.py +0 -0
  27. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/by.py +0 -0
  28. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/dd.py +0 -0
  29. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/dj.py +0 -0
  30. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/dl.py +0 -0
  31. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/dt.py +0 -0
  32. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/dw.py +0 -0
  33. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/he.py +0 -0
  34. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/mb.py +0 -0
  35. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/nk.py +0 -0
  36. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/nx.py +0 -0
  37. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/sf.py +0 -0
  38. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/sj.py +0 -0
  39. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/sl.py +0 -0
  40. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/wd.py +0 -0
  41. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/downloads/sources/wx.py +0 -0
  42. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/__init__.py +0 -0
  43. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/auth.py +0 -0
  44. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/cloudflare.py +0 -0
  45. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/hostname_issues.py +0 -0
  46. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/html_images.py +0 -0
  47. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/html_templates.py +0 -0
  48. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/jd_cache.py +0 -0
  49. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/log.py +0 -0
  50. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/myjd_api.py +0 -0
  51. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/notifications.py +0 -0
  52. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/obfuscated.py +0 -0
  53. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/sessions/__init__.py +0 -0
  54. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/sessions/al.py +0 -0
  55. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/sessions/dd.py +0 -0
  56. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/sessions/dl.py +0 -0
  57. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/sessions/nx.py +0 -0
  58. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/shared_state.py +0 -0
  59. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/statistics.py +0 -0
  60. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/utils.py +0 -0
  61. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/providers/web_server.py +0 -0
  62. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/__init__.py +0 -0
  63. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/al.py +0 -0
  64. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/by.py +0 -0
  65. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/dd.py +0 -0
  66. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/dj.py +0 -0
  67. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/dl.py +0 -0
  68. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/dt.py +0 -0
  69. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/dw.py +0 -0
  70. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/fx.py +0 -0
  71. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/mb.py +0 -0
  72. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/nx.py +0 -0
  73. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/sf.py +0 -0
  74. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/sj.py +0 -0
  75. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/sl.py +0 -0
  76. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/wd.py +0 -0
  77. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/search/sources/wx.py +0 -0
  78. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/storage/__init__.py +0 -0
  79. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/storage/config.py +0 -0
  80. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/storage/setup.py +0 -0
  81. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr/storage/sqlite_database.py +0 -0
  82. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/SOURCES.txt +0 -0
  83. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/dependency_links.txt +0 -0
  84. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/entry_points.txt +0 -0
  85. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/not-zip-safe +0 -0
  86. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/requires.txt +0 -0
  87. {quasarr-2.2.0 → quasarr-2.3.0}/quasarr.egg-info/top_level.txt +0 -0
  88. {quasarr-2.2.0 → quasarr-2.3.0}/setup.cfg +0 -0
  89. {quasarr-2.2.0 → quasarr-2.3.0}/setup.py +0 -0
--- quasarr-2.2.0/PKG-INFO
+++ quasarr-2.3.0/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: quasarr
- Version: 2.2.0
+ Version: 2.3.0
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
  Home-page: https://github.com/rix1337/Quasarr
  Author: rix1337
--- quasarr-2.2.0/quasarr/__init__.py
+++ quasarr-2.3.0/quasarr/__init__.py
@@ -100,38 +100,12 @@ def run():
      shared_state.update("database", DataBase)
      supported_hostnames = extract_allowed_keys(Config._DEFAULT_CONFIG, 'Hostnames')
      shared_state.update("sites", [key.upper() for key in supported_hostnames])
-     shared_state.update("user_agent", "") # will be set by FlareSolverr or fallback
+     # Set fallback user agent immediately so it's available while background check runs
+     shared_state.update("user_agent", FALLBACK_USER_AGENT)
      shared_state.update("helper_active", False)

      print(f'Config path: "{config_path}"')

-     # Check if FlareSolverr was previously skipped
-     skip_flaresolverr_db = DataBase("skip_flaresolverr")
-     flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
-
-     flaresolverr_url = Config('FlareSolverr').get('url')
-     if not flaresolverr_url and not flaresolverr_skipped:
-         flaresolverr_config(shared_state)
-         # Re-check after config - user may have skipped
-         flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
-         flaresolverr_url = Config('FlareSolverr').get('url')
-
-     if flaresolverr_skipped:
-         info('FlareSolverr setup skipped by user preference')
-         info('Some sites (AL) will not work without FlareSolverr. Configure it later in the web UI.')
-         # Set fallback user agent
-         shared_state.update("user_agent", FALLBACK_USER_AGENT)
-         print(f'User Agent (fallback): "{FALLBACK_USER_AGENT}"')
-     elif flaresolverr_url:
-         print(f'Flaresolverr URL: "{flaresolverr_url}"')
-         flaresolverr_check = check_flaresolverr(shared_state, flaresolverr_url)
-         if flaresolverr_check:
-             print(f'User Agent: "{shared_state.values["user_agent"]}"')
-         else:
-             info('FlareSolverr check failed - using fallback user agent')
-             shared_state.update("user_agent", FALLBACK_USER_AGENT)
-             print(f'User Agent (fallback): "{FALLBACK_USER_AGENT}"')
-
      print("\n===== Hostnames =====")
      try:
          if arguments.hostnames:
@@ -181,7 +155,7 @@ def run():

      # Check credentials for login-required hostnames
      skip_login_db = DataBase("skip_login")
-     login_required_sites = ['al', 'dd', 'nx', 'dl']
+     login_required_sites = ['al', 'dd', 'dl', 'nx']

      for site in login_required_sites:
          hostname = Config('Hostnames').get(site)
@@ -239,6 +213,13 @@ def run():
      info(f'CAPTCHA-Solution required for {package_count} package{'s' if package_count > 1 else ''} at: '
           f'"{shared_state.values["external_address"]}/captcha"!')

+     flaresolverr = multiprocessing.Process(
+         target=flaresolverr_checker,
+         args=(shared_state_dict, shared_state_lock),
+         daemon=True
+     )
+     flaresolverr.start()
+
      jdownloader = multiprocessing.Process(
          target=jdownloader_connection,
          args=(shared_state_dict, shared_state_lock),
@@ -259,6 +240,34 @@ def run():
      sys.exit(0)


+ def flaresolverr_checker(shared_state_dict, shared_state_lock):
+     try:
+         shared_state.set_state(shared_state_dict, shared_state_lock)
+
+         # Check if FlareSolverr was previously skipped
+         skip_flaresolverr_db = DataBase("skip_flaresolverr")
+         flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
+
+         flaresolverr_url = Config('FlareSolverr').get('url')
+         if not flaresolverr_url and not flaresolverr_skipped:
+             flaresolverr_config(shared_state)
+             # Re-check after config - user may have skipped
+             flaresolverr_skipped = skip_flaresolverr_db.retrieve("skipped")
+             flaresolverr_url = Config('FlareSolverr').get('url')
+
+         if flaresolverr_skipped:
+             info('FlareSolverr setup skipped by user preference')
+             info('Some sites (AL) will not work without FlareSolverr. Configure it later in the web UI.')
+         elif flaresolverr_url:
+             print(f'Flaresolverr URL: "{flaresolverr_url}"')
+             flaresolverr_check = check_flaresolverr(shared_state, flaresolverr_url)
+             if flaresolverr_check:
+                 print(f'Using same User-Agent as FlareSolverr: "{shared_state.values["user_agent"]}"')
+
+     except KeyboardInterrupt:
+         pass
+
+
  def update_checker(shared_state_dict, shared_state_lock):
      try:
          shared_state.set_state(shared_state_dict, shared_state_lock)
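
Taken together, the __init__.py hunks above move the FlareSolverr probe off the critical startup path: run() now sets the fallback User-Agent synchronously and spawns flaresolverr_checker as a daemon process that may upgrade it later. A minimal, self-contained sketch of that pattern, assuming nothing about Quasarr's internals (probe_flaresolverr, FALLBACK_UA and state are illustrative names, not the real API):

# Sketch: set a safe default synchronously, upgrade it from a daemon process.
import multiprocessing
import time

FALLBACK_UA = "Mozilla/5.0 (X11; Linux x86_64) Gecko/20100101 Firefox/124.0"

def probe_flaresolverr(state, lock):
    # Stand-in for the real HTTP round-trip to a FlareSolverr instance.
    time.sleep(2)
    with lock:
        state["user_agent"] = "Mozilla/5.0 ... (as reported by FlareSolverr)"

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    state = manager.dict({"user_agent": FALLBACK_UA})  # fallback usable immediately
    lock = manager.Lock()
    multiprocessing.Process(target=probe_flaresolverr, args=(state, lock), daemon=True).start()
    print(state["user_agent"])  # startup continues without waiting for the probe

The trade-off is that requests made in the first seconds after startup may still carry the fallback User-Agent before the probe completes.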
--- /dev/null
+++ quasarr-2.3.0/quasarr/providers/imdb_metadata.py
@@ -0,0 +1,355 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import html
+ import re
+ from datetime import datetime, timedelta
+ from json import loads, dumps
+ from urllib.parse import quote
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.log import info, debug
+
+
+ def _get_db(table_name):
+     """Lazy import to avoid circular dependency."""
+     from quasarr.storage.sqlite_database import DataBase
+     return DataBase(table_name)
+
+
+ class IMDbAPI:
+     """Handles interactions with api.imdbapi.dev"""
+     BASE_URL = "https://api.imdbapi.dev"
+
+     @staticmethod
+     def get_title(imdb_id):
+         try:
+             response = requests.get(f"{IMDbAPI.BASE_URL}/titles/{imdb_id}", timeout=30)
+             response.raise_for_status()
+             return response.json()
+         except Exception as e:
+             info(f"Error loading imdbapi.dev for {imdb_id}: {e}")
+             return None
+
+     @staticmethod
+     def get_akas(imdb_id):
+         try:
+             response = requests.get(f"{IMDbAPI.BASE_URL}/titles/{imdb_id}/akas", timeout=30)
+             response.raise_for_status()
+             return response.json().get("akas", [])
+         except Exception as e:
+             info(f"Error loading localized titles from IMDbAPI.dev for {imdb_id}: {e}")
+             return []
+
+     @staticmethod
+     def search_titles(query):
+         try:
+             response = requests.get(f"{IMDbAPI.BASE_URL}/search/titles?query={quote(query)}&limit=5", timeout=30)
+             response.raise_for_status()
+             return response.json().get("titles", [])
+         except Exception as e:
+             debug(f"Request on IMDbAPI failed: {e}")
+             return []
+
+
+ class IMDbWeb:
+     """Handles fallback interactions by scraping imdb.com"""
+     BASE_URL = "https://www.imdb.com"
+
+     @staticmethod
+     def get_poster(imdb_id, user_agent):
+         headers = {'User-Agent': user_agent}
+         try:
+             request = requests.get(f"{IMDbWeb.BASE_URL}/title/{imdb_id}/", headers=headers, timeout=10).text
+             soup = BeautifulSoup(request, "html.parser")
+             poster_set = soup.find('div', class_='ipc-poster').div.img["srcset"]
+             poster_links = [x for x in poster_set.split(" ") if len(x) > 10]
+             return poster_links[-1]
+         except Exception as e:
+             debug(f"Could not get poster title for {imdb_id} from IMDb: {e}")
+             return None
+
+     @staticmethod
+     def get_localized_title(imdb_id, language, user_agent):
+         headers = {
+             'Accept-Language': language,
+             'User-Agent': user_agent
+         }
+         try:
+             response = requests.get(f"{IMDbWeb.BASE_URL}/title/{imdb_id}/", headers=headers, timeout=10)
+             response.raise_for_status()
+
+             match = re.search(r'<title>(.*?) \(.*?</title>', response.text)
+             if not match:
+                 match = re.search(r'<title>(.*?) - IMDb</title>', response.text)
+
+             if match:
+                 return match.group(1)
+         except Exception as e:
+             info(f"Error loading IMDb metadata for {imdb_id}: {e}")
+
+         return None
+
+     @staticmethod
+     def search_titles(query, ttype, language, user_agent):
+         headers = {
+             'Accept-Language': language,
+             'User-Agent': user_agent
+         }
+         try:
+             results = requests.get(f"{IMDbWeb.BASE_URL}/find/?q={quote(query)}&s=tt&ttype={ttype}&ref_=fn_{ttype}",
+                                    headers=headers, timeout=10)
+
+             if results.status_code == 200:
+                 soup = BeautifulSoup(results.text, "html.parser")
+                 props = soup.find("script", text=re.compile("props"))
+                 if props:
+                     details = loads(props.string)
+                     return details['props']['pageProps']['titleResults']['results']
+             else:
+                 debug(f"Request on IMDb failed: {results.status_code}")
+         except Exception as e:
+             debug(f"IMDb scraping fallback failed: {e}")
+
+         return []
+
+
+ class TitleCleaner:
+     @staticmethod
+     def sanitize(title):
+         if not title:
+             return ""
+         sanitized_title = html.unescape(title)
+         sanitized_title = re.sub(r"[^a-zA-Z0-9äöüÄÖÜß&-']", ' ', sanitized_title).strip()
+         sanitized_title = sanitized_title.replace(" - ", "-")
+         sanitized_title = re.sub(r'\s{2,}', ' ', sanitized_title)
+         return sanitized_title
+
+     @staticmethod
+     def clean(title):
+         try:
+             # Regex to find the title part before common release tags
+             # Stops at:
+             # - Year (19xx or 20xx) preceded by a separator
+             # - Language tags (.German, .GERMAN)
+             # - Resolution (.1080p, .720p, etc.)
+             # - Season info (.S01)
+             pattern = r"(.*?)(?:[\.\s](?!19|20)\d{2}|[\.\s]German|[\.\s]GERMAN|[\.\s]\d{3,4}p|[\.\s]S(?:\d{1,3}))"
+             match = re.search(pattern, title)
+             if match:
+                 extracted_title = match.group(1)
+             else:
+                 extracted_title = title
+
+             # Remove specific tags that might appear in the title part
+             tags_to_remove = [
+                 r'[\.\s]UNRATED.*', r'[\.\s]Unrated.*', r'[\.\s]Uncut.*', r'[\.\s]UNCUT.*',
+                 r'[\.\s]Directors[\.\s]Cut.*', r'[\.\s]Final[\.\s]Cut.*', r'[\.\s]DC.*',
+                 r'[\.\s]REMASTERED.*', r'[\.\s]EXTENDED.*', r'[\.\s]Extended.*',
+                 r'[\.\s]Theatrical.*', r'[\.\s]THEATRICAL.*'
+             ]
+
+             clean_title = extracted_title
+             for tag in tags_to_remove:
+                 clean_title = re.sub(tag, "", clean_title, flags=re.IGNORECASE)
+
+             clean_title = clean_title.replace(".", " ").strip()
+             clean_title = re.sub(r'\s+', ' ', clean_title) # Remove multiple spaces
+             clean_title = clean_title.replace(" ", "+")
+
+             return clean_title
+         except Exception as e:
+             debug(f"Error cleaning title '{title}': {e}")
+             return title
+
+
+ def get_poster_link(shared_state, imdb_id):
+     imdb_metadata = get_imdb_metadata(imdb_id)
+     if imdb_metadata:
+         poster_link = imdb_metadata.get("poster_link")
+         if poster_link:
+             return poster_link
+
+     poster_link = None
+     if imdb_id:
+         poster_link = IMDbWeb.get_poster(imdb_id, shared_state.values["user_agent"])
+
+     if not poster_link:
+         debug(f"Could not get poster title for {imdb_id} from IMDb")
+
+     return poster_link
+
+
+ def get_imdb_metadata(imdb_id):
+     db = _get_db("imdb_metadata")
+     now = datetime.now().timestamp()
+
+     # Try to load from DB
+     cached_metadata = None
+     try:
+         cached_data = db.retrieve(imdb_id)
+         if cached_data:
+             cached_metadata = loads(cached_data)
+             # If valid, update TTL and return
+             if cached_metadata.get("ttl") and cached_metadata["ttl"] > now:
+                 cached_metadata["ttl"] = now + timedelta(days=30).total_seconds()
+                 db.update_store(imdb_id, dumps(cached_metadata))
+                 return cached_metadata
+     except Exception as e:
+         debug(f"Error retrieving IMDb metadata from DB for {imdb_id}: {e}")
+
+     # Initialize new metadata structure
+     imdb_metadata = {
+         "title": None,
+         "year": None,
+         "poster_link": None,
+         "localized": {},
+         "ttl": 0
+     }
+
+     # Fetch from API
+     response_json = IMDbAPI.get_title(imdb_id)
+
+     if not response_json:
+         # API failed. If we have stale cached data, return it as fallback
+         if cached_metadata:
+             debug(f"IMDb API failed for {imdb_id}, returning stale cached data.")
+             return cached_metadata
+         return imdb_metadata
+
+     # Process API response
+     imdb_metadata["title"] = TitleCleaner.sanitize(response_json.get("primaryTitle", ""))
+     imdb_metadata["year"] = response_json.get("startYear")
+     imdb_metadata["ttl"] = now + timedelta(days=30).total_seconds()
+
+     try:
+         imdb_metadata["poster_link"] = response_json.get("primaryImage").get("url")
+     except Exception as e:
+         debug(f"Could not find poster link for {imdb_id} from imdbapi.dev: {e}")
+         # Shorten TTL if data is incomplete
+         imdb_metadata["ttl"] = now + timedelta(days=1).total_seconds()
+
+     akas = IMDbAPI.get_akas(imdb_id)
+     if akas:
+         for aka in akas:
+             if aka.get("language"):
+                 continue # skip entries with specific language tags
+             if aka.get("country", {}).get("code", "").lower() == "de":
+                 imdb_metadata["localized"]["de"] = TitleCleaner.sanitize(aka.get("text"))
+                 break
+     else:
+         # Shorten TTL if AKAs failed
+         imdb_metadata["ttl"] = now + timedelta(days=1).total_seconds()
+
+     db.update_store(imdb_id, dumps(imdb_metadata))
+     return imdb_metadata
+
+
+ def get_year(imdb_id):
+     imdb_metadata = get_imdb_metadata(imdb_id)
+     if imdb_metadata:
+         return imdb_metadata.get("year")
+     return None
+
+
+ def get_localized_title(shared_state, imdb_id, language='de'):
+     imdb_metadata = get_imdb_metadata(imdb_id)
+     if imdb_metadata:
+         localized_title = imdb_metadata.get("localized").get(language)
+         if localized_title:
+             return localized_title
+         return imdb_metadata.get("title")
+
+     localized_title = IMDbWeb.get_localized_title(imdb_id, language, shared_state.values["user_agent"])
+
+     if not localized_title:
+         debug(f"Could not get localized title for {imdb_id} in {language} from IMDb")
+     else:
+         localized_title = TitleCleaner.sanitize(localized_title)
+     return localized_title
+
+
+ def get_imdb_id_from_title(shared_state, title, language="de"):
+     imdb_id = None
+
+     if re.search(r"S\d{1,3}(E\d{1,3})?", title, re.IGNORECASE):
+         ttype_api = "TV_SERIES"
+         ttype_web = "tv"
+     else:
+         ttype_api = "MOVIE"
+         ttype_web = "ft"
+
+     title = TitleCleaner.clean(title)
+
+     # Check Search Cache (DB)
+     db = _get_db("imdb_searches")
+     try:
+         cached_data = db.retrieve(title)
+         if cached_data:
+             data = loads(cached_data)
+             # Check TTL (48 hours)
+             if data.get("timestamp") and datetime.fromtimestamp(data["timestamp"]) > datetime.now() - timedelta(
+                     hours=48):
+                 return data.get("imdb_id")
+     except Exception as e:
+         debug(f"Error retrieving search cache for {title}: {e}")
+
+     # Try IMDbAPI.dev first
+     search_results = IMDbAPI.search_titles(title)
+     if search_results:
+         for result in search_results:
+             found_title = result.get("primaryTitle")
+             found_id = result.get("id")
+             found_type = result.get("type")
+
+             # Basic type filtering if possible from result data
+             if ttype_api == "TV_SERIES" and found_type not in ["tvSeries", "tvMiniSeries"]:
+                 continue
+             if ttype_api == "MOVIE" and found_type not in ["movie", "tvMovie"]:
+                 continue
+
+             if shared_state.search_string_in_sanitized_title(title, found_title):
+                 imdb_id = found_id
+                 break
+
+         # If no exact match found with type filtering, try relaxed matching
+         if not imdb_id:
+             for result in search_results:
+                 found_title = result.get("primaryTitle")
+                 found_id = result.get("id")
+                 if shared_state.search_string_in_sanitized_title(title, found_title):
+                     imdb_id = found_id
+                     break
+
+     # Fallback to IMDb scraping if API failed or returned no results
+     if not imdb_id:
+         search_results = IMDbWeb.search_titles(title, ttype_web, language, shared_state.values["user_agent"])
+         if search_results:
+             for result in search_results:
+                 try:
+                     found_title = result["listItem"]["titleText"]
+                     found_id = result["listItem"]["titleId"]
+                 except KeyError:
+                     found_title = result["titleNameText"]
+                     found_id = result['id']
+
+                 if shared_state.search_string_in_sanitized_title(title, found_title):
+                     imdb_id = found_id
+                     break
+
+     # Update Search Cache
+     try:
+         db.update_store(title, dumps({
+             "imdb_id": imdb_id,
+             "timestamp": datetime.now().timestamp()
+         }))
+     except Exception as e:
+         debug(f"Error updating search cache for {title}: {e}")
+
+     if not imdb_id:
+         debug(f"No IMDb-ID found for {title}")
+
+     return imdb_id
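
Two behaviors of the new module are worth illustrating. First, TitleCleaner.clean: because of the negative lookahead (?!19|20), a 19xx/20xx year is kept in the title, and the cut happens at the language, resolution, or season tag instead. A hand-traced check (assuming quasarr 2.3.0 is installed so the module imports):

from quasarr.providers.imdb_metadata import TitleCleaner

print(TitleCleaner.clean("Some.Movie.2021.German.1080p.BluRay.x264-GROUP"))
# -> "Some+Movie+2021"  (cut at ".German"; the year survives the lookahead)
print(TitleCleaner.clean("Show.Name.S01E04.German.720p.WEB.h264-GROUP"))
# -> "Show+Name"        (cut at ".S01")

Second, the caching contract of get_imdb_metadata: every cache hit renews the 30-day TTL, incomplete API responses get a 1-day TTL so they are retried sooner, and a failed API call falls back to stale cached data. A standalone sketch of that sliding-TTL policy, with a plain dict standing in for Quasarr's SQLite-backed DataBase (all names illustrative):

from datetime import datetime, timedelta

_store = {}

def cache_get(key, fetch, full_ttl=timedelta(days=30), short_ttl=timedelta(days=1)):
    now = datetime.now().timestamp()
    entry = _store.get(key)
    if entry and entry["ttl"] > now:
        entry["ttl"] = now + full_ttl.total_seconds()  # every hit renews the TTL
        return entry["value"]
    value, complete = fetch(key)
    if value is None:
        return entry["value"] if entry else None  # stale data beats no data
    ttl = full_ttl if complete else short_ttl  # retry incomplete data sooner
    _store[key] = {"value": value, "ttl": now + ttl.total_seconds()}
    return value

print(cache_get("tt0133093", lambda k: ({"title": "The Matrix", "year": 1999}, True)))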
--- quasarr-2.2.0/quasarr/providers/version.py
+++ quasarr-2.3.0/quasarr/providers/version.py
@@ -8,7 +8,7 @@ import requests


  def get_version():
-     return "2.2.0"
+     return "2.3.0"


  def get_latest_version():
--- quasarr-2.2.0/quasarr/search/__init__.py
+++ quasarr-2.3.0/quasarr/search/__init__.py
@@ -5,6 +5,7 @@
  import time
  from concurrent.futures import ThreadPoolExecutor, as_completed

+ from quasarr.providers.imdb_metadata import get_imdb_metadata
  from quasarr.providers.log import info, debug
  from quasarr.search.sources.al import al_feed, al_search
  from quasarr.search.sources.by import by_feed, by_search
@@ -31,6 +32,10 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
      if imdb_id and not imdb_id.startswith('tt'):
          imdb_id = f'tt{imdb_id}'

+     # Pre-populate IMDb metadata cache to avoid API hammering by search threads
+     if imdb_id:
+         get_imdb_metadata(imdb_id)
+
      docs_search = "lazylibrarian" in request_from.lower()

      al = shared_state.values["config"]("Hostnames").get("al")
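
The pre-population call matters because get_search_results fans out to many site modules over a ThreadPoolExecutor; warming the cache once means the workers read the stored metadata instead of racing to hit the API. The shape of that pattern, sketched with illustrative names:

from concurrent.futures import ThreadPoolExecutor, as_completed

def warm_then_search(imdb_id, sources, get_metadata, search_one):
    get_metadata(imdb_id)  # one upstream fetch; workers below hit the warm cache
    results = []
    with ThreadPoolExecutor(max_workers=max(len(sources), 1)) as pool:
        futures = [pool.submit(search_one, src, imdb_id) for src in sources]
        for future in as_completed(futures):
            results.extend(future.result())
    return results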
--- quasarr-2.2.0/quasarr/search/sources/he.py
+++ quasarr-2.3.0/quasarr/search/sources/he.py
@@ -12,7 +12,7 @@ import requests
  from bs4 import BeautifulSoup

  from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import info, debug

  hostname = "he"
@@ -84,6 +84,9 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
          if not local_title:
              info(f"{hostname}: no title for IMDb {imdb_id}")
              return releases
+         year = get_year(imdb_id)
+         if year:
+             local_title += f" {year}"
          source_search = local_title
      else:
          return releases
@@ -173,6 +176,9 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
      try:
          r = requests.get(source, headers=headers, timeout=10)
          soup = BeautifulSoup(r.content, 'html.parser')
+     except Exception as e:
+         mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
+     try:
          imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
          if imdb_link:
              release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
--- quasarr-2.2.0/quasarr/search/sources/nk.py
+++ quasarr-2.3.0/quasarr/search/sources/nk.py
@@ -13,7 +13,7 @@ import requests
  from bs4 import BeautifulSoup

  from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
- from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.imdb_metadata import get_localized_title, get_year
  from quasarr.providers.log import info, debug

  hostname = "nk"
@@ -75,6 +75,9 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
          if not local_title:
              info(f"{hostname}: no title for IMDb {imdb_id}")
              return releases
+         year = get_year(imdb_id)
+         if year:
+             local_title += f" {year}"
          source_search = local_title
      else:
          return releases
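
he.py and nk.py make the same refinement: when the IMDb year is known, it is appended to the localized title before the site is queried, which disambiguates remakes and same-named releases (e.g. Dune 1984 vs. Dune 2021). The resulting query construction, reduced to a sketch:

def build_source_search(local_title, year):
    # Mirrors the change above: append the year only when get_year() returned one.
    return f"{local_title} {year}" if year else local_title

assert build_source_search("Dune", 2021) == "Dune 2021"
assert build_source_search("Dune", None) == "Dune"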
--- quasarr-2.2.0/quasarr.egg-info/PKG-INFO
+++ quasarr-2.3.0/quasarr.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: quasarr
- Version: 2.2.0
+ Version: 2.3.0
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
  Home-page: https://github.com/rix1337/Quasarr
  Author: rix1337
--- quasarr-2.2.0/quasarr/providers/imdb_metadata.py
+++ /dev/null
@@ -1,142 +0,0 @@
- # -*- coding: utf-8 -*-
- # Quasarr
- # Project by https://github.com/rix1337
-
- import html
- import re
- from datetime import datetime, timedelta
- from json import loads
- from urllib.parse import quote
-
- import requests
- from bs4 import BeautifulSoup
-
- from quasarr.providers.log import info, debug
-
-
- def get_poster_link(shared_state, imdb_id):
-     poster_link = None
-     if imdb_id:
-         headers = {'User-Agent': shared_state.values["user_agent"]}
-         request = requests.get(f"https://www.imdb.com/title/{imdb_id}/", headers=headers, timeout=10).text
-         soup = BeautifulSoup(request, "html.parser")
-         try:
-             poster_set = soup.find('div', class_='ipc-poster').div.img[
-                 "srcset"]  # contains links to posters in ascending resolution
-             poster_links = [x for x in poster_set.split(" ") if
-                             len(x) > 10]  # extract all poster links ignoring resolution info
-             poster_link = poster_links[-1]  # get the highest resolution poster
-         except:
-             pass
-
-     if not poster_link:
-         debug(f"Could not get poster title for {imdb_id} from IMDb")
-
-     return poster_link
-
-
- def get_localized_title(shared_state, imdb_id, language='de'):
-     localized_title = None
-
-     headers = {
-         'Accept-Language': language,
-         'User-Agent': shared_state.values["user_agent"]
-     }
-
-     try:
-         response = requests.get(f"https://www.imdb.com/title/{imdb_id}/", headers=headers, timeout=10)
-     except Exception as e:
-         info(f"Error loading IMDb metadata for {imdb_id}: {e}")
-         return localized_title
-
-     try:
-         match = re.findall(r'<title>(.*?) \(.*?</title>', response.text)
-         localized_title = match[0]
-     except:
-         try:
-             match = re.findall(r'<title>(.*?) - IMDb</title>', response.text)
-             localized_title = match[0]
-         except:
-             pass
-
-     if not localized_title:
-         debug(f"Could not get localized title for {imdb_id} in {language} from IMDb")
-
-     localized_title = html.unescape(localized_title)
-     localized_title = re.sub(r"[^a-zA-Z0-9äöüÄÖÜß&-']", ' ', localized_title).strip()
-     localized_title = localized_title.replace(" - ", "-")
-     localized_title = re.sub(r'\s{2,}', ' ', localized_title)
-
-     return localized_title
-
-
- def get_clean_title(title):
-     try:
-         extracted_title = re.findall(r"(.*?)(?:.(?!19|20)\d{2}|\.German|.GERMAN|\.\d{3,4}p|\.S(?:\d{1,3}))", title)[0]
-         leftover_tags_removed = re.sub(
-             r'(|.UNRATED.*|.Unrated.*|.Uncut.*|.UNCUT.*)(|.Directors.Cut.*|.Final.Cut.*|.DC.*|.REMASTERED.*|.EXTENDED.*|.Extended.*|.Theatrical.*|.THEATRICAL.*)',
-             "", extracted_title)
-         clean_title = leftover_tags_removed.replace(".", " ").strip().replace(" ", "+")
-
-     except:
-         clean_title = title
-     return clean_title
-
-
- def get_imdb_id_from_title(shared_state, title, language="de"):
-     imdb_id = None
-
-     if re.search(r"S\d{1,3}(E\d{1,3})?", title, re.IGNORECASE):
-         ttype = "tv"
-     else:
-         ttype = "ft"
-
-     title = get_clean_title(title)
-
-     threshold = 60 * 60 * 48 # 48 hours
-     context = "recents_imdb"
-     recently_searched = shared_state.get_recently_searched(shared_state, context, threshold)
-     if title in recently_searched:
-         title_item = recently_searched[title]
-         if title_item["timestamp"] > datetime.now() - timedelta(seconds=threshold):
-             return title_item["imdb_id"]
-
-     headers = {
-         'Accept-Language': language,
-         'User-Agent': shared_state.values["user_agent"]
-     }
-
-     results = requests.get(f"https://www.imdb.com/find/?q={quote(title)}&s=tt&ttype={ttype}&ref_=fn_{ttype}",
-                            headers=headers, timeout=10)
-
-     if results.status_code == 200:
-         soup = BeautifulSoup(results.text, "html.parser")
-         props = soup.find("script", text=re.compile("props"))
-         details = loads(props.string)
-         search_results = details['props']['pageProps']['titleResults']['results']
-
-         if len(search_results) > 0:
-             for result in search_results:
-                 try:
-                     found_title = result["listItem"]["titleText"]
-                     found_id = result["listItem"]["titleId"]
-                 except KeyError:
-                     found_title = result["titleNameText"]
-                     found_id = result['id']
-
-                 if shared_state.search_string_in_sanitized_title(title, found_title):
-                     imdb_id = found_id
-                     break
-     else:
-         debug(f"Request on IMDb failed: {results.status_code}")
-
-     recently_searched[title] = {
-         "imdb_id": imdb_id,
-         "timestamp": datetime.now()
-     }
-     shared_state.update(context, recently_searched)
-
-     if not imdb_id:
-         debug(f"No IMDb-ID found for {title}")
-
-     return imdb_id