quasarr 1.30.0__py3-none-any.whl → 1.32.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,8 +2,18 @@
  # Quasarr
  # Project by https://github.com/rix1337

+ from quasarr.providers.log import debug
  from quasarr.providers.myjd_api import TokenExpiredException, RequestTimeoutException, MYJDException

+ # Known archive extensions for fallback detection
+ ARCHIVE_EXTENSIONS = frozenset([
+     '.rar', '.zip', '.7z', '.tar', '.gz', '.bz2', '.xz',
+     '.001', '.002', '.003', '.004', '.005', '.006', '.007', '.008', '.009',
+     '.r00', '.r01', '.r02', '.r03', '.r04', '.r05', '.r06', '.r07', '.r08', '.r09',
+     '.part1.rar', '.part01.rar', '.part001.rar',
+     '.part2.rar', '.part02.rar', '.part002.rar',
+ ])
+

  class JDPackageCache:
      """
@@ -16,116 +26,264 @@ class JDPackageCache:

      This reduces redundant API calls within a single operation where the same
      data (e.g., linkgrabber_links) is needed multiple times.
-
-     Usage:
-         # Cache is created and discarded within a single function call
-         cache = JDPackageCache(device)
-         packages = cache.linkgrabber_packages  # Fetches from API
-         packages = cache.linkgrabber_packages  # Returns cached (same request)
-         # Cache goes out of scope and is garbage collected
      """

      def __init__(self, device):
+         debug("JDPackageCache: Initializing new cache instance")
          self._device = device
          self._linkgrabber_packages = None
          self._linkgrabber_links = None
          self._downloader_packages = None
          self._downloader_links = None
-         self._archive_package_uuids = None  # Set of package UUIDs containing archives
+         self._archive_cache = {}  # package_uuid -> bool (is_archive)
          self._is_collecting = None
+         # Stats tracking
+         self._api_calls = 0
+         self._cache_hits = 0
+
+     def get_stats(self):
+         """Return cache statistics string."""
+         pkg_count = len(self._downloader_packages or []) + len(self._linkgrabber_packages or [])
+         link_count = len(self._downloader_links or []) + len(self._linkgrabber_links or [])
+         return f"{self._api_calls} API calls | {pkg_count} packages, {link_count} links cached"

      @property
      def linkgrabber_packages(self):
          if self._linkgrabber_packages is None:
+             debug("JDPackageCache: Fetching linkgrabber_packages from API")
+             self._api_calls += 1
              try:
                  self._linkgrabber_packages = self._device.linkgrabber.query_packages()
-             except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                 debug(f"JDPackageCache: Retrieved {len(self._linkgrabber_packages)} linkgrabber packages")
+             except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
+                 debug(f"JDPackageCache: Failed to fetch linkgrabber_packages: {e}")
                  self._linkgrabber_packages = []
+         else:
+             self._cache_hits += 1
+             debug(f"JDPackageCache: Using cached linkgrabber_packages ({len(self._linkgrabber_packages)} packages)")
          return self._linkgrabber_packages

      @property
      def linkgrabber_links(self):
          if self._linkgrabber_links is None:
+             debug("JDPackageCache: Fetching linkgrabber_links from API")
+             self._api_calls += 1
              try:
                  self._linkgrabber_links = self._device.linkgrabber.query_links()
-             except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                 debug(f"JDPackageCache: Retrieved {len(self._linkgrabber_links)} linkgrabber links")
+             except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
+                 debug(f"JDPackageCache: Failed to fetch linkgrabber_links: {e}")
                  self._linkgrabber_links = []
+         else:
+             self._cache_hits += 1
+             debug(f"JDPackageCache: Using cached linkgrabber_links ({len(self._linkgrabber_links)} links)")
          return self._linkgrabber_links

      @property
      def downloader_packages(self):
          if self._downloader_packages is None:
+             debug("JDPackageCache: Fetching downloader_packages from API")
+             self._api_calls += 1
              try:
                  self._downloader_packages = self._device.downloads.query_packages()
-             except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                 debug(f"JDPackageCache: Retrieved {len(self._downloader_packages)} downloader packages")
+             except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
+                 debug(f"JDPackageCache: Failed to fetch downloader_packages: {e}")
                  self._downloader_packages = []
+         else:
+             self._cache_hits += 1
+             debug(f"JDPackageCache: Using cached downloader_packages ({len(self._downloader_packages)} packages)")
          return self._downloader_packages

      @property
      def downloader_links(self):
          if self._downloader_links is None:
+             debug("JDPackageCache: Fetching downloader_links from API")
+             self._api_calls += 1
              try:
                  self._downloader_links = self._device.downloads.query_links()
-             except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                 debug(f"JDPackageCache: Retrieved {len(self._downloader_links)} downloader links")
+             except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
+                 debug(f"JDPackageCache: Failed to fetch downloader_links: {e}")
                  self._downloader_links = []
+         else:
+             self._cache_hits += 1
+             debug(f"JDPackageCache: Using cached downloader_links ({len(self._downloader_links)} links)")
          return self._downloader_links

      @property
      def is_collecting(self):
          if self._is_collecting is None:
+             debug("JDPackageCache: Checking is_collecting from API")
+             self._api_calls += 1
              try:
                  self._is_collecting = self._device.linkgrabber.is_collecting()
-             except (TokenExpiredException, RequestTimeoutException, MYJDException):
+                 debug(f"JDPackageCache: is_collecting = {self._is_collecting}")
+             except (TokenExpiredException, RequestTimeoutException, MYJDException) as e:
+                 debug(f"JDPackageCache: Failed to check is_collecting: {e}")
                  self._is_collecting = False
+         else:
+             self._cache_hits += 1
+             debug(f"JDPackageCache: Using cached is_collecting = {self._is_collecting}")
          return self._is_collecting

-     def get_archive_package_uuids(self, downloader_packages, downloader_links):
+     def _has_archive_extension(self, package_uuid, links):
+         """Check if any link in the package has an archive file extension."""
+         for link in links:
+             if link.get("packageUUID") != package_uuid:
+                 continue
+             name = link.get("name", "")
+             name_lower = name.lower()
+             for ext in ARCHIVE_EXTENSIONS:
+                 if name_lower.endswith(ext):
+                     debug(
+                         f"JDPackageCache: Found archive extension '{ext}' in file '{name}' for package {package_uuid}")
+                     return True
+         return False
+
+     def _bulk_detect_archives(self, package_uuids):
          """
-         Get set of package UUIDs that contain at least one archive file.
+         Detect archives for multiple packages in ONE API call.
+
+         Returns:
+             tuple: (confirmed_archives: set, api_succeeded: bool)
+                 - confirmed_archives: Package UUIDs confirmed as archives
+                 - api_succeeded: Whether the API call worked (for fallback decisions)
+         """
+         confirmed_archives = set()
+
+         if not package_uuids:
+             debug("JDPackageCache: _bulk_detect_archives called with empty package_uuids")
+             return confirmed_archives, True

-         Two-phase detection:
-         1. Check extractionStatus in link data (free - catches in-progress/completed extractions)
-         2. Single API call for all remaining packages (catches pre-extraction archives)
+         package_list = list(package_uuids)
+         debug(f"JDPackageCache: Bulk archive detection for {len(package_list)} packages")

-         This correctly handles:
-         - Mixed packages (archive + non-archive files)
-         - Archives before extraction starts
-         - Archives during/after extraction
+         try:
+             self._api_calls += 1
+             archive_infos = self._device.extraction.get_archive_info([], package_list)
+             debug(f"JDPackageCache: get_archive_info returned {len(archive_infos) if archive_infos else 0} results")
+
+             if archive_infos:
+                 for i, archive_info in enumerate(archive_infos):
+                     if archive_info:
+                         debug(f"JDPackageCache: archive_info[{i}] = {archive_info}")
+                         # Try to get packageUUID from response
+                         pkg_uuid = archive_info.get("packageUUID")
+                         if pkg_uuid:
+                             debug(f"JDPackageCache: Confirmed archive via packageUUID: {pkg_uuid}")
+                             confirmed_archives.add(pkg_uuid)
+                         else:
+                             # Log what fields ARE available for debugging
+                             debug(
+                                 f"JDPackageCache: archive_info has no packageUUID, available keys: {list(archive_info.keys())}")
+                     else:
+                         debug(f"JDPackageCache: archive_info[{i}] is empty/None")
+
+             debug(f"JDPackageCache: Bulk detection confirmed {len(confirmed_archives)} archives: {confirmed_archives}")
+             return confirmed_archives, True
+
+         except Exception as e:
+             debug(f"JDPackageCache: Bulk archive detection API FAILED: {type(e).__name__}: {e}")
+             return confirmed_archives, False
+
+     def detect_all_archives(self, packages, links):
          """
-         if self._archive_package_uuids is not None:
-             return self._archive_package_uuids
+         Detect archives for all packages efficiently.

-         self._archive_package_uuids = set()
+         Uses ONE bulk API call, then applies safety fallbacks for packages
+         where detection was uncertain.

-         if not downloader_packages:
-             return self._archive_package_uuids
+         Args:
+             packages: List of downloader packages
+             links: List of downloader links (for extension fallback)

-         all_package_uuids = {p.get("uuid") for p in downloader_packages if p.get("uuid")}
+         Returns:
+             Set of package UUIDs that should be treated as archives
+         """
+         if not packages:
+             debug("JDPackageCache: detect_all_archives called with no packages")
+             return set()

-         # Phase 1: Check extractionStatus in already-fetched link data (free - no API call)
-         # This catches packages where extraction is in progress or completed
-         for link in downloader_links:
-             extraction_status = link.get("extractionStatus")
-             if extraction_status:  # Any non-empty extraction status means it's an archive
-                 pkg_uuid = link.get("packageUUID")
-                 if pkg_uuid:
-                     self._archive_package_uuids.add(pkg_uuid)
+         all_package_uuids = {p.get("uuid") for p in packages if p.get("uuid")}
+         debug(f"JDPackageCache: detect_all_archives for {len(all_package_uuids)} packages")

-         # Phase 2: Single API call for all unchecked packages
-         unchecked_package_uuids = list(all_package_uuids - self._archive_package_uuids)
+         # ONE bulk API call for all packages
+         confirmed_archives, api_succeeded = self._bulk_detect_archives(all_package_uuids)
+         debug(f"JDPackageCache: Bulk API succeeded={api_succeeded}, confirmed={len(confirmed_archives)} archives")

-         if unchecked_package_uuids:
-             try:
-                 # One API call for ALL unchecked packages
-                 archive_infos = self._device.extraction.get_archive_info([], unchecked_package_uuids)
-                 if archive_infos:
-                     for archive_info in archive_infos:
-                         if archive_info:
-                             # Extract package UUID from response
-                             pkg_uuid = archive_info.get("packageUUID")
-                             if pkg_uuid:
-                                 self._archive_package_uuids.add(pkg_uuid)
-             except:
-                 pass
-
-         return self._archive_package_uuids
+         # For packages NOT confirmed as archives, apply safety fallbacks
+         unconfirmed = all_package_uuids - confirmed_archives
+         debug(f"JDPackageCache: {len(unconfirmed)} packages need fallback checking")
+
+         for pkg_uuid in unconfirmed:
+             # Fallback 1: Check file extensions
+             if self._has_archive_extension(pkg_uuid, links):
+                 debug(f"JDPackageCache: Package {pkg_uuid} confirmed as archive via extension fallback")
+                 confirmed_archives.add(pkg_uuid)
+             # Fallback 2: If bulk API failed completely, assume archive (safe)
+             elif not api_succeeded:
+                 debug(f"JDPackageCache: SAFETY - Bulk API failed, assuming package {pkg_uuid} is archive")
+                 confirmed_archives.add(pkg_uuid)
+             else:
+                 debug(f"JDPackageCache: Package {pkg_uuid} confirmed as NON-archive (API worked, no extension match)")
+
+         # Cache results for is_package_archive() lookups
+         for pkg_uuid in all_package_uuids:
+             self._archive_cache[pkg_uuid] = pkg_uuid in confirmed_archives
+
+         debug(
+             f"JDPackageCache: Final archive detection: {len(confirmed_archives)}/{len(all_package_uuids)} packages are archives")
+         return confirmed_archives
+
+     def is_package_archive(self, package_uuid, links=None):
+         """
+         Check if a package contains archive files.
+
+         Prefer calling detect_all_archives() first for efficiency.
+         This method is for single lookups or cache hits.
+
+         SAFETY: On API error, defaults to True (assume archive) to prevent
+         premature "finished" status.
+         """
+         if package_uuid is None:
+             debug("JDPackageCache: is_package_archive called with None UUID")
+             return False
+
+         if package_uuid in self._archive_cache:
+             self._cache_hits += 1
+             cached = self._archive_cache[package_uuid]
+             debug(f"JDPackageCache: is_package_archive({package_uuid}) = {cached} (cached)")
+             return cached
+
+         debug(f"JDPackageCache: is_package_archive({package_uuid}) - cache miss, querying API")
+
+         # Single package lookup (fallback if detect_all_archives wasn't called)
+         is_archive = None
+         api_failed = False
+
+         try:
+             self._api_calls += 1
+             archive_info = self._device.extraction.get_archive_info([], [package_uuid])
+             debug(f"JDPackageCache: Single get_archive_info returned: {archive_info}")
+             # Original logic: is_archive = True if archive_info and archive_info[0] else False
+             is_archive = True if archive_info and archive_info[0] else False
+             debug(f"JDPackageCache: API says is_archive = {is_archive}")
+         except Exception as e:
+             api_failed = True
+             debug(f"JDPackageCache: Single archive detection API FAILED for {package_uuid}: {type(e).__name__}: {e}")
+
+         # Fallback: check file extensions if API failed or returned False
+         if (api_failed or not is_archive) and links:
+             if self._has_archive_extension(package_uuid, links):
+                 debug(f"JDPackageCache: Package {package_uuid} confirmed as archive via extension fallback")
+                 is_archive = True
+
+         # SAFETY: If API failed and no extension detected, assume archive (conservative)
+         if is_archive is None:
+             debug(f"JDPackageCache: SAFETY - Detection uncertain for {package_uuid}, assuming archive")
+             is_archive = True
+
+         self._archive_cache[package_uuid] = is_archive
+         debug(f"JDPackageCache: is_package_archive({package_uuid}) = {is_archive} (final)")
+         return is_archive
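
The removed docstring's usage pattern still applies to the reworked class; what 1.32.0 changes is that archive detection now happens in one bulk call whose result is cached per package UUID. A minimal caller-side sketch of the new surface (not part of the diff; it assumes `device` is a connected My JDownloader device object, as used by the rest of Quasarr):

    cache = JDPackageCache(device)

    packages = cache.downloader_packages   # lazy: first access hits the API, later accesses are cached
    links = cache.downloader_links         # same lazy/cached behaviour for the link list

    # One bulk extraction.get_archive_info call, plus extension fallbacks for
    # packages the API did not confirm.
    archive_uuids = cache.detect_all_archives(packages, links)

    for package in packages:
        if cache.is_package_archive(package.get("uuid"), links):
            pass  # served from _archive_cache after detect_all_archives(); wait for extraction
        else:
            pass  # safe to treat as finished once the download completes

    print(cache.get_stats())  # e.g. "3 API calls | 5 packages, 40 links cached"
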
@@ -6,9 +6,12 @@ import os
  import re
  import socket
  import sys
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from io import BytesIO
  from urllib.parse import urlparse

  import requests
+ from PIL import Image

  # Fallback user agent when FlareSolverr is not available
  FALLBACK_USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36"
@@ -187,3 +190,177 @@ def is_site_usable(shared_state, shorthand):
      password = config.get('password')

      return bool(user and password)
+
+
+ # =============================================================================
+ # LINK STATUS CHECKING
+ # =============================================================================
+
+ def generate_status_url(href, crypter_type):
+     """
+     Generate a status URL for crypters that support it.
+     Returns None if status URL cannot be generated.
+     """
+     if crypter_type == "hide":
+         # hide.cx links: https://hide.cx/folder/{UUID} or /container/{UUID} → https://hide.cx/state/{UUID}
+         match = re.search(r'hide\.cx/(?:folder/|container/)?([a-f0-9-]{36})', href, re.IGNORECASE)
+         if match:
+             uuid = match.group(1)
+             return f"https://hide.cx/state/{uuid}"
+
+     elif crypter_type == "tolink":
+         # tolink links: https://tolink.to/f/{ID} → https://tolink.to/f/{ID}/s/status.png
+         match = re.search(r'tolink\.to/f/([a-zA-Z0-9]+)', href, re.IGNORECASE)
+         if match:
+             link_id = match.group(1)
+             return f"https://tolink.to/f/{link_id}/s/status.png"
+
+     return None
+
+
+ def detect_crypter_type(url):
+     """Detect crypter type from URL for status checking."""
+     url_lower = url.lower()
+     if 'hide.' in url_lower:
+         return "hide"
+     elif 'tolink.' in url_lower:
+         return "tolink"
+     elif 'filecrypt.' in url_lower:
+         return "filecrypt"
+     elif 'keeplinks.' in url_lower:
+         return "keeplinks"
+     return None
+
+
+ def image_has_green(image_data):
+     """
+     Analyze image data to check if it contains green pixels.
+     Returns True if any significant green is detected (indicating online status).
+     """
+     try:
+         img = Image.open(BytesIO(image_data))
+         # Convert palette images with transparency to RGBA first to avoid warning
+         if img.mode == 'P' and 'transparency' in img.info:
+             img = img.convert('RGBA')
+         img = img.convert('RGB')
+
+         pixels = list(img.getdata())
+
+         for r, g, b in pixels:
+             # Check if pixel is greenish: green channel is dominant
+             # and has a reasonable absolute value
+             if g > 100 and g > r * 1.3 and g > b * 1.3:
+                 return True
+
+         return False
+     except Exception:
+         # If we can't analyze, assume online to not skip valid links
+         return True
+
+
+ def fetch_status_image(status_url, shared_state=None):
+     """
+     Fetch a status image and return (status_url, image_data).
+     Returns (status_url, None) on failure.
+     """
+     try:
+         headers = {}
+         if shared_state:
+             user_agent = shared_state.values.get("user_agent")
+             if user_agent:
+                 headers["User-Agent"] = user_agent
+         response = requests.get(status_url, headers=headers, timeout=10)
+         if response.status_code == 200:
+             return (status_url, response.content)
+     except Exception:
+         pass
+     return (status_url, None)
+
+
+ def check_links_online_status(links_with_status, shared_state=None):
+     """
+     Check online status for links that have status URLs.
+     Returns list of links that are online (or have no status URL to check).
+
+     links_with_status: list of [href, identifier, status_url] where status_url can be None
+     shared_state: optional shared state for user agent
+     """
+     links_to_check = [(i, link) for i, link in enumerate(links_with_status) if link[2]]
+
+     if not links_to_check:
+         # No status URLs to check, return all links as potentially online
+         return [[link[0], link[1]] for link in links_with_status]
+
+     # Batch fetch status images
+     status_results = {}  # status_url -> has_green
+     status_urls = list(set(link[2] for _, link in links_to_check))
+
+     batch_size = 10
+     for i in range(0, len(status_urls), batch_size):
+         batch = status_urls[i:i + batch_size]
+         with ThreadPoolExecutor(max_workers=batch_size) as executor:
+             futures = [executor.submit(fetch_status_image, url, shared_state) for url in batch]
+             for future in as_completed(futures):
+                 try:
+                     status_url, image_data = future.result()
+                     if image_data:
+                         status_results[status_url] = image_has_green(image_data)
+                     else:
+                         # Could not fetch, assume online
+                         status_results[status_url] = True
+                 except Exception:
+                     pass
+
+     # Filter to online links
+     online_links = []
+
+     for link in links_with_status:
+         href, identifier, status_url = link
+         if not status_url:
+             # No status URL, include link
+             online_links.append([href, identifier])
+         elif status_url in status_results:
+             if status_results[status_url]:
+                 online_links.append([href, identifier])
+         else:
+             # Status check failed, include link
+             online_links.append([href, identifier])
+
+     return online_links
+
+
+ def filter_offline_links(links, shared_state=None, log_func=None):
+     """
+     Filter out offline links from a list of [url, identifier] pairs.
+     Only checks links where status can be verified (hide.cx, tolink).
+     Returns filtered list of [url, identifier] pairs.
+     """
+     if not links:
+         return links
+
+     # Build list with status URLs
+     links_with_status = []
+     for link in links:
+         url = link[0]
+         identifier = link[1] if len(link) > 1 else "unknown"
+         crypter_type = detect_crypter_type(url)
+         status_url = generate_status_url(url, crypter_type) if crypter_type else None
+         links_with_status.append([url, identifier, status_url])
+
+     # Check if any links can be verified
+     verifiable_count = sum(1 for l in links_with_status if l[2])
+     if verifiable_count == 0:
+         # Nothing to verify, return original links
+         return links
+
+     if log_func:
+         log_func(f"Checking online status for {verifiable_count} verifiable link(s)...")
+
+     # Check status and filter
+     online_links = check_links_online_status(links_with_status, shared_state)
+
+     if log_func and len(online_links) < len(links):
+         offline_count = len(links) - len(online_links)
+         log_func(f"Filtered out {offline_count} offline link(s)")
+
+     return online_links
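
Taken together, these helpers form a small pipeline: detect_crypter_type() and generate_status_url() map a hide.cx or tolink.to link to its status image, fetch_status_image() downloads the images in a thread pool, image_has_green() treats a dominant green pixel as "online", and anything that cannot be generated, fetched, or analyzed is kept rather than dropped. A minimal caller-side sketch (not part of the diff; the URLs and identifiers below are made up for illustration):

    links = [
        ["https://hide.cx/folder/123e4567-e89b-12d3-a456-426614174000", "mirror-1"],  # hypothetical UUID
        ["https://tolink.to/f/abc123", "mirror-2"],                                   # hypothetical ID
        ["https://filecrypt.example/container/xyz", "mirror-3"],                      # no status URL, always kept
    ]

    online = filter_offline_links(links, shared_state=None, log_func=print)
    # `online` is again a list of [url, identifier] pairs; a link is removed only
    # when its status image was fetched successfully and contained no green.
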
@@ -8,7 +8,7 @@ import requests


  def get_version():
-     return "1.30.0"
+     return "1.32.0"


  def get_latest_version():
@@ -131,6 +131,7 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
  def wx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
      """
      Search using internal API.
+     Deduplicates results by fulltitle - each unique release appears only once.
      """
      releases = []
      host = shared_state.values["config"]("Hostnames").get(hostname)
@@ -201,6 +202,9 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None

          debug(f"{hostname.upper()}: Found {len(items)} items in search results")

+         # Track seen titles to deduplicate (mirrors have same fulltitle)
+         seen_titles = set()
+
          for item in items:
              try:
                  uid = item.get('uid')
@@ -238,29 +242,34 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                  title = title.replace(' ', '.')

                  if shared_state.is_valid_release(title, request_from, search_string, season, episode):
-                     published = detail_item.get('updated_at') or detail_item.get('created_at')
-                     if not published:
-                         published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
-                     password = f"www.{host}"
-
-                     payload = urlsafe_b64encode(
-                         f"{title}|{source}|{mirror}|0|{password}|{item_imdb_id or ''}".encode("utf-8")
-                     ).decode("utf-8")
-                     link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
-
-                     releases.append({
-                         "details": {
-                             "title": title,
-                             "hostname": hostname,
-                             "imdb_id": item_imdb_id,
-                             "link": link,
-                             "mirror": mirror,
-                             "size": 0,
-                             "date": published,
-                             "source": source
-                         },
-                         "type": "protected"
-                     })
+                     # Skip if we've already seen this exact title
+                     if title in seen_titles:
+                         debug(f"{hostname.upper()}: Skipping duplicate main title: {title}")
+                     else:
+                         seen_titles.add(title)
+                         published = detail_item.get('updated_at') or detail_item.get('created_at')
+                         if not published:
+                             published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+                         password = f"www.{host}"
+
+                         payload = urlsafe_b64encode(
+                             f"{title}|{source}|{mirror}|0|{password}|{item_imdb_id or ''}".encode("utf-8")
+                         ).decode("utf-8")
+                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                         releases.append({
+                             "details": {
+                                 "title": title,
+                                 "hostname": hostname,
+                                 "imdb_id": item_imdb_id,
+                                 "link": link,
+                                 "mirror": mirror,
+                                 "size": 0,
+                                 "date": published,
+                                 "source": source
+                             },
+                             "type": "protected"
+                         })

                  if 'releases' in detail_item and isinstance(detail_item['releases'], list):
                      debug(f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}")
@@ -279,6 +288,13 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                              debug(f"{hostname.upper()}: ✗ Release filtered out: {release_title}")
                              continue

+                         # Skip if we've already seen this exact title (deduplication)
+                         if release_title in seen_titles:
+                             debug(f"{hostname.upper()}: Skipping duplicate release: {release_title}")
+                             continue
+
+                         seen_titles.add(release_title)
+
                          release_uid = release.get('uid')
                          if release_uid:
                              release_source = f"https://{host}/detail/{uid}?release={release_uid}"
@@ -323,7 +339,7 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                  debug(f"{hostname.upper()}: {traceback.format_exc()}")
                  continue

-         debug(f"{hostname.upper()}: Returning {len(releases)} total releases")
+         debug(f"{hostname.upper()}: Returning {len(releases)} total releases (deduplicated)")

      except Exception as e:
          info(f"Error in {hostname.upper()} search: {e}")
quasarr/storage/setup.py CHANGED
@@ -239,7 +239,9 @@ def hostname_form_html(shared_state, message, show_restart_button=False, show_sk
          .import-status {{
              margin-top: 0.5rem;
              font-size: 0.875rem;
-             min-height: 1.25rem;
+         }}
+         .import-status:empty {{
+             display: none;
          }}
          .import-status.success {{ color: #198754; }}
          .import-status.error {{ color: #dc3545; }}
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: quasarr
- Version: 1.30.0
+ Version: 1.32.0
  Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
  Home-page: https://github.com/rix1337/Quasarr
  Author: rix1337