quasarr 1.26.3.tar.gz → 1.26.5.tar.gz

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of quasarr may be problematic.
Files changed (84)
  1. {quasarr-1.26.3 → quasarr-1.26.5}/PKG-INFO +2 -2
  2. quasarr-1.26.5/quasarr/downloads/sources/dl.py +478 -0
  3. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/version.py +1 -1
  4. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/he.py +6 -0
  5. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/nk.py +6 -0
  6. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/wx.py +11 -11
  7. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/PKG-INFO +2 -2
  8. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/requires.txt +1 -1
  9. quasarr-1.26.3/quasarr/downloads/sources/dl.py +0 -191
  10. {quasarr-1.26.3 → quasarr-1.26.5}/LICENSE +0 -0
  11. {quasarr-1.26.3 → quasarr-1.26.5}/README.md +0 -0
  12. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/__init__.py +0 -0
  13. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/api/__init__.py +0 -0
  14. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/api/arr/__init__.py +0 -0
  15. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/api/captcha/__init__.py +0 -0
  16. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/api/config/__init__.py +0 -0
  17. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/api/sponsors_helper/__init__.py +0 -0
  18. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/api/statistics/__init__.py +0 -0
  19. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/__init__.py +0 -0
  20. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/linkcrypters/__init__.py +0 -0
  21. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/linkcrypters/al.py +0 -0
  22. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/linkcrypters/filecrypt.py +0 -0
  23. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/linkcrypters/hide.py +0 -0
  24. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/packages/__init__.py +0 -0
  25. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/__init__.py +0 -0
  26. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/al.py +0 -0
  27. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/by.py +0 -0
  28. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/dd.py +0 -0
  29. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/dj.py +0 -0
  30. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/dt.py +0 -0
  31. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/dw.py +0 -0
  32. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/he.py +0 -0
  33. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/mb.py +0 -0
  34. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/nk.py +0 -0
  35. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/nx.py +0 -0
  36. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/sf.py +0 -0
  37. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/sj.py +0 -0
  38. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/sl.py +0 -0
  39. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/wd.py +0 -0
  40. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/downloads/sources/wx.py +0 -0
  41. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/__init__.py +0 -0
  42. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/cloudflare.py +0 -0
  43. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/html_images.py +0 -0
  44. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/html_templates.py +0 -0
  45. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/imdb_metadata.py +0 -0
  46. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/log.py +0 -0
  47. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/myjd_api.py +0 -0
  48. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/notifications.py +0 -0
  49. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/obfuscated.py +0 -0
  50. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/sessions/__init__.py +0 -0
  51. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/sessions/al.py +0 -0
  52. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/sessions/dd.py +0 -0
  53. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/sessions/dl.py +0 -0
  54. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/sessions/nx.py +0 -0
  55. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/shared_state.py +0 -0
  56. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/statistics.py +0 -0
  57. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/web_server.py +0 -0
  58. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/__init__.py +0 -0
  59. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/__init__.py +0 -0
  60. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/al.py +0 -0
  61. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/by.py +0 -0
  62. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/dd.py +0 -0
  63. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/dj.py +0 -0
  64. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/dl.py +0 -0
  65. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/dt.py +0 -0
  66. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/dw.py +0 -0
  67. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/fx.py +0 -0
  68. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/mb.py +0 -0
  69. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/nx.py +0 -0
  70. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/sf.py +0 -0
  71. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/sj.py +0 -0
  72. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/sl.py +0 -0
  73. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/wd.py +0 -0
  74. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/storage/__init__.py +0 -0
  75. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/storage/config.py +0 -0
  76. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/storage/setup.py +0 -0
  77. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr/storage/sqlite_database.py +0 -0
  78. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/SOURCES.txt +0 -0
  79. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/dependency_links.txt +0 -0
  80. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/entry_points.txt +0 -0
  81. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/not-zip-safe +0 -0
  82. {quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/top_level.txt +0 -0
  83. {quasarr-1.26.3 → quasarr-1.26.5}/setup.cfg +0 -0
  84. {quasarr-1.26.3 → quasarr-1.26.5}/setup.py +0 -0
{quasarr-1.26.3 → quasarr-1.26.5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 1.26.3
+Version: 1.26.5
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Home-page: https://github.com/rix1337/Quasarr
 Author: rix1337
@@ -10,7 +10,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: beautifulsoup4>=4.14.2
+Requires-Dist: beautifulsoup4>=4.14.3
 Requires-Dist: bottle>=0.13.4
 Requires-Dist: dukpy>=0.5.0
 Requires-Dist: pillow>=12.0.0
quasarr-1.26.5/quasarr/downloads/sources/dl.py
@@ -0,0 +1,478 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from io import BytesIO
+
+from PIL import Image
+from bs4 import BeautifulSoup, NavigableString
+
+from quasarr.providers.log import info, debug
+from quasarr.providers.sessions.dl import retrieve_and_validate_session, fetch_via_requests_session, invalidate_session
+
+hostname = "dl"
+
+# Common TLDs to strip for mirror name comparison
+COMMON_TLDS = {'.com', '.net', '.io', '.cc', '.to', '.me', '.org', '.co', '.de', '.eu', '.info'}
+
+
+def normalize_mirror_name(name):
+    """
+    Normalize mirror name for comparison by lowercasing and removing TLDs.
+    e.g., "DDownload.com" -> "ddownload", "Rapidgator.net" -> "rapidgator"
+    """
+    if not name:
+        return ""
+    normalized = name.lower().strip()
+    for tld in COMMON_TLDS:
+        if normalized.endswith(tld):
+            normalized = normalized[:-len(tld)]
+            break
+    return normalized
+
+
+def extract_password_from_post(soup, host):
+    """
+    Extract password from forum post using multiple strategies.
+    Returns empty string if no password found or if explicitly marked as 'no password'.
+    """
+    post_text = soup.get_text()
+    post_text = re.sub(r'\s+', ' ', post_text).strip()
+
+    password_pattern = r'(?:passwort|password|pass|pw)[\s:]+([a-zA-Z0-9._-]{2,50})'
+    match = re.search(password_pattern, post_text, re.IGNORECASE)
+
+    if match:
+        password = match.group(1).strip()
+        if not re.match(r'^(?:download|mirror|link|episode|info|mediainfo|spoiler|hier|click|klick|kein|none|no)',
+                        password, re.IGNORECASE):
+            debug(f"Found password: {password}")
+            return password
+
+    no_password_patterns = [
+        r'(?:passwort|password|pass|pw)[\s:]*(?:kein(?:es)?|none|no|nicht|not|nein|-|–|—)',
+        r'(?:kein(?:es)?|none|no|nicht|not|nein)\s*(?:passwort|password|pass|pw)',
+    ]
+
+    for pattern in no_password_patterns:
+        if re.search(pattern, post_text, re.IGNORECASE):
+            debug("No password required (explicitly stated)")
+            return ""
+
+    default_password = f"www.{host}"
+    debug(f"No password found, using default: {default_password}")
+    return default_password
+
+
+def extract_mirror_name_from_link(link_element):
+    """
+    Extract the mirror/hoster name from the link text or nearby text.
+    """
+    link_text = link_element.get_text(strip=True)
+    common_non_hosters = {'download', 'mirror', 'link', 'hier', 'click', 'klick', 'code', 'spoiler'}
+
+    # Known hoster patterns for image detection
+    known_hosters = {
+        'rapidgator': ['rapidgator', 'rg'],
+        'ddownload': ['ddownload', 'ddl'],
+        'turbobit': ['turbobit'],
+        '1fichier': ['1fichier'],
+    }
+
+    # Skip if link text is a URL
+    if link_text and len(link_text) > 2 and not link_text.startswith('http'):
+        cleaned = re.sub(r'[^\w\s-]', '', link_text).strip().lower()
+        if cleaned and cleaned not in common_non_hosters:
+            main_part = cleaned.split()[0] if ' ' in cleaned else cleaned
+            if 2 < len(main_part) < 30:
+                return main_part
+
+    # Check previous siblings including text nodes
+    for sibling in link_element.previous_siblings:
+        # Handle text nodes (NavigableString)
+        if isinstance(sibling, NavigableString):
+            text = sibling.strip()
+            if text:
+                # Remove common separators like @ : -
+                cleaned = re.sub(r'[@:\-–—\s]+$', '', text).strip().lower()
+                cleaned = re.sub(r'[^\w\s.-]', '', cleaned).strip()
+                if cleaned and len(cleaned) > 2 and cleaned not in common_non_hosters:
+                    # Take the last word as mirror name (e.g., "Rapidgator" from "Rapidgator @")
+                    parts = cleaned.split()
+                    if parts:
+                        mirror = parts[-1]
+                        if 2 < len(mirror) < 30:
+                            return mirror
+            continue
+
+        # Skip non-Tag elements
+        if not hasattr(sibling, 'name') or sibling.name is None:
+            continue
+
+        # Skip spoiler elements entirely
+        classes = sibling.get('class', [])
+        if classes and any('spoiler' in str(c).lower() for c in classes):
+            continue
+
+        # Check for images with hoster names in src/alt/data-url
+        img = sibling.find('img') if sibling.name != 'img' else sibling
+        if img:
+            img_identifiers = (img.get('src', '') + img.get('alt', '') + img.get('data-url', '')).lower()
+            for hoster, patterns in known_hosters.items():
+                if any(pattern in img_identifiers for pattern in patterns):
+                    return hoster
+
+        sibling_text = sibling.get_text(strip=True).lower()
+        # Skip if text is too long - likely NFO content or other non-mirror text
+        if len(sibling_text) > 30:
+            continue
+        if sibling_text and len(sibling_text) > 2 and sibling_text not in common_non_hosters:
+            cleaned = re.sub(r'[^\w\s-]', '', sibling_text).strip()
+            if cleaned and 2 < len(cleaned) < 30:
+                return cleaned.split()[0] if ' ' in cleaned else cleaned
+
+    return None
+
+
+def generate_status_url(href, crypter_type):
+    """
+    Generate a status URL for crypters that support it.
+    Returns None if status URL cannot be generated.
+    """
+    if crypter_type == "hide":
+        # hide.cx links: https://hide.cx/folder/{UUID} → https://hide.cx/state/{UUID}
+        match = re.search(r'hide\.cx/(?:folder/)?([a-f0-9-]{36})', href, re.IGNORECASE)
+        if match:
+            uuid = match.group(1)
+            return f"https://hide.cx/state/{uuid}"
+
+    elif crypter_type == "tolink":
+        # tolink links: https://tolink.to/f/{ID} → https://tolink.to/f/{ID}/s/status.png
+        match = re.search(r'tolink\.to/f/([a-zA-Z0-9]+)', href, re.IGNORECASE)
+        if match:
+            link_id = match.group(1)
+            return f"https://tolink.to/f/{link_id}/s/status.png"
+
+    return None
+
+
+def extract_status_url_from_html(link_element, crypter_type):
+    """
+    Extract status image URL from HTML near the link element.
+    Used primarily for FileCrypt where status URLs cannot be generated.
+    """
+    if crypter_type != "filecrypt":
+        return None
+
+    # Look for status image in the link itself
+    img = link_element.find('img')
+    if img:
+        for attr in ['src', 'data-url']:
+            url = img.get(attr, '')
+            if 'filecrypt.cc/Stat/' in url:
+                return url
+
+    # Look in siblings
+    for sibling in link_element.next_siblings:
+        if not hasattr(sibling, 'name') or sibling.name is None:
+            continue
+        if sibling.name == 'img':
+            for attr in ['src', 'data-url']:
+                url = sibling.get(attr, '')
+                if 'filecrypt.cc/Stat/' in url:
+                    return url
+        # Check nested images
+        nested_img = sibling.find('img') if hasattr(sibling, 'find') else None
+        if nested_img:
+            for attr in ['src', 'data-url']:
+                url = nested_img.get(attr, '')
+                if 'filecrypt.cc/Stat/' in url:
+                    return url
+        # Stop at next link
+        if sibling.name == 'a':
+            break
+
+    return None
+
+
+def build_filecrypt_status_map(soup):
+    """
+    Build a map of mirror names to FileCrypt status URLs.
+    Handles cases where status images are in a separate section from links.
+    Returns dict: {mirror_name_lowercase: status_url}
+    """
+    status_map = {}
+
+    # Find all FileCrypt status images in the post
+    for img in soup.find_all('img'):
+        status_url = None
+        for attr in ['src', 'data-url']:
+            url = img.get(attr, '')
+            if 'filecrypt.cc/Stat/' in url:
+                status_url = url
+                break
+
+        if not status_url:
+            continue
+
+        # Look for associated mirror name in previous text/siblings
+        mirror_name = None
+
+        # Check parent's previous siblings and text nodes
+        parent = img.parent
+        if parent:
+            # Get all previous text content before this image
+            prev_text = ""
+            for prev in parent.previous_siblings:
+                if hasattr(prev, 'get_text'):
+                    prev_text = prev.get_text(strip=True)
+                elif isinstance(prev, NavigableString):
+                    prev_text = prev.strip()
+                if prev_text:
+                    break
+
+            # Also check text directly before within parent
+            for prev in img.previous_siblings:
+                if isinstance(prev, NavigableString) and prev.strip():
+                    prev_text = prev.strip()
+                    break
+                elif hasattr(prev, 'get_text'):
+                    text = prev.get_text(strip=True)
+                    if text:
+                        prev_text = text
+                        break
+
+            if prev_text:
+                # Clean up the text to get mirror name
+                cleaned = re.sub(r'[^\w\s.-]', '', prev_text).strip().lower()
+                # Take last word/phrase as it's likely the mirror name
+                parts = cleaned.split()
+                if parts:
+                    mirror_name = parts[-1] if len(parts[-1]) > 2 else cleaned
+
+        if mirror_name and mirror_name not in status_map:
+            status_map[mirror_name] = status_url
+            debug(f"Mapped status image for mirror: {mirror_name} -> {status_url}")
+
+    return status_map
+
+
+def image_has_green(image_data):
+    """
+    Analyze image data to check if it contains green pixels.
+    Returns True if any significant green is detected (indicating online status).
+    """
+    try:
+        img = Image.open(BytesIO(image_data))
+        img = img.convert('RGB')
+
+        pixels = list(img.getdata())
+
+        for r, g, b in pixels:
+            # Check if pixel is greenish: green channel is dominant
+            # and has a reasonable absolute value
+            if g > 100 and g > r * 1.3 and g > b * 1.3:
+                return True
+
+        return False
+    except Exception as e:
+        debug(f"Error analyzing status image: {e}")
+        # If we can't analyze, assume online to not skip valid links
+        return True
+
+
+def fetch_status_image(status_url):
+    """
+    Fetch a status image and return (status_url, image_data).
+    Returns (status_url, None) on failure.
+    """
+    try:
+        import requests
+        response = requests.get(status_url, timeout=10)
+        if response.status_code == 200:
+            return (status_url, response.content)
+    except Exception as e:
+        debug(f"Error fetching status image {status_url}: {e}")
+    return (status_url, None)
+
+
+def check_links_online_status(links_with_status):
+    """
+    Check online status for links that have status URLs.
+    Returns list of links that are online (or have no status URL to check).
+
+    links_with_status: list of [href, identifier, status_url] where status_url can be None
+    """
+
+    links_to_check = [(i, link) for i, link in enumerate(links_with_status) if link[2]]
+
+    if not links_to_check:
+        # No status URLs to check, return all links as potentially online
+        return [[link[0], link[1]] for link in links_with_status]
+
+    # Batch fetch status images
+    status_results = {}  # status_url -> has_green
+    status_urls = list(set(link[2] for _, link in links_to_check))
+
+    batch_size = 10
+    for i in range(0, len(status_urls), batch_size):
+        batch = status_urls[i:i + batch_size]
+        with ThreadPoolExecutor(max_workers=batch_size) as executor:
+            futures = [executor.submit(fetch_status_image, url) for url in batch]
+            for future in as_completed(futures):
+                try:
+                    status_url, image_data = future.result()
+                    if image_data:
+                        status_results[status_url] = image_has_green(image_data)
+                    else:
+                        # Could not fetch, assume online
+                        status_results[status_url] = True
+                except Exception as e:
+                    debug(f"Error checking status: {e}")
+
+    # Filter to online links
+    online_links = []
+
+    for link in links_with_status:
+        href, identifier, status_url = link
+        if not status_url:
+            # No status URL, include link (keeplinks case)
+            online_links.append([href, identifier])
+        elif status_url in status_results:
+            if status_results[status_url]:
+                online_links.append([href, identifier])
+                debug(f"Link online: {identifier} ({href})")
+            else:
+                debug(f"Link offline: {identifier} ({href})")
+        else:
+            # Status check failed, include link
+            online_links.append([href, identifier])

+    return online_links
+
+
+def extract_links_and_password_from_post(post_content, host):
+    """
+    Extract download links and password from a forum post.
+    Returns links with status URLs for online checking.
+    """
+    links = []  # [href, identifier, status_url]
+    soup = BeautifulSoup(post_content, 'html.parser')
+
+    # Build status map for FileCrypt links (handles separated status images)
+    filecrypt_status_map = build_filecrypt_status_map(soup)
+
+    for link in soup.find_all('a', href=True):
+        href = link.get('href')
+
+        if href.startswith('/') or host in href:
+            continue
+
+        if re.search(r'filecrypt\.', href, re.IGNORECASE):
+            crypter_type = "filecrypt"
+        elif re.search(r'hide\.', href, re.IGNORECASE):
+            crypter_type = "hide"
+        elif re.search(r'keeplinks\.', href, re.IGNORECASE):
+            crypter_type = "keeplinks"
+        elif re.search(r'tolink\.', href, re.IGNORECASE):
+            crypter_type = "tolink"
+        else:
+            debug(f"Unsupported link crypter/hoster found: {href}")
+            continue
+
+        mirror_name = extract_mirror_name_from_link(link)
+        identifier = mirror_name if mirror_name else crypter_type
+
+        # Get status URL - try extraction first, then status map, then generation
+        status_url = extract_status_url_from_html(link, crypter_type)
+
+        if not status_url and crypter_type == "filecrypt" and mirror_name:
+            # Try to find in status map by mirror name (normalized, case-insensitive, TLD-stripped)
+            mirror_normalized = normalize_mirror_name(mirror_name)
+            for map_key, map_url in filecrypt_status_map.items():
+                map_key_normalized = normalize_mirror_name(map_key)
+                if mirror_normalized in map_key_normalized or map_key_normalized in mirror_normalized:
+                    status_url = map_url
+                    break
+
+        if not status_url:
+            status_url = generate_status_url(href, crypter_type)
+
+        # Avoid duplicates (check href and identifier)
+        if not any(l[0] == href and l[1] == identifier for l in links):
+            links.append([href, identifier, status_url])
+            status_info = f"status: {status_url}" if status_url else "no status URL"
+            if mirror_name:
+                debug(f"Found {crypter_type} link for mirror: {mirror_name} ({status_info})")
+            else:
+                debug(f"Found {crypter_type} link ({status_info})")
+
+    password = ""
+    if links:
+        password = extract_password_from_post(soup, host)
+
+    return links, password
+
+
+def get_dl_download_links(shared_state, url, mirror, title, password):
+    """
+    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
+
+    DL source handler - extracts links and password from forum thread.
+    Iterates through posts to find one with online links.
+
+    Note: The password parameter is unused intentionally - password must be extracted from the post.
+    """
+
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    sess = retrieve_and_validate_session(shared_state)
+    if not sess:
+        info(f"Could not retrieve valid session for {host}")
+        return {"links": [], "password": ""}
+
+    try:
+        response = fetch_via_requests_session(shared_state, method="GET", target_url=url, timeout=30)
+
+        if response.status_code != 200:
+            info(f"Failed to load thread page: {url} (Status: {response.status_code})")
+            return {"links": [], "password": ""}
+
+        soup = BeautifulSoup(response.text, 'html.parser')
+
+        # Get all posts in thread
+        posts = soup.select('article.message--post')
+        if not posts:
+            info(f"Could not find any posts in thread: {url}")
+            return {"links": [], "password": ""}
+
+        # Iterate through posts to find one with online links
+        for post_index, post in enumerate(posts):
+            post_content = post.select_one('div.bbWrapper')
+            if not post_content:
+                continue
+
+            links_with_status, extracted_password = extract_links_and_password_from_post(str(post_content), host)
+
+            if not links_with_status:
+                continue
+
+            # Check which links are online
+            online_links = check_links_online_status(links_with_status)
+
+            if online_links:
+                post_info = "first post" if post_index == 0 else f"post #{post_index + 1}"
+                debug(f"Found {len(online_links)} online link(s) in {post_info} for: {title}")
+                return {"links": online_links, "password": extracted_password}
+            else:
+                debug(f"All links in post #{post_index + 1} are offline, checking next post...")
+
+        info(f"No online download links found in any post: {url}")
+        return {"links": [], "password": ""}
+
+    except Exception as e:
+        info(f"Error extracting download links from {url}: {e}")
+        invalidate_session(shared_state)
+        return {"links": [], "password": ""}
{quasarr-1.26.3 → quasarr-1.26.5}/quasarr/providers/version.py
@@ -8,7 +8,7 @@ import requests
 
 
 def get_version():
-    return "1.26.3"
+    return "1.26.5"
 
 
 def get_latest_version():
{quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/he.py
@@ -90,6 +90,12 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
     else:
         imdb_id = None
 
+    if season:
+        source_search += f" S{int(season):02d}"
+
+    if episode:
+        source_search += f"E{int(episode):02d}"
+
     url = f'https://{host}/tag/{tag}/'
 
     headers = {"User-Agent": shared_state.values["user_agent"]}
{quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/nk.py
@@ -81,6 +81,12 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
     else:
         imdb_id = None
 
+    if season:
+        source_search += f" S{int(season):02d}"
+
+    if episode:
+        source_search += f"E{int(episode):02d}"
+
     url = f'https://{host}/search'
     headers = {"User-Agent": shared_state.values["user_agent"]}
     data = {"search": source_search}
{quasarr-1.26.3 → quasarr-1.26.5}/quasarr/search/sources/wx.py
@@ -52,7 +52,7 @@ def wx_feed(shared_state, start_time, request_from, mirror=None):
     items = soup.find_all('item')
 
     if not items:
-        debug(f"{hostname.upper()}: No entries found in RSS feed")
+        info(f"{hostname.upper()}: No entries found in RSS feed")
         return releases
 
     debug(f"{hostname.upper()}: Found {len(items)} entries in RSS feed")
@@ -141,15 +141,15 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if imdb_id:
-        info(f"{hostname.upper()}: Received IMDb ID: {imdb_id}")
+        debug(f"{hostname.upper()}: Received IMDb ID: {imdb_id}")
         title = get_localized_title(shared_state, imdb_id, 'de')
         if not title:
-            info(f"{hostname.upper()}: no title for IMDb {imdb_id}")
+            debug(f"{hostname.upper()}: no title for IMDb {imdb_id}")
             return releases
-        info(f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'")
+        debug(f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'")
         search_string = html.unescape(title)
     else:
-        info(f"{hostname.upper()}: Using search string directly: '{search_string}'")
+        debug(f"{hostname.upper()}: Using search string directly: '{search_string}'")
 
     api_url = f'https://api.{host}/start/search'
 
@@ -179,13 +179,13 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
     elif "radarr" in request_from.lower():
         params['types'] = 'movie'
 
-    info(f"{hostname.upper()}: Searching: '{search_string}'")
+    debug(f"{hostname.upper()}: Searching: '{search_string}'")
 
     try:
        response = requests.get(api_url, headers=headers, params=params, timeout=10)
 
        if response.status_code != 200:
-            info(f"{hostname.upper()}: Search API returned status {response.status_code}")
+            debug(f"{hostname.upper()}: Search API returned status {response.status_code}")
            return releases
 
        data = response.json()
@@ -199,7 +199,7 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
         else:
             items = data if isinstance(data, list) else []
 
-        info(f"{hostname.upper()}: Found {len(items)} items in search results")
+        debug(f"{hostname.upper()}: Found {len(items)} items in search results")
 
         for item in items:
             try:
@@ -208,7 +208,7 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                     debug(f"{hostname.upper()}: Item has no UID, skipping")
                     continue
 
-                info(f"{hostname.upper()}: Fetching details for UID: {uid}")
+                debug(f"{hostname.upper()}: Fetching details for UID: {uid}")
 
                 detail_url = f'https://api.{host}/start/d/{uid}'
                 detail_response = requests.get(detail_url, headers=headers, timeout=10)
@@ -263,7 +263,7 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                 })
 
                 if 'releases' in detail_item and isinstance(detail_item['releases'], list):
-                    info(f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}")
+                    debug(f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}")
 
                     for release in detail_item['releases']:
                         try:
@@ -323,7 +323,7 @@ def wx_search(shared_state, start_time, request_from, search_string, mirror=None
                 debug(f"{hostname.upper()}: {traceback.format_exc()}")
                 continue
 
-        info(f"{hostname.upper()}: Returning {len(releases)} total releases")
+        debug(f"{hostname.upper()}: Returning {len(releases)} total releases")
 
    except Exception as e:
        info(f"Error in {hostname.upper()} search: {e}")
{quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: quasarr
-Version: 1.26.3
+Version: 1.26.5
 Summary: Quasarr connects JDownloader with Radarr, Sonarr and LazyLibrarian. It also decrypts links protected by CAPTCHAs.
 Home-page: https://github.com/rix1337/Quasarr
 Author: rix1337
@@ -10,7 +10,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: beautifulsoup4>=4.14.2
+Requires-Dist: beautifulsoup4>=4.14.3
 Requires-Dist: bottle>=0.13.4
 Requires-Dist: dukpy>=0.5.0
 Requires-Dist: pillow>=12.0.0
{quasarr-1.26.3 → quasarr-1.26.5}/quasarr.egg-info/requires.txt
@@ -1,4 +1,4 @@
-beautifulsoup4>=4.14.2
+beautifulsoup4>=4.14.3
 bottle>=0.13.4
 dukpy>=0.5.0
 pillow>=12.0.0
quasarr-1.26.3/quasarr/downloads/sources/dl.py
@@ -1,191 +0,0 @@
-# -*- coding: utf-8 -*-
-# Quasarr
-# Project by https://github.com/rix1337
-
-import re
-
-from bs4 import BeautifulSoup
-
-from quasarr.providers.log import info, debug
-from quasarr.providers.sessions.dl import retrieve_and_validate_session, fetch_via_requests_session, invalidate_session
-
-hostname = "dl"
-
-
-def extract_password_from_post(soup, host):
-    """
-    Extract password from forum post using multiple strategies.
-    Returns empty string if no password found or if explicitly marked as 'no password'.
-    """
-    post_text = soup.get_text()
-    post_text = re.sub(r'\s+', ' ', post_text).strip()
-
-    password_pattern = r'(?:passwort|password|pass|pw)[\s:]+([a-zA-Z0-9._-]{2,50})'
-    match = re.search(password_pattern, post_text, re.IGNORECASE)
-
-    if match:
-        password = match.group(1).strip()
-        if not re.match(r'^(?:download|mirror|link|episode|info|mediainfo|spoiler|hier|click|klick|kein|none|no)',
-                        password, re.IGNORECASE):
-            debug(f"Found password: {password}")
-            return password
-
-    no_password_patterns = [
-        r'(?:passwort|password|pass|pw)[\s:]*(?:kein(?:es)?|none|no|nicht|not|nein|-|–|—)',
-        r'(?:kein(?:es)?|none|no|nicht|not|nein)\s*(?:passwort|password|pass|pw)',
-    ]
-
-    for pattern in no_password_patterns:
-        if re.search(pattern, post_text, re.IGNORECASE):
-            debug("No password required (explicitly stated)")
-            return ""
-
-    default_password = f"www.{host}"
-    debug(f"No password found, using default: {default_password}")
-    return default_password
-
-
-def extract_mirror_name_from_link(link_element):
-    """
-    Extract the mirror/hoster name from the link text or nearby text.
-    """
-    link_text = link_element.get_text(strip=True)
-    common_non_hosters = {'download', 'mirror', 'link', 'hier', 'click', 'klick', 'code', 'spoiler'}
-
-    # Known hoster patterns for image detection
-    known_hosters = {
-        'rapidgator': ['rapidgator', 'rg'],
-        'ddownload': ['ddownload', 'ddl'],
-        'turbobit': ['turbobit'],
-        '1fichier': ['1fichier'],
-    }
-
-    if link_text and len(link_text) > 2:
-        cleaned = re.sub(r'[^\w\s-]', '', link_text).strip().lower()
-        if cleaned and cleaned not in common_non_hosters:
-            main_part = cleaned.split()[0] if ' ' in cleaned else cleaned
-            if 2 < len(main_part) < 30:
-                return main_part
-
-    parent = link_element.parent
-    if parent:
-        for sibling in link_element.previous_siblings:
-            # Only process Tag elements, skip NavigableString (text nodes)
-            if not hasattr(sibling, 'name') or sibling.name is None:
-                continue
-
-            # Skip spoiler elements entirely
-            classes = sibling.get('class', [])
-            if classes and any('spoiler' in str(c).lower() for c in classes):
-                continue
-
-            # Check for images with hoster names in src/alt/data-url
-            img = sibling.find('img') if sibling.name != 'img' else sibling
-            if img:
-                img_identifiers = (img.get('src', '') + img.get('alt', '') + img.get('data-url', '')).lower()
-                for hoster, patterns in known_hosters.items():
-                    if any(pattern in img_identifiers for pattern in patterns):
-                        return hoster
-
-            sibling_text = sibling.get_text(strip=True).lower()
-            # Skip if text is too long - likely NFO content or other non-mirror text
-            if len(sibling_text) > 30:
-                continue
-            if sibling_text and len(sibling_text) > 2 and sibling_text not in common_non_hosters:
-                cleaned = re.sub(r'[^\w\s-]', '', sibling_text).strip()
-                if cleaned and 2 < len(cleaned) < 30:
-                    return cleaned.split()[0] if ' ' in cleaned else cleaned
-
-    return None
-
-
-def extract_links_and_password_from_post(post_content, host):
-    """
-    Extract download links and password from a forum post.
-    """
-    links = []
-    soup = BeautifulSoup(post_content, 'html.parser')
-
-    for link in soup.find_all('a', href=True):
-        href = link.get('href')
-
-        if href.startswith('/') or host in href:
-            continue
-
-        if re.search(r'filecrypt\.', href, re.IGNORECASE):
-            crypter_type = "filecrypt"
-        elif re.search(r'hide\.', href, re.IGNORECASE):
-            crypter_type = "hide"
-        elif re.search(r'keeplinks\.', href, re.IGNORECASE):
-            crypter_type = "keeplinks"
-        elif re.search(r'tolink\.', href, re.IGNORECASE):
-            crypter_type = "tolink"
-        else:
-            debug(f"Unsupported link crypter/hoster found: {href}")
-            continue
-
-        mirror_name = extract_mirror_name_from_link(link)
-        identifier = mirror_name if mirror_name else crypter_type
-
-        if [href, identifier] not in links:
-            links.append([href, identifier])
-            if mirror_name:
-                debug(f"Found {crypter_type} link for mirror: {mirror_name}")
-            else:
-                debug(f"Found {crypter_type} link (no mirror name detected)")
-
-    password = ""
-    if links:
-        password = extract_password_from_post(soup, host)
-
-    return links, password
-
-
-def get_dl_download_links(shared_state, url, mirror, title, password):
-    """
-    KEEP THE SIGNATURE EVEN IF SOME PARAMETERS ARE UNUSED!
-
-    DL source handler - extracts links and password from forum thread.
-
-    Note: The password parameter is unused intentionally - password must be extracted from the post.
-    """
-
-    host = shared_state.values["config"]("Hostnames").get(hostname)
-
-    sess = retrieve_and_validate_session(shared_state)
-    if not sess:
-        info(f"Could not retrieve valid session for {host}")
-        return {"links": [], "password": ""}
-
-    try:
-        response = fetch_via_requests_session(shared_state, method="GET", target_url=url, timeout=30)
-
-        if response.status_code != 200:
-            info(f"Failed to load thread page: {url} (Status: {response.status_code})")
-            return {"links": [], "password": ""}
-
-        soup = BeautifulSoup(response.text, 'html.parser')
-
-        first_post = soup.select_one('article.message--post')
-        if not first_post:
-            info(f"Could not find first post in thread: {url}")
-            return {"links": [], "password": ""}
-
-        post_content = first_post.select_one('div.bbWrapper')
-        if not post_content:
-            info(f"Could not find post content in thread: {url}")
-            return {"links": [], "password": ""}
-
-        links, extracted_password = extract_links_and_password_from_post(str(post_content), host)
-
-        if not links:
-            info(f"No supported download links found in thread: {url}")
-            return {"links": [], "password": ""}
-
-        debug(f"Found {len(links)} download link(s) for: {title} (password: {extracted_password})")
-        return {"links": links, "password": extracted_password}
-
-    except Exception as e:
-        info(f"Error extracting download links from {url}: {e}")
-        invalidate_session(shared_state)
-        return {"links": [], "password": ""}