quasarr-2.0.0-py3-none-any.whl → quasarr-2.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

@@ -2,10 +2,12 @@
  # Quasarr
  # Project by https://github.com/rix1337
 
+ import hashlib
  import json
  import re
 
  from quasarr.downloads.linkcrypters.hide import decrypt_links_if_hide
+ from quasarr.downloads.packages import get_packages
  from quasarr.downloads.sources.al import get_al_download_links
  from quasarr.downloads.sources.by import get_by_download_links
  from quasarr.downloads.sources.dd import get_dd_download_links
@@ -65,6 +67,72 @@ SOURCE_GETTERS = {
  }
 
 
+ # =============================================================================
+ # DETERMINISTIC PACKAGE ID GENERATION
+ # =============================================================================
+
+ def extract_client_type(request_from):
+     """
+     Extract client type from User-Agent, stripping version info.
+
+     Examples:
+         "Radarr/6.0.4.10291 (alpine 3.23.2)" → "radarr"
+         "Sonarr/4.0.0.123" → "sonarr"
+         "LazyLibrarian/1.0" → "lazylibrarian"
+     """
+     if not request_from:
+         return "unknown"
+
+     # Extract the client name before the version (first part before '/')
+     client = request_from.split('/')[0].lower().strip()
+
+     # Normalize known clients
+     if 'radarr' in client:
+         return 'radarr'
+     elif 'sonarr' in client:
+         return 'sonarr'
+     elif 'lazylibrarian' in client:
+         return 'lazylibrarian'
+
+     return client
+
+
+ def generate_deterministic_package_id(title, source_key, client_type):
+     """
+     Generate a deterministic package ID from title, source, and client type.
+
+     The same combination of (title, source_key, client_type) will ALWAYS produce
+     the same package_id, allowing clients to reliably blocklist erroneous releases.
+
+     Args:
+         title: Release title (e.g., "Movie.Name.2024.1080p.BluRay")
+         source_key: Source identifier/hostname shorthand (e.g., "nx", "dl", "al")
+         client_type: Client type without version (e.g., "radarr", "sonarr", "lazylibrarian")
+
+     Returns:
+         Deterministic package ID in format: Quasarr_{category}_{hash32}
+     """
+     # Normalize inputs for consistency
+     normalized_title = title.strip()
+     normalized_source = source_key.lower().strip() if source_key else "unknown"
+     normalized_client = client_type.lower().strip() if client_type else "unknown"
+
+     # Category mapping (for compatibility with existing package ID format)
+     category_map = {
+         "lazylibrarian": "docs",
+         "radarr": "movies",
+         "sonarr": "tv"
+     }
+     category = category_map.get(normalized_client, "tv")
+
+     # Create deterministic hash from combination using SHA256
+     hash_input = f"{normalized_title}|{normalized_source}|{normalized_client}"
+     hash_bytes = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+
+     # Use first 32 characters for good collision resistance (128-bit)
+     return f"Quasarr_{category}_{hash_bytes[:32]}"
+
+
  # =============================================================================
  # LINK CLASSIFICATION
  # =============================================================================
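The practical point of these helpers is that repeated grabs of the same release now map to the same ID. A minimal sketch of that property, assuming the two functions above are in scope (their module path is not shown in this diff):

```python
# Sketch only: extract_client_type and generate_deterministic_package_id are the
# helpers added in the hunk above.
client = extract_client_type("Radarr/6.0.4.10291 (alpine 3.23.2)")   # -> "radarr"

id_a = generate_deterministic_package_id("Movie.Name.2024.1080p.BluRay", "nx", client)
id_b = generate_deterministic_package_id("Movie.Name.2024.1080p.BluRay", "nx", "radarr")
assert id_a == id_b                         # same inputs always yield the same ID
assert id_a.startswith("Quasarr_movies_")   # "radarr" maps to the "movies" category

# The 2.0.0 scheme, f"Quasarr_{category}_{hash(title + url)}", was stable neither
# across Python processes (string hashing is salted) nor across mirror URLs.
```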
@@ -228,28 +296,58 @@ def process_links(shared_state, source_result, title, password, package_id, imdb
  # MAIN ENTRY POINT
  # =============================================================================
 
- def download(shared_state, request_from, title, url, mirror, size_mb, password, imdb_id=None):
-     """Main download entry point."""
-     category = "docs" if "lazylibrarian" in request_from.lower() else \
-         "movies" if "radarr" in request_from.lower() else "tv"
+ def package_id_exists(shared_state, package_id):
+     # DB checks
+     if shared_state.get_db("protected").retrieve(package_id):
+         return True
+     if shared_state.get_db("failed").retrieve(package_id):
+         return True
+
+     data = get_packages(shared_state) or {}
+
+     for section in ("queue", "history"):
+         for pkg in data.get(section, []) or []:
+             if pkg.get("nzo_id") == package_id:
+                 return True
 
-     # Problem, we should make this id deterministic, so same source and same request_from (radarr / sonarr, not their version!) must yield same hash
-     package_id = f"Quasarr_{category}_{str(hash(title + url)).replace('-', '')}"
+     return False
 
+
+ def download(shared_state, request_from, title, url, mirror, size_mb, password, imdb_id=None, source_key=None):
+     """
+     Main download entry point.
+
+     Args:
+         shared_state: Application shared state
+         request_from: User-Agent string (e.g., "Radarr/6.0.4.10291")
+         title: Release title
+         url: Source URL
+         mirror: Preferred mirror/hoster
+         size_mb: Size in MB
+         password: Archive password
+         imdb_id: IMDb ID (optional)
+         source_key: Hostname shorthand from search (e.g., "nx", "dl"). If not provided,
+             will be derived from URL matching against configured hostnames.
+     """
      if imdb_id and imdb_id.lower() == "none":
          imdb_id = None
 
      config = shared_state.values["config"]("Hostnames")
 
+     # Extract client type (without version) for deterministic hashing
+     client_type = extract_client_type(request_from)
+
      # Find matching source - all getters have unified signature
      source_result = None
      label = None
+     detected_source_key = None
 
      for key, getter in SOURCE_GETTERS.items():
          hostname = config.get(key)
          if hostname and hostname.lower() in url.lower():
              source_result = getter(shared_state, url, mirror, title, password)
              label = key.upper()
+             detected_source_key = key
              break
 
      # No source matched - check if URL is a known crypter directly
@@ -259,6 +357,19 @@ def download(shared_state, request_from, title, url, mirror, size_mb, password,
          # For direct crypter URLs, we only know the crypter type, not the hoster inside
          source_result = {"links": [[url, crypter]]}
          label = crypter.upper()
+         detected_source_key = crypter
+
+     # Use provided source_key if available, otherwise use detected one
+     # This ensures we use the authoritative source from the search results
+     final_source_key = source_key if source_key else detected_source_key
+
+     # Generate DETERMINISTIC package_id
+     package_id = generate_deterministic_package_id(title, final_source_key, client_type)
+
+     # Skip Download if package_id already exists
+     if package_id_exists(shared_state, package_id):
+         info(f"Package {package_id} already exists. Skipping download!")
+         return {"success": True, "package_id": package_id, "title": title}
 
      if source_result is None:
          info(f'Could not find matching source for "{title}" - "{url}"')
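Condensed, the grab handling now derives the ID before any links are processed, so a second grab of the same release is answered with the existing package instead of creating a new one. A simplified sketch of that flow (resolve_package_id is an illustrative helper, not part of the release; it only rearranges the calls shown above):

```python
def resolve_package_id(request_from, title, source_key=None, detected_source_key=None):
    """Illustrative condensation of how download() builds the deterministic ID."""
    client_type = extract_client_type(request_from)          # strips the version suffix
    final_source_key = source_key if source_key else detected_source_key
    return generate_deterministic_package_id(title, final_source_key, client_type)

# Different Radarr versions and different mirror URLs for the same release and
# indexer source now collide on purpose, so package_id_exists() skips the retry:
first = resolve_package_id("Radarr/6.0.4.10291", "Movie.Name.2024.1080p.BluRay", source_key="nx")
second = resolve_package_id("Radarr/6.1.0.0", "Movie.Name.2024.1080p.BluRay", source_key="nx")
assert first == second
```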
@@ -229,7 +229,7 @@ def get_filecrypt_links(shared_state, token, title, url, password=None, mirror=N
      debug(f"Circle captcha present: {circle_captcha}")
      i = 0
      while circle_captcha and i < 3:
-         debug(f"Submitting fake circle captcha click attempt {i+1}.")
+         debug(f"Submitting fake circle captcha click attempt {i + 1}.")
          random_x = str(random.randint(100, 200))
          random_y = str(random.randint(100, 200))
          output = session.post(url, data="buttonx.x=" + random_x + "&buttonx.y=" + random_y,
quasarr/providers/auth.py CHANGED
@@ -7,11 +7,13 @@ import hashlib
  import hmac
  import os
  import time
+ from functools import wraps
 
- from bottle import request, response, redirect
+ from bottle import request, response, redirect, abort
 
  import quasarr.providers.html_images as images
  from quasarr.providers.version import get_version
+ from quasarr.storage.config import Config
 
  # Auth configuration from environment
  AUTH_USER = os.environ.get('USER', '')
@@ -248,3 +250,16 @@ def add_auth_hook(app, whitelist_prefixes=None):
          else:
              if not check_basic_auth():
                  return require_basic_auth()
+
+
+ def require_api_key(func):
+     @wraps(func)
+     def decorated(*args, **kwargs):
+         api_key = Config('API').get('key')
+         if not request.query.apikey:
+             return abort(401, "Missing API key")
+         if request.query.apikey != api_key:
+             return abort(403, "Invalid API key")
+         return func(*args, **kwargs)
+
+     return decorated
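The decorator guards Bottle routes behind the key stored in the `API` config section: 401 when `?apikey=` is missing, 403 when it does not match. A hypothetical route showing how it would be applied (the app object and path are placeholders, not taken from this diff):

```python
from bottle import Bottle

from quasarr.providers.auth import require_api_key

app = Bottle()


@app.get("/api/status")   # placeholder route
@require_api_key          # 401 without ?apikey=..., 403 on mismatch with Config('API').get('key')
def api_status():
    return {"status": "ok"}   # Bottle serializes dicts to JSON

# Callers would request e.g. GET /api/status?apikey=<key from the API config section>.
```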
@@ -6,7 +6,7 @@ import quasarr.providers.html_images as images
  from quasarr.providers.version import get_version
 
 
- def render_centered_html(inner_content):
+ def render_centered_html(inner_content, footer_content=""):
      head = '''
      <head>
          <meta charset="utf-8">
@@ -20,6 +20,7 @@ def render_centered_html(inner_content):
              --fg-color: #212529;
              --card-bg: #ffffff;
              --card-shadow: rgba(0, 0, 0, 0.1);
+             --card-border: #dee2e6;
              --primary: #0d6efd;
              --secondary: #6c757d;
              --code-bg: #f8f9fa;
@@ -27,21 +28,40 @@ def render_centered_html(inner_content):
              --info-border: #2d5a2d;
              --setup-border: var(--primary);
              --divider-color: #dee2e6;
+             --border-color: #dee2e6;
              --btn-subtle-bg: #e9ecef;
              --btn-subtle-border: #ced4da;
+             --text-muted: #666;
+             --link-color: #0d6efd;
+             --success-color: #198754;
+             --success-bg: #d1e7dd;
+             --success-border: #a3cfbb;
+             --error-color: #dc3545;
+             --error-bg: #f8d7da;
+             --error-border: #f1aeb5;
          }
          @media (prefers-color-scheme: dark) {
              :root {
                  --bg-color: #181a1b;
                  --fg-color: #f1f1f1;
-                 --card-bg: #242526;
+                 --card-bg: #2d3748;
                  --card-shadow: rgba(0, 0, 0, 0.5);
+                 --card-border: #4a5568;
                  --code-bg: #2c2f33;
                  --info-border: #4a8c4a;
                  --setup-border: var(--primary);
-                 --divider-color: #444;
+                 --divider-color: #4a5568;
+                 --border-color: #4a5568;
                  --btn-subtle-bg: #444;
                  --btn-subtle-border: #666;
+                 --text-muted: #a0aec0;
+                 --link-color: #63b3ed;
+                 --success-color: #68d391;
+                 --success-bg: #1c4532;
+                 --success-border: #276749;
+                 --error-color: #fc8181;
+                 --error-bg: #3d2d2d;
+                 --error-border: #c53030;
              }
          }
          /* Info box styling */
@@ -66,6 +86,27 @@ def render_centered_html(inner_content):
              margin-top: 0;
              color: var(--setup-border);
          }
+         /* Status pill styling */
+         .status-pill {
+             display: inline-flex;
+             align-items: center;
+             gap: 6px;
+             padding: 6px 12px;
+             border-radius: 20px;
+             font-size: 0.9rem;
+             font-weight: 500;
+             margin: 8px 0;
+         }
+         .status-pill.success {
+             background: var(--success-bg);
+             color: var(--success-color);
+             border: 1px solid var(--success-border);
+         }
+         .status-pill.error {
+             background: var(--error-bg);
+             color: var(--error-color);
+             border: 1px solid var(--error-border);
+         }
          /* Subtle button styling (ghost style) */
          .btn-subtle {
              background: transparent;
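The new variables and `.status-pill` rules give rendered pages a small success/error badge that follows both the light and dark palettes. A hypothetical page fragment using them (the message text is illustrative; `render_centered_html` is the helper patched above):

```python
# Hypothetical usage of the new CSS classes from Python-side page building.
pill_ok = '<span class="status-pill success">&#10003; Hostname reachable</span>'
pill_err = '<span class="status-pill error">&#10007; Login failed</span>'

page = render_centered_html(f"<h2>Connection check</h2>{pill_ok}{pill_err}")
```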
@@ -112,7 +153,7 @@ def render_centered_html(inner_content):
              width: 100%;
              padding: 0.5rem;
              font-size: 1rem;
-             border: 1px solid #ced4da;
+             border: 1px solid var(--card-border);
              border-radius: 0.5rem;
              background-color: var(--card-bg);
              color: var(--fg-color);
@@ -220,22 +261,36 @@ def render_centered_html(inner_content):
              box-shadow: 0 2px 6px rgba(108, 117, 125, 0.4);
          }
          a {
-             color: var(--primary);
+             color: var(--link-color);
              text-decoration: none;
          }
          a:hover {
-
+             text-decoration: underline;
          }
          /* footer styling */
          footer {
              text-align: center;
              font-size: 0.75rem;
-             color: var(--secondary);
+             color: var(--text-muted);
              padding: 0.5rem 0;
          }
+         footer a {
+             color: var(--text-muted);
+             margin: 0 0;
+         }
+         footer a:hover {
+             color: var(--fg-color);
+         }
      </style>
      </head>'''
 
+     # Build footer content
+     version_text = f"Quasarr v.{get_version()}"
+     if footer_content:
+         footer_html = f"{footer_content} · {version_text}"
+     else:
+         footer_html = version_text
+
      body = f'''
      {head}
      <body>
@@ -245,7 +300,7 @@ def render_centered_html(inner_content):
          </div>
      </div>
      <footer>
-         Quasarr v.{get_version()}
+         {footer_html}
      </footer>
      </body>
      '''
@@ -260,14 +315,14 @@ def render_button(text, button_type="primary", attributes=None):
      return f'<button class="{cls}" {attr_str}>{text}</button>'
 
 
- def render_form(header, form="", script=""):
+ def render_form(header, form="", script="", footer_content=""):
      content = f'''
      <h1><img src="{images.logo}" type="image/png" alt="Quasarr logo" class="logo"/>Quasarr</h1>
      <h2>{header}</h2>
      {form}
      {script}
      '''
-     return render_centered_html(content)
+     return render_centered_html(content, footer_content)
 
 
  def render_success(message, timeout=10, optional_text=""):
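`render_form()` simply forwards the new `footer_content` fragment to `render_centered_html()`, which joins it with the version string using a " · " separator. A hypothetical call (form markup and link target are placeholders):

```python
# Only the footer_content keyword is new in 2.1.1; everything else existed before.
page = render_form(
    header="Hostnames",
    form='<form method="post"><input name="nx"/></form>',
    footer_content='<a href="/settings">Back to settings</a>',
)
# The footer then renders as: <a href="/settings">Back to settings</a> · Quasarr v.2.1.1
```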
@@ -8,7 +8,7 @@ import requests
 
 
  def get_version():
-     return "2.0.0"
+     return "2.1.1"
 
 
  def get_latest_version():
@@ -208,7 +208,7 @@ def al_feed(shared_state, start_time, request_from, mirror=None):
 
          # Build payload using final_title
          mb = 0  # size not available in feed
-         raw = f"{final_title}|{url}|{mirror}|{mb}|{release_id}|".encode("utf-8")
+         raw = f"{final_title}|{url}|{mirror}|{mb}|{release_id}||{hostname}".encode("utf-8")
          payload = urlsafe_b64encode(raw).decode("utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
@@ -124,7 +124,7 @@ def _parse_posts(soup, shared_state, base_url, password, mirror_filter,
          imdb_id = None
 
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
+             f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}|{hostname}".encode()
          ).decode()
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
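Every feed and search provider now appends the hostname shorthand as one extra pipe-separated field to the base64 payload; this is how the `/download` endpoint receives the `source_key` used for the deterministic package ID. A minimal sketch of the encoding (field order follows the f-strings above; the decoding side is assumed to split on `|`, which this diff does not show):

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

title, source, mirror, mb, password, imdb_id, hostname = (
    "Movie.Name.2024.1080p.BluRay", "https://nx.example/release/123",
    None, 4321, "", "tt1234567", "nx",
)

# 2.1.1 payload layout: the trailing field carries the hostname shorthand.
payload = urlsafe_b64encode(
    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
).decode("utf-8")

fields = urlsafe_b64decode(payload).decode("utf-8").split("|")
assert fields[-1] == "nx"   # presumably handed to download() as source_key
```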
@@ -106,7 +106,8 @@ def dd_search(shared_state, start_time, request_from, search_string="", mirror=N
          mb = shared_state.convert_to_mb(size_item) * 1024 * 1024
          published = convert_to_rss_date(release.get("when"))
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
+             "utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
          releases.append({
@@ -68,7 +68,7 @@ def dj_feed(shared_state, start_time, request_from, mirror=None):
          imdb_id = None
 
          payload = urlsafe_b64encode(
-             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
          ).decode("utf-8")
 
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
@@ -186,7 +186,7 @@ def dj_search(shared_state, start_time, request_from, search_string, mirror=None
          size = 0
 
          payload = urlsafe_b64encode(
-             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
          ).decode("utf-8")
 
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
@@ -123,7 +123,7 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
          password = ""
 
          payload = urlsafe_b64encode(
-             f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+             f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode("utf-8")
          ).decode("utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
@@ -230,6 +230,11 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
          if not title_elem:
              continue
 
+         # Skip "Wird gesucht" threads
+         label = item.select_one('.contentRow-minor .label')
+         if label and 'wird gesucht' in label.get_text(strip=True).lower():
+             continue
+
          title = ''.join(title_elem.strings)
 
          title = re.sub(r'\s+', ' ', title)
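"Wird gesucht" is the board label for request threads (releases that are wanted but not yet posted), which previously showed up in search results as grabs that could never be fulfilled. A self-contained check of the new filter against a fabricated thread row (only the CSS selector and the label text come from the diff; the surrounding HTML is illustrative):

```python
from bs4 import BeautifulSoup

row = """
<div class="contentRow">
  <div class="contentRow-minor"><span class="label">Wird gesucht</span></div>
  <a>Some.Show.S01E01.German.1080p.WEB.h264-GRP</a>
</div>
"""
item = BeautifulSoup(row, "html.parser")

label = item.select_one('.contentRow-minor .label')
skip = bool(label and 'wird gesucht' in label.get_text(strip=True).lower())
assert skip   # request-only threads are dropped before a payload is built
```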
@@ -261,7 +266,8 @@
          password = ""
 
          payload = urlsafe_b64encode(
-             f"{title_normalized}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+             f"{title_normalized}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(
+             "utf-8")
          ).decode("utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
@@ -111,7 +111,7 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
          published = parse_published_datetime(article)
 
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
          ).decode("utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
@@ -98,7 +98,7 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
          date = article.parent.parent.find("span", {"class": "date updated"}).text.strip()
          published = convert_to_rss_date(date)
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode("utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
      except Exception as e:
          info(f"Error parsing {hostname.upper()} feed: {e}")
@@ -136,7 +136,6 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
          debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
          return releases
 
-
      if "Radarr" in request_from:
          search_type = "videocategory=filme"
      else:
@@ -168,10 +167,10 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
          title = result.a.text.strip()
 
          if not shared_state.is_valid_release(title,
-                                                  request_from,
-                                                  search_string,
-                                                  season,
-                                                  episode):
+                                              request_from,
+                                              search_string,
+                                              season,
+                                              episode):
              continue
 
          if not imdb_id:
@@ -188,7 +187,7 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
          date = result.parent.parent.find("span", {"class": "date updated"}).text.strip()
          published = convert_to_rss_date(date)
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode("utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
      except Exception as e:
          info(f"Error parsing {hostname.upper()} search: {e}")
@@ -34,7 +34,6 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
          debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
          return releases
 
-
      if mirror and mirror not in supported_mirrors:
          debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
                ' Skipping search!')
@@ -81,7 +80,8 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
          mb = shared_state.convert_to_mb(size_item)
          size = mb * 1024 * 1024
          payload = urlsafe_b64encode(
-             f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+             f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
+             "utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
      except:
          continue
@@ -125,7 +125,6 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None
          debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
          return releases
 
-
      if mirror and mirror not in supported_mirrors:
          debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
                ' Skipping search!')
@@ -188,7 +187,8 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None
          mb = shared_state.convert_to_mb(size_item)
          size = mb * 1024 * 1024
          payload = urlsafe_b64encode(
-             f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+             f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
+             "utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
      except:
          continue
@@ -177,7 +177,7 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
 
          password = None
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
+             f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode("utf-8")).decode()
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
          releases.append({
@@ -116,7 +116,7 @@ def _parse_posts(soup, shared_state, password, mirror_filter,
          size_bytes = mb * 1024 * 1024
 
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
+             f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}|{hostname}".encode()
          ).decode()
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
@@ -168,7 +168,7 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
          published = convert_to_rss_date(date_text) if date_text else ""
 
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
+             f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode("utf-8")).decode()
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
          releases.append({
@@ -59,7 +59,7 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
          imdb_id = item.get('_media', {}).get('imdbid', None)
          mb = shared_state.convert_to_mb(item)
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode(
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
              "utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
      except:
@@ -142,7 +142,8 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
          imdb_id = None  # imdb info is missing here
 
          payload = urlsafe_b64encode(
-             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")).decode(
+             "utf-8")
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
      except:
          continue
@@ -349,7 +350,8 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
                              episode):
              continue
 
-         payload = urlsafe_b64encode(f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode()).decode()
+         payload = urlsafe_b64encode(
+             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode()).decode()
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
          size_bytes = mb * 1024 * 1024
 
@@ -68,7 +68,7 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
          imdb_id = None
 
          payload = urlsafe_b64encode(
-             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
          ).decode("utf-8")
 
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
@@ -186,7 +186,7 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
          size = 0
 
          payload = urlsafe_b64encode(
-             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}|{hostname}".encode("utf-8")
          ).decode("utf-8")
 
          link = f"{shared_state.values['internal_address']}/download/?payload={payload}"