quasarr-1.4.1-py3-none-any.whl → quasarr-1.20.4-py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (67)
  1. quasarr/__init__.py +157 -67
  2. quasarr/api/__init__.py +126 -43
  3. quasarr/api/arr/__init__.py +197 -78
  4. quasarr/api/captcha/__init__.py +885 -39
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +84 -22
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +236 -487
  9. quasarr/downloads/linkcrypters/al.py +237 -0
  10. quasarr/downloads/linkcrypters/filecrypt.py +178 -31
  11. quasarr/downloads/linkcrypters/hide.py +123 -0
  12. quasarr/downloads/packages/__init__.py +461 -0
  13. quasarr/downloads/sources/al.py +697 -0
  14. quasarr/downloads/sources/by.py +106 -0
  15. quasarr/downloads/sources/dd.py +6 -78
  16. quasarr/downloads/sources/dj.py +7 -0
  17. quasarr/downloads/sources/dt.py +1 -1
  18. quasarr/downloads/sources/dw.py +2 -2
  19. quasarr/downloads/sources/he.py +112 -0
  20. quasarr/downloads/sources/mb.py +47 -0
  21. quasarr/downloads/sources/nk.py +51 -0
  22. quasarr/downloads/sources/nx.py +36 -81
  23. quasarr/downloads/sources/sf.py +27 -4
  24. quasarr/downloads/sources/sj.py +7 -0
  25. quasarr/downloads/sources/sl.py +90 -0
  26. quasarr/downloads/sources/wd.py +110 -0
  27. quasarr/providers/cloudflare.py +204 -0
  28. quasarr/providers/html_images.py +20 -0
  29. quasarr/providers/html_templates.py +48 -39
  30. quasarr/providers/imdb_metadata.py +15 -2
  31. quasarr/providers/myjd_api.py +34 -5
  32. quasarr/providers/notifications.py +30 -5
  33. quasarr/providers/obfuscated.py +35 -0
  34. quasarr/providers/sessions/__init__.py +0 -0
  35. quasarr/providers/sessions/al.py +286 -0
  36. quasarr/providers/sessions/dd.py +78 -0
  37. quasarr/providers/sessions/nx.py +76 -0
  38. quasarr/providers/shared_state.py +347 -20
  39. quasarr/providers/statistics.py +154 -0
  40. quasarr/providers/version.py +1 -1
  41. quasarr/search/__init__.py +112 -36
  42. quasarr/search/sources/al.py +448 -0
  43. quasarr/search/sources/by.py +203 -0
  44. quasarr/search/sources/dd.py +17 -6
  45. quasarr/search/sources/dj.py +213 -0
  46. quasarr/search/sources/dt.py +37 -7
  47. quasarr/search/sources/dw.py +27 -47
  48. quasarr/search/sources/fx.py +27 -29
  49. quasarr/search/sources/he.py +196 -0
  50. quasarr/search/sources/mb.py +195 -0
  51. quasarr/search/sources/nk.py +188 -0
  52. quasarr/search/sources/nx.py +22 -6
  53. quasarr/search/sources/sf.py +143 -151
  54. quasarr/search/sources/sj.py +213 -0
  55. quasarr/search/sources/sl.py +246 -0
  56. quasarr/search/sources/wd.py +208 -0
  57. quasarr/storage/config.py +20 -4
  58. quasarr/storage/setup.py +216 -51
  59. quasarr-1.20.4.dist-info/METADATA +304 -0
  60. quasarr-1.20.4.dist-info/RECORD +72 -0
  61. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/WHEEL +1 -1
  62. quasarr/providers/tvmaze_metadata.py +0 -23
  63. quasarr-1.4.1.dist-info/METADATA +0 -174
  64. quasarr-1.4.1.dist-info/RECORD +0 -43
  65. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/entry_points.txt +0 -0
  66. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/licenses/LICENSE +0 -0
  67. {quasarr-1.4.1.dist-info → quasarr-1.20.4.dist-info}/top_level.txt +0 -0
quasarr/search/sources/nk.py (new file)
@@ -0,0 +1,188 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import re
+ import time
+ from base64 import urlsafe_b64encode
+ from datetime import datetime
+ from html import unescape
+ from urllib.parse import urljoin
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "nk"
+ supported_mirrors = ["rapidgator", "ddownload"]
+
+
+ def convert_to_rss_date(date_str: str) -> str:
+     date_str = date_str.strip()
+     for fmt in ("%d. %B %Y / %H:%M", "%d.%m.%Y / %H:%M", "%d.%m.%Y - %H:%M", "%Y-%m-%d %H:%M"):
+         try:
+             dt = datetime.strptime(date_str, fmt)
+             return dt.strftime("%a, %d %b %Y %H:%M:%S +0000")
+         except Exception:
+             continue
+     return ""
+
+
+ def extract_size(text: str) -> dict:
+     match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
+     if match:
+         size = match.group(1).replace(',', '.')
+         unit = match.group(2)
+         return {"size": size, "sizeunit": unit}
+     return {"size": "0", "sizeunit": "MB"}
+
+
+ def get_release_field(res, label):
+     for li in res.select('ul.release-infos li'):
+         sp = li.find('span')
+         if not sp:
+             return ''
+         if sp.get_text(strip=True).lower() == label.lower():
+             txt = li.get_text(' ', strip=True)
+             return txt[len(sp.get_text(strip=True)):].strip()
+     return ''
+
+
+ def nk_feed(*args, **kwargs):
+     return nk_search(*args, **kwargs)
+
+
+ def nk_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+     releases = []
+     host = shared_state.values["config"]("Hostnames").get(hostname)
+
+     if not "arr" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     if mirror and mirror not in supported_mirrors:
+         debug(f'Mirror "{mirror}" not supported by {hostname}.')
+         return releases
+
+     source_search = ""
+     if search_string != "":
+         imdb_id = shared_state.is_imdb_id(search_string)
+         if imdb_id:
+             local_title = get_localized_title(shared_state, imdb_id, 'de')
+             if not local_title:
+                 info(f"{hostname}: no title for IMDb {imdb_id}")
+                 return releases
+             source_search = local_title
+         else:
+             return releases
+         source_search = unescape(source_search)
+     else:
+         imdb_id = None
+
+     url = f'https://{host}/search'
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+     data = {"search": source_search}
+
+     try:
+         r = requests.post(url, headers=headers, data=data, timeout=20)
+         soup = BeautifulSoup(r.content, 'html.parser')
+         results = soup.find_all('div', class_='article-right')
+     except Exception as e:
+         info(f"{hostname}: search load error: {e}")
+         return releases
+
+     if not results:
+         return releases
+
+     for result in results:
+         try:
+             imdb_a = result.select_one('a.imdb')
+             if imdb_a and imdb_a.get('href'):
+                 try:
+                     release_imdb_id = re.search(r'tt\d+', imdb_a['href']).group()
+                     if imdb_id:
+                         if release_imdb_id != imdb_id:
+                             debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
+                             continue
+                 except Exception:
+                     debug(f"{hostname}: could not extract IMDb ID")
+                     continue
+             else:
+                 debug(f"{hostname}: could not extract IMDb ID")
+                 continue
+
+             a = result.find('a', class_='release-details', href=True)
+             if not a:
+                 continue
+
+             sub_title = result.find('span', class_='subtitle')
+             if sub_title:
+                 title = sub_title.get_text(strip=True)
+             else:
+                 continue
+
+             if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                 continue
+
+             source = urljoin(f'https://{host}', a['href'])
+
+             mb = 0
+             size_text = get_release_field(result, 'Größe')
+             if size_text:
+                 size_item = extract_size(size_text)
+                 mb = shared_state.convert_to_mb(size_item)
+
+             if season != "" and episode == "":
+                 mb = 0  # Size unknown for season packs
+
+             size = mb * 1024 * 1024
+
+             password = ''
+             mirrors_p = result.find('p', class_='mirrors')
+             if mirrors_p:
+                 strong = mirrors_p.find('strong')
+                 if strong and strong.get_text(strip=True).lower().startswith('passwort'):
+                     nxt = strong.next_sibling
+                     if nxt:
+                         val = str(nxt).strip()
+                         if val:
+                             password = val.split()[0]
+
+             date_text = ''
+             p_meta = result.find('p', class_='meta')
+             if p_meta:
+                 spans = p_meta.find_all('span')
+                 if len(spans) >= 2:
+                     date_part = spans[0].get_text(strip=True)
+                     time_part = spans[1].get_text(strip=True).replace('Uhr', '').strip()
+                     date_text = f"{date_part} / {time_part}"
+
+             published = convert_to_rss_date(date_text) if date_text else ""
+
+             payload = urlsafe_b64encode(
+                 f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
+             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+             releases.append({
+                 "details": {
+                     "title": title,
+                     "hostname": hostname,
+                     "imdb_id": release_imdb_id,
+                     "link": link,
+                     "mirror": mirror,
+                     "size": size,
+                     "date": published,
+                     "source": source
+                 },
+                 "type": "protected"
+             })
+         except Exception as e:
+             info(e)
+             debug(f"{hostname}: error parsing search result: {e}")
+             continue
+
+     elapsed = time.time() - start_time
+     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+     return releases
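
Every source module in this release builds its download links the same way: the release metadata is joined into a pipe-delimited string, URL-safe Base64-encoded, and handed to Quasarr's local /download/ endpoint as a payload query parameter. A minimal sketch of that round trip, with the field order taken from nk_search above (the decode side is an assumption about how the endpoint unpacks it; encode_payload/decode_payload are illustrative names, not Quasarr functions):

from base64 import urlsafe_b64decode, urlsafe_b64encode

def encode_payload(title, source, mirror, mb, password, imdb_id):
    # Same scheme as nk_search above: six pipe-delimited fields,
    # URL-safe Base64 so the blob survives inside a query string.
    raw = f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
    return urlsafe_b64encode(raw.encode("utf-8")).decode()

def decode_payload(payload):
    # Hypothetical inverse, mirroring the field order above.
    title, source, mirror, mb, password, imdb_id = \
        urlsafe_b64decode(payload.encode()).decode("utf-8").split("|")
    return {"title": title, "source": source, "mirror": mirror,
            "mb": int(mb), "password": password, "imdb_id": imdb_id}

payload = encode_payload("Show.S01E01.German.1080p", "https://example.invalid/r/1",
                         "rapidgator", 2048, "pw", "tt0000000")
assert decode_payload(payload)["mb"] == 2048

Note that a literal "|" in any field would break the naive split; the scraped titles and mirror names here don't normally contain one.
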
quasarr/search/sources/nx.py
@@ -20,7 +20,9 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
      nx = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = nx

-     if "Radarr" in request_from:
+     if "lazylibrarian" in request_from.lower():
+         category = "ebook"
+     elif "radarr" in request_from.lower():
          category = "movie"
      else:
          category = "episode"
@@ -49,6 +51,10 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):

          if title:
              try:
+                 if 'lazylibrarian' in request_from.lower():
+                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                     title = shared_state.normalize_magazine_title(title)
+
                  source = f"https://{nx}/release/{item['slug']}"
                  imdb_id = item.get('_media', {}).get('imdbid', None)
                  mb = shared_state.convert_to_mb(item)
@@ -87,17 +93,19 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
              info(f"Error parsing {hostname.upper()} feed: {e}")

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

      return releases


- def nx_search(shared_state, start_time, request_from, search_string, mirror=None):
+ def nx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
      releases = []
      nx = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = nx

-     if "Radarr" in request_from:
+     if "lazylibrarian" in request_from.lower():
+         valid_type = "ebook"
+     elif "radarr" in request_from.lower():
          valid_type = "movie"
      else:
          valid_type = "episode"
@@ -133,9 +141,17 @@ def nx_search(shared_state, start_time, request_from, search_string, mirror=None):
          if item['type'] == valid_type:
              title = item['name']
              if title:
-                 if not shared_state.search_string_in_sanitized_title(search_string, title):
+                 if not shared_state.is_valid_release(title,
+                                                      request_from,
+                                                      search_string,
+                                                      season,
+                                                      episode):
                      continue

+                 if 'lazylibrarian' in request_from.lower():
+                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                     title = shared_state.normalize_magazine_title(title)
+
                  try:
                      source = f"https://{nx}/release/{item['slug']}"
                      if not imdb_id:
@@ -176,6 +192,6 @@ def nx_search(shared_state, start_time, request_from, search_string, mirror=None):
              info(f"Error parsing {hostname.upper()} search: {e}")

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

      return releases
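
normalize_magazine_title itself lives in shared_state and is not part of this diff; per the inline comments it rewrites titles into the date and issue-number formats LazyLibrarian can parse. A purely hypothetical illustration of that kind of rewrite (the real rules are not visible here):

import re

def normalize_magazine_title_sketch(title: str) -> str:
    # Hypothetical: turn "Nr.07"-style issue tokens into a plain
    # issue number so a magazine parser can match it.
    return re.sub(r'\b(?:No|Nr)\.?\s*0*(\d+)\b', r'Issue \1', title, flags=re.IGNORECASE)

print(normalize_magazine_title_sketch("PC Magazin Nr.07 2024"))  # PC Magazin Issue 7 2024
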
quasarr/search/sources/sf.py
@@ -18,6 +18,11 @@ supported_mirrors = ["1fichier", "ddownload", "katfile", "rapidgator", "turbobit

  from bs4 import BeautifulSoup

+ check = lambda s: s.replace(
+     ''.join(chr((ord(c) - 97 - 7) % 26 + 97) for c in "ylhr"),
+     ''.join(chr((ord(c) - 97 - 7) % 26 + 97) for c in "hu")
+ )
+

  def parse_mirrors(base_url, entry):
      """
@@ -29,7 +34,6 @@ def parse_mirrors(base_url, entry):
      """

      mirrors = {}
-
      try:
          host_map = {
              '1F': '1fichier',
@@ -94,10 +98,10 @@
  def sf_feed(shared_state, start_time, request_from, mirror=None):
      releases = []
      sf = shared_state.values["config"]("Hostnames").get(hostname.lower())
-     password = sf
+     password = check(sf)

-     if "Radarr" in request_from:
-         debug(f'Skipping Radarr search on "{hostname.upper()}" (unsupported media type at hostname)!')
+     if not "sonarr" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
          return releases

      if mirror and mirror not in supported_mirrors:
@@ -172,23 +176,11 @@
          info(f"Error parsing {hostname.upper()} feed: {e}")

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

      return releases


- def extract_season_episode(search_string):
-     try:
-         match = re.search(r'(.*?)(S\d{1,3})(?:E(\d{1,3}))?', search_string, re.IGNORECASE)
-         if match:
-             season = int(match.group(2)[1:])
-             episode = int(match.group(3)) if match.group(3) else None
-             return season, episode
-     except Exception as e:
-         debug(f"Error extracting season / episode from {search_string}: {e}")
-     return None, None
-
-
  def extract_size(text):
      match = re.match(r"(\d+(\.\d+)?) ([A-Za-z]+)", text)
      if match:
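
The deleted helper pulled season and episode numbers out of the search string itself; sf_search now receives them as explicit parameters instead. For reference, its behavior reproduced standalone (condensed, without the original's debug logging):

import re

def extract_season_episode(search_string):
    # Behavior of the removed helper: "Show S02E05" -> (2, 5),
    # "Show S02" -> (2, None), no SxxEyy token -> (None, None).
    match = re.search(r'(.*?)(S\d{1,3})(?:E(\d{1,3}))?', search_string, re.IGNORECASE)
    if match:
        return int(match.group(2)[1:]), int(match.group(3)) if match.group(3) else None
    return None, None

assert extract_season_episode("Show S02E05") == (2, 5)
assert extract_season_episode("Show") == (None, None)
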
@@ -199,184 +191,184 @@ def extract_size(text):
      raise ValueError(f"Invalid size format: {text}")


- def sf_search(shared_state, start_time, request_from, search_string, mirror=None):
+ def sf_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
      releases = []
      sf = shared_state.values["config"]("Hostnames").get(hostname.lower())
-     password = sf
+     password = check(sf)

-     season, episode = extract_season_episode(search_string)
+     imdb_id_in_search = shared_state.is_imdb_id(search_string)
+     if imdb_id_in_search:
+         search_string = get_localized_title(shared_state, imdb_id_in_search, 'de')
+         if not search_string:
+             info(f"Could not extract title from IMDb-ID {imdb_id_in_search}")
+             return releases
+         search_string = html.unescape(search_string)

-     if "Radarr" in request_from:
-         debug(f'Skipping Radarr search on "{hostname.upper()}" (unsupported media type at hostname)!')
+     if not "sonarr" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
          return releases

      if mirror and mirror not in supported_mirrors:
-         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
-               ' Skipping search!')
+         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}.')
          return releases

-     if re.match(r'^tt\d{7,8}$', search_string):
-         imdb_id = search_string
-         search_string = get_localized_title(shared_state, imdb_id, 'de')
-         if not search_string:
-             info(f"Could not extract title from IMDb-ID {imdb_id}")
-             return releases
-         search_string = html.unescape(search_string)
-
      one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')

+     # search API
      url = f'https://{sf}/api/v2/search?q={search_string}&ql=DE'
-     headers = {
-         'User-Agent': shared_state.values["user_agent"],
-     }
+     headers = {'User-Agent': shared_state.values["user_agent"]}

      try:
-         response = requests.get(url, headers, timeout=10)
+         response = requests.get(url, headers=headers, timeout=10)
          feed = response.json()
      except Exception as e:
          info(f"Error loading {hostname.upper()} search: {e}")
          return releases

-     results = feed['result']
+     results = feed.get('result', [])
      for result in results:
          sanitized_search_string = shared_state.sanitize_string(search_string)
-         sanitized_title = shared_state.sanitize_string(result["title"])
+         sanitized_title = shared_state.sanitize_string(result.get("title", ""))
+         if not re.search(rf'\b{re.escape(sanitized_search_string)}\b', sanitized_title):
+             debug(f"Search string '{search_string}' doesn't match '{result.get('title')}'")
+             continue
+         debug(f"Matched search string '{search_string}' with result '{result.get('title')}'")
+
+         series_id = result.get("url_id")
+         context = "recents_sf"
+         threshold = 60
+         recently_searched = shared_state.get_recently_searched(shared_state, context, threshold)
+         entry = recently_searched.get(series_id, {})
+         ts = entry.get("timestamp")
+         use_cache = ts and ts > datetime.now() - timedelta(seconds=threshold)
+
+         if use_cache and entry.get("content"):
+             debug(f"Using cached content for '/{series_id}'")
+             data_html = entry["content"]
+             imdb_cached = entry.get("imdb_id")
+             if imdb_cached:
+                 imdb_id = imdb_cached
+             content = BeautifulSoup(data_html, "html.parser")
+         else:
+             # fresh fetch: record timestamp
+             entry = {"timestamp": datetime.now()}

-         # Use word boundaries to ensure full word/phrase match
-         if re.search(rf'\b{re.escape(sanitized_search_string)}\b', sanitized_title):
-             debug(f"Matched search string '{search_string}' with result '{result['title']}'")
+             # load series page
+             series_url = f"https://{sf}/{series_id}"
              try:
-                 try:
-                     if not season:
-                         season = "ALL"
-
-                     series_id = result["url_id"]
-                     threshold = 15  # this should cut down duplicates in case Sonarr is searching variants of a title
-                     context = "recents_sf"
-                     recently_searched = shared_state.get_recently_searched(shared_state, context, threshold)
-                     if series_id in recently_searched:
-                         if recently_searched[series_id]["timestamp"] > datetime.now() - timedelta(seconds=threshold):
-                             debug(f"'/{series_id}' - requested within the last {threshold} seconds! Skipping...")
-                             continue
-
-                     recently_searched[series_id] = {"timestamp": datetime.now()}
-                     shared_state.update(context, recently_searched)
-
-                     series_url = f"https://{sf}/{series_id}"
-                     series_page = requests.get(series_url, headers, timeout=10).text
-                     try:
-                         imdb_link = (BeautifulSoup(series_page, "html.parser").
-                                      find("a", href=re.compile(r"imdb\.com")))
-                         imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
-                     except:
-                         imdb_id = None
-
-                     season_id = re.findall(r"initSeason\('(.+?)\',", series_page)[0]
-                     epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
-                     api_url = 'https://' + sf + '/api/v1/' + season_id + f'/season/{season}?lang=ALL&_=' + epoch
-
-                     response = requests.get(api_url, headers=headers, timeout=10)
-                     data = response.json()["html"]
-                     content = BeautifulSoup(data, "html.parser")
+                 series_page = requests.get(series_url, headers=headers, timeout=10).text
+                 imdb_link = BeautifulSoup(series_page, "html.parser").find("a", href=re.compile(r"imdb\.com"))
+                 imdb_id = re.search(r'tt\d+', str(imdb_link)).group() if imdb_link else None
+                 season_id = re.findall(r"initSeason\('(.+?)\',", series_page)[0]
+             except Exception:
+                 debug(f"Failed to load or parse series page for {series_id}")
+                 continue

-                 items = content.find_all("h3")
-             except:
+             # fetch API HTML
+             epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
+             api_url = f'https://{sf}/api/v1/{season_id}/season/ALL?lang=ALL&_={epoch}'
+             debug(f"Requesting SF API URL: {api_url}")
+             try:
+                 api_resp = requests.get(api_url, headers=headers, timeout=10)
+                 resp_json = api_resp.json()
+                 if resp_json.get('error'):
+                     info(f"SF API error for series '{series_id}' at URL {api_url}: {resp_json.get('message')}")
                      continue
+                 data_html = resp_json.get("html", "")
+             except Exception as e:
+                 info(f"Error loading SF API for {series_id} at {api_url}: {e}")
+                 continue

-             for item in items:
-                 try:
-                     details = item.parent.parent.parent
-                     title = details.find("small").text.strip()
-
-                     if not shared_state.search_string_in_sanitized_title(search_string, title):
-                         continue
-
-                     size_string = item.find("span", {"class": "morespec"}).text.split("|")[1].strip()
-                     size_item = extract_size(size_string)
-                     mirrors = parse_mirrors(f"https://{sf}", details)
-
-                     if mirror:
-                         if mirror not in mirrors["season"]:
-                             continue
-                         source = mirrors["season"][mirror]
-                         if not source:
-                             info(f"Could not find mirror '{mirror}' for '{title}'")
-                     else:
-                         source = next(iter(mirrors["season"].values()))
-                 except:
-                     debug(f"Could not find link for '{search_string}'")
-                     continue
+             # cache content and imdb_id
+             entry["content"] = data_html
+             entry["imdb_id"] = imdb_id
+             recently_searched[series_id] = entry
+             shared_state.update(context, recently_searched)
+             content = BeautifulSoup(data_html, "html.parser")

-                 mb = shared_state.convert_to_mb(size_item)
+         # parse episodes/releases
+         for item in content.find_all("h3"):
+             try:
+                 details = item.parent.parent.parent
+                 title = details.find("small").text.strip()

-                 if episode:
-                     mb = 0
-                     try:
-                         if not re.search(r'S\d{1,3}E\d{1,3}', title):
-                             title = re.sub(r'(S\d{1,3})', rf'\1E{episode:02d}', title)
+                 mirrors = parse_mirrors(f"https://{sf}", details)
+                 source = mirror and mirrors["season"].get(mirror) or next(iter(mirrors["season"].values()), None)
+                 if not source:
+                     debug(f"No source mirror found for {title}")
+                     continue

-                             # Count episodes
-                             episodes_in_release = len(mirrors["episodes"])
+                 try:
+                     size_string = item.find("span", {"class": "morespec"}).text.split("|")[1].strip()
+                     size_item = extract_size(size_string)
+                     mb = shared_state.convert_to_mb(size_item)
+                 except Exception as e:
+                     debug(f"Error extracting size for {title}: {e}")
+                     mb = 0

-                             # Get the correct episode entry (episode numbers are 1-based, list index is 0-based)
-                             episode_data = next((e for e in mirrors["episodes"] if e["number"] == int(episode)),
-                                                 None)
+                 if episode:
+                     try:
+                         if not re.search(r'S\d{1,3}E\d{1,3}', title):
+                             episodes_in_release = len(mirrors["episodes"])

-                             if episode_data:
-                                 if mirror:
-                                     if mirror not in episode_data["links"]:
-                                         debug(
-                                             f"Mirror '{mirror}' does not exist for '{title}' episode {episode}'")
-                                     else:
-                                         source = episode_data["links"][mirror]
+                             # Get the correct episode entry (episode numbers are 1-based, list index is 0-based)
+                             episode_data = next((e for e in mirrors["episodes"] if e["number"] == int(episode)),
+                                                 None)

+                             if episode_data:
+                                 title = re.sub(r'(S\d{1,3})', rf'\1E{episode:02d}', title)
+                                 if mirror:
+                                     if mirror not in episode_data["links"]:
+                                         debug(
+                                             f"Mirror '{mirror}' does not exist for '{title}' episode {episode}'")
                                      else:
-                                         source = next(iter(episode_data["links"].values()))
+                                         source = episode_data["links"][mirror]
+
                                  else:
-                                     debug(f"Episode '{episode}' data not found in mirrors for '{title}'")
+                                     source = next(iter(episode_data["links"].values()))
+                             else:
+                                 debug(f"Episode '{episode}' data not found in mirrors for '{title}'")

-                             if episodes_in_release:
+                             if episodes_in_release:
+                                 try:
                                      mb = shared_state.convert_to_mb({
                                          "size": float(size_item["size"]) // episodes_in_release,
                                          "sizeunit": size_item["sizeunit"]
                                      })
-                     except:
-                         continue
-
-                     payload = urlsafe_b64encode(f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".
-                                                 encode("utf-8")).decode("utf-8")
-                     link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
-
-                     try:
-                         size = mb * 1024 * 1024
-                     except:
-                         continue
-
-                     try:
-                         published = one_hour_ago  # release date is missing here
+                                 except Exception as e:
+                                     debug(f"Error calculating size for {title}: {e}")
+                                     mb = 0
                      except:
                          continue

-                     releases.append({
-                         "details": {
-                             "title": title,
-                             "hostname": hostname.lower(),
-                             "imdb_id": imdb_id,
-                             "link": link,
-                             "mirror": mirror,
-                             "size": size,
-                             "date": published,
-                             "source": f"{series_url}/{season}" if season else series_url
-                         },
-                         "type": "protected"
-                     })
+                 # check down here on purpose, because the title may be modified at episode stage
+                 if not shared_state.is_valid_release(title,
+                                                      request_from,
+                                                      search_string,
+                                                      season,
+                                                      episode):
+                     continue

+                 payload = urlsafe_b64encode(f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode()).decode()
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+                 size_bytes = mb * 1024 * 1024
+
+                 releases.append({
+                     "details": {
+                         "title": title,
+                         "hostname": hostname.lower(),
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size_bytes,
+                         "date": one_hour_ago,
+                         "source": f"https://{sf}/{series_id}/{season}" if season else f"https://{sf}/{series_id}"
+                     },
+                     "type": "protected"
+                 })
              except Exception as e:
-                 info(f"Error parsing {hostname.upper()} search: {e}")
-         else:
-             debug(f"Search string '{search_string}' does not match result '{result['title']}'")
+                 debug(f"Error parsing item for '{search_string}': {e}")

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
-
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
      return releases
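
The rewritten sf_search also replaces the old 15-second duplicate suppression with a 60-second content cache keyed by series_id, storing the fetched HTML and IMDb ID next to the timestamp so repeat searches within the window skip both network requests. The pattern in isolation (a minimal sketch; the real store lives behind shared_state.get_recently_searched and shared_state.update, whose exact semantics are assumed here):

from datetime import datetime, timedelta

THRESHOLD = 60  # seconds, matching the new sf_search

cache = {}  # stands in for the shared "recents_sf" store

def get_or_fetch(series_id, fetch):
    entry = cache.get(series_id, {})
    ts = entry.get("timestamp")
    if ts and ts > datetime.now() - timedelta(seconds=THRESHOLD) and entry.get("content"):
        return entry["content"]  # fresh enough: reuse cached HTML
    entry = {"timestamp": datetime.now(), "content": fetch(series_id)}
    cache[series_id] = entry
    return entry["content"]

first = get_or_fetch("some-show", lambda sid: f"<html>{sid}</html>")
second = get_or_fetch("some-show", lambda sid: "not fetched within 60s")
assert first == second
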