quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (77)
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
quasarr/search/sources/sf.py
@@ -0,0 +1,374 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import html
+ import re
+ import time
+ from base64 import urlsafe_b64encode
+ from datetime import datetime, timedelta
+
+ import requests
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "sf"
+ supported_mirrors = ["1fichier", "ddownload", "katfile", "rapidgator", "turbobit"]
+
+ from bs4 import BeautifulSoup
+
+ check = lambda s: s.replace(
+     ''.join(chr((ord(c) - 97 - 7) % 26 + 97) for c in "ylhr"),
+     ''.join(chr((ord(c) - 97 - 7) % 26 + 97) for c in "hu")
+ )
+
+
+ def parse_mirrors(base_url, entry):
+     """
+     entry: a BeautifulSoup Tag for <div class="entry">
+     returns a dict with:
+       - name: header text
+       - season: dict of {host: link}
+       - episodes: list of {number, title, links}
+     """
+
+     mirrors = {}
+     try:
+         host_map = {
+             '1F': '1fichier',
+             'DD': 'ddownload',
+             'KA': 'katfile',
+             'RG': 'rapidgator',
+             'TB': 'turbobit'
+         }
+
+         h3 = entry.select_one('h3')
+         name = h3.get_text(separator=' ', strip=True) if h3 else ''
+
+         season = {}
+         for a in entry.select('a.dlb.row'):
+             if a.find_parent('div', class_='simple'):  # skip links inside the episode list
+                 continue
+             host = a.get_text(strip=True)
+             if len(host) > 2:  # episode hosts are 2 chars
+                 season[host] = f"{base_url}{a['href']}"
+
+         # fallback: if entries are missing a mirror label, use the first season link as "filecrypt"
+         if not season:
+             fallback = next(
+                 (a for a in entry.select('a.dlb.row') if not a.find_parent('div', class_='simple')),
+                 None
+             )
+             if fallback:
+                 season['filecrypt'] = f"{base_url}{fallback['href']}"
+
+         episodes = []
+         for ep_row in entry.select('div.list.simple > div.row'):
+             if 'head' in ep_row.get('class', []):
+                 continue
+
+             divs = ep_row.find_all('div', recursive=False)
+             number = int(divs[0].get_text(strip=True).rstrip('.'))
+             title = divs[1].get_text(strip=True)
+
+             ep_links = {}
+             for a in ep_row.select('div.row > a.dlb.row'):
+                 host = a.get_text(strip=True)
+                 full_host = host_map.get(host, host)
+                 ep_links[full_host] = f"{base_url}{a['href']}"
+
+             episodes.append({
+                 'number': number,
+                 'title': title,
+                 'links': ep_links
+             })
+
+         mirrors = {
+             'name': name,
+             'season': season,
+             'episodes': episodes
+         }
+     except Exception as e:
+         info(f"Error parsing mirrors: {e}")
+
+     return mirrors
+
+
+ def sf_feed(shared_state, start_time, request_from, mirror=None):
+     releases = []
+     sf = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = check(sf)
+
+     if "sonarr" not in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     if mirror and mirror not in supported_mirrors:
+         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+               ' Skipping search!')
+         return releases
+
+     headers = {
+         'User-Agent': shared_state.values["user_agent"],
+     }
+
+     date = datetime.now()
+     days_to_cover = 2
+
+     while days_to_cover > 0:
+         days_to_cover -= 1
+         formatted_date = date.strftime('%Y-%m-%d')
+         date -= timedelta(days=1)
+
+         try:
+             response = requests.get(f"https://{sf}/updates/{formatted_date}#list", headers=headers, timeout=10)
+         except Exception as e:
+             info(f"Error loading {hostname.upper()} feed: {e} for {formatted_date}")
+             return releases
+
+         content = BeautifulSoup(response.text, "html.parser")
+         items = content.find_all("div", {"class": "row"}, style=re.compile("order"))
+
+         for item in items:
+             try:
+                 a = item.find("a", href=re.compile("/"))
+                 title = a.text
+
+                 if title:
+                     try:
+                         source = f"https://{sf}{a['href']}"
+                         mb = 0  # size info is missing here
+                         imdb_id = None  # imdb info is missing here
+
+                         payload = urlsafe_b64encode(
+                             f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+                     except:
+                         continue
+
+                     try:
+                         size = mb * 1024 * 1024
+                     except:
+                         continue
+
+                     try:
+                         published_time = item.find("div", {"class": "datime"}).text
+                         published = f"{formatted_date}T{published_time}:00"
+                     except:
+                         continue
+
+                     releases.append({
+                         "details": {
+                             "title": title,
+                             "hostname": hostname.lower(),
+                             "imdb_id": imdb_id,
+                             "link": link,
+                             "mirror": mirror,
+                             "size": size,
+                             "date": published,
+                             "source": source,
+                         },
+                         "type": "protected"
+                     })
+
+             except Exception as e:
+                 info(f"Error parsing {hostname.upper()} feed: {e}")
+
+     elapsed_time = time.time() - start_time
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+     return releases
+
+
+ def extract_size(text):
+     match = re.match(r"(\d+(\.\d+)?) ([A-Za-z]+)", text)
+     if match:
+         size = match.group(1)
+         unit = match.group(3)
+         return {"size": size, "sizeunit": unit}
+     else:
+         raise ValueError(f"Invalid size format: {text}")
+
+
+ def sf_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     releases = []
+     sf = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = check(sf)
+
+     imdb_id_in_search = shared_state.is_imdb_id(search_string)
+     if imdb_id_in_search:
+         search_string = get_localized_title(shared_state, imdb_id_in_search, 'de')
+         if not search_string:
+             info(f"Could not extract title from IMDb-ID {imdb_id_in_search}")
+             return releases
+         search_string = html.unescape(search_string)
+
+     if "sonarr" not in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     if mirror and mirror not in supported_mirrors:
+         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}.')
+         return releases
+
+     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+
+     # search API
+     url = f'https://{sf}/api/v2/search?q={search_string}&ql=DE'
+     headers = {'User-Agent': shared_state.values["user_agent"]}
+
+     try:
+         response = requests.get(url, headers=headers, timeout=10)
+         feed = response.json()
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} search: {e}")
+         return releases
+
+     results = feed.get('result', [])
+     for result in results:
+         sanitized_search_string = shared_state.sanitize_string(search_string)
+         sanitized_title = shared_state.sanitize_string(result.get("title", ""))
+         if not re.search(rf'\b{re.escape(sanitized_search_string)}\b', sanitized_title):
+             debug(f"Search string '{search_string}' doesn't match '{result.get('title')}'")
+             continue
+         debug(f"Matched search string '{search_string}' with result '{result.get('title')}'")
+
+         series_id = result.get("url_id")
+         context = "recents_sf"
+         threshold = 60
+         recently_searched = shared_state.get_recently_searched(shared_state, context, threshold)
+         entry = recently_searched.get(series_id, {})
+         ts = entry.get("timestamp")
+         use_cache = ts and ts > datetime.now() - timedelta(seconds=threshold)
+
+         if use_cache and entry.get("content"):
+             debug(f"Using cached content for '/{series_id}'")
+             data_html = entry["content"]
+             imdb_cached = entry.get("imdb_id")
+             if imdb_cached:
+                 imdb_id = imdb_cached
+             content = BeautifulSoup(data_html, "html.parser")
+         else:
+             # fresh fetch: record timestamp
+             entry = {"timestamp": datetime.now()}
+
+             # load series page
+             series_url = f"https://{sf}/{series_id}"
+             try:
+                 series_page = requests.get(series_url, headers=headers, timeout=10).text
+                 imdb_link = BeautifulSoup(series_page, "html.parser").find("a", href=re.compile(r"imdb\.com"))
+                 imdb_id = re.search(r'tt\d+', str(imdb_link)).group() if imdb_link else None
+                 season_id = re.findall(r"initSeason\('(.+?)\',", series_page)[0]
+             except Exception:
+                 debug(f"Failed to load or parse series page for {series_id}")
+                 continue
+
+             # fetch API HTML
+             epoch = str(datetime.now().timestamp()).replace('.', '')[:-3]
+             api_url = f'https://{sf}/api/v1/{season_id}/season/ALL?lang=ALL&_={epoch}'
+             debug(f"Requesting SF API URL: {api_url}")
+             try:
+                 api_resp = requests.get(api_url, headers=headers, timeout=10)
+                 resp_json = api_resp.json()
+                 if resp_json.get('error'):
+                     info(f"SF API error for series '{series_id}' at URL {api_url}: {resp_json.get('message')}")
+                     continue
+                 data_html = resp_json.get("html", "")
+             except Exception as e:
+                 info(f"Error loading SF API for {series_id} at {api_url}: {e}")
+                 continue
+
+             # cache content and imdb_id
+             entry["content"] = data_html
+             entry["imdb_id"] = imdb_id
+             recently_searched[series_id] = entry
+             shared_state.update(context, recently_searched)
+             content = BeautifulSoup(data_html, "html.parser")
+
+         # parse episodes/releases
+         for item in content.find_all("h3"):
+             try:
+                 details = item.parent.parent.parent
+                 title = details.find("small").text.strip()
+
+                 mirrors = parse_mirrors(f"https://{sf}", details)
+                 source = mirror and mirrors["season"].get(mirror) or next(iter(mirrors["season"].values()), None)
+                 if not source:
+                     debug(f"No source mirror found for {title}")
+                     continue
+
+                 try:
+                     size_string = item.find("span", {"class": "morespec"}).text.split("|")[1].strip()
+                     size_item = extract_size(size_string)
+                     mb = shared_state.convert_to_mb(size_item)
+                 except Exception as e:
+                     debug(f"Error extracting size for {title}: {e}")
+                     mb = 0
+
+                 if episode:
+                     try:
+                         if not re.search(r'S\d{1,3}E\d{1,3}', title):
+                             episodes_in_release = len(mirrors["episodes"])
+
+                             # Get the correct episode entry (episode numbers are 1-based, list index is 0-based)
+                             episode_data = next((e for e in mirrors["episodes"] if e["number"] == int(episode)),
+                                                 None)
+
+                             if episode_data:
+                                 title = re.sub(r'(S\d{1,3})', rf'\1E{int(episode):02d}', title)
+                                 if mirror:
+                                     if mirror not in episode_data["links"]:
+                                         debug(
+                                             f"Mirror '{mirror}' does not exist for '{title}' episode {episode}")
+                                     else:
+                                         source = episode_data["links"][mirror]
+
+                                 else:
+                                     source = next(iter(episode_data["links"].values()))
+                             else:
+                                 debug(f"Episode '{episode}' data not found in mirrors for '{title}'")
+
+                             if episodes_in_release:
+                                 try:
+                                     mb = shared_state.convert_to_mb({
+                                         "size": float(size_item["size"]) // episodes_in_release,
+                                         "sizeunit": size_item["sizeunit"]
+                                     })
+                                 except Exception as e:
+                                     debug(f"Error calculating size for {title}: {e}")
+                                     mb = 0
+                     except:
+                         continue
+
+                 # check down here on purpose, because the title may be modified at episode stage
+                 if not shared_state.is_valid_release(title,
+                                                      request_from,
+                                                      search_string,
+                                                      season,
+                                                      episode):
+                     continue
+
+                 payload = urlsafe_b64encode(f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode()).decode()
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+                 size_bytes = mb * 1024 * 1024
+
+                 releases.append({
+                     "details": {
+                         "title": title,
+                         "hostname": hostname.lower(),
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size_bytes,
+                         "date": one_hour_ago,
+                         "source": f"https://{sf}/{series_id}/{season}" if season else f"https://{sf}/{series_id}"
+                     },
+                     "type": "protected"
+                 })
+             except Exception as e:
+                 debug(f"Error parsing item for '{search_string}': {e}")
+
+     elapsed_time = time.time() - start_time
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+     return releases
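
Reviewer note: both of the new source modules serialize release metadata into a pipe-delimited string ("title|source|mirror|mb|password|imdb_id") and urlsafe-base64-encode it into the /download/?payload= link. A minimal sketch of the round trip follows; decode_payload is a hypothetical helper written to mirror the encode calls above (the real consumer lives elsewhere in the package and may differ):

    # Sketch of the payload round trip; decode_payload is illustrative only.
    from base64 import urlsafe_b64decode, urlsafe_b64encode

    def encode_payload(title, source, mirror, mb, password, imdb_id):
        raw = f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
        return urlsafe_b64encode(raw.encode("utf-8")).decode("utf-8")

    def decode_payload(payload):
        fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
        return dict(zip(("title", "source", "mirror", "mb", "password", "imdb_id"), fields))

    payload = encode_payload("Show.S01E01.German.1080p", "https://example.invalid/serie/x",
                             None, 0, "example.invalid", None)
    assert decode_payload(payload)["title"] == "Show.S01E01.German.1080p"

Note that None fields are stringified to "None" on encoding, and a literal "|" inside any field would corrupt the split on the decode side.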
quasarr/search/sources/sj.py
@@ -0,0 +1,213 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import json
+ import re
+ import time
+ from base64 import urlsafe_b64encode
+ from datetime import datetime, timedelta
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "sj"
+
+
+ def convert_to_rss_date(date_str):
+     try:
+         return datetime.fromisoformat(
+             date_str.replace("Z", "+00:00")
+         ).strftime("%a, %d %b %Y %H:%M:%S +0000")
+     except Exception:
+         return ""
+
+
+ def sj_feed(shared_state, start_time, request_from, mirror=None):
+     releases = []
+
+     if "sonarr" not in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+     password = sj_host
+
+     url = f"https://{sj_host}/api/releases/latest/0"
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+
+     try:
+         r = requests.get(url, headers=headers, timeout=10)
+         data = json.loads(r.content)
+     except Exception as e:
+         info(f"{hostname.upper()}: feed load error: {e}")
+         return releases
+
+     for release in data:
+         try:
+             title = release.get("name").rstrip(".")
+             if not title:
+                 continue
+
+             published = convert_to_rss_date(release.get("createdAt"))
+             if not published:
+                 continue
+
+             media = release.get("_media", {})
+             slug = media.get("slug")
+             if not slug:
+                 continue
+
+             series_url = f"https://{sj_host}/serie/{slug}"
+
+             mb = 0
+             size = 0
+             imdb_id = None
+
+             payload = urlsafe_b64encode(
+                 f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             ).decode("utf-8")
+
+             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+             releases.append({
+                 "details": {
+                     "title": title,
+                     "hostname": hostname,
+                     "imdb_id": imdb_id,
+                     "link": link,
+                     "mirror": mirror,
+                     "size": size,
+                     "date": published,
+                     "source": series_url
+                 },
+                 "type": "protected"
+             })
+
+         except Exception as e:
+             debug(f"{hostname.upper()}: feed parse error: {e}")
+             continue
+
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
+
+
+ def sj_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     releases = []
+
+     if "sonarr" not in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+     password = sj_host
+
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if not imdb_id:
+         return releases
+
+     localized_title = get_localized_title(shared_state, imdb_id, "de")
+     if not localized_title:
+         info(f"{hostname.upper()}: no localized title for IMDb {imdb_id}")
+         return releases
+
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+     search_url = f"https://{sj_host}/serie/search"
+     params = {"q": localized_title}
+
+     try:
+         r = requests.get(search_url, headers=headers, params=params, timeout=10)
+         soup = BeautifulSoup(r.content, "html.parser")
+         results = soup.find_all("a", href=re.compile(r"^/serie/"))
+     except Exception as e:
+         info(f"{hostname.upper()}: search load error: {e}")
+         return releases
+
+     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+     sanitized_search_string = shared_state.sanitize_string(localized_title)
+
+     for result in results:
+         try:
+             result_title = result.get_text(strip=True)
+
+             sanitized_title = shared_state.sanitize_string(result_title)
+
+             if not re.search(
+                     rf"\b{re.escape(sanitized_search_string)}\b",
+                     sanitized_title
+             ):
+                 debug(
+                     f"Search string '{localized_title}' doesn't match '{result_title}'"
+                 )
+                 continue
+
+             debug(
+                 f"Matched search string '{localized_title}' with result '{result_title}'"
+             )
+
+             series_url = f"https://{sj_host}{result['href']}"
+
+             r = requests.get(series_url, headers=headers, timeout=10)
+             media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
+             if not media_id_match:
+                 debug(f"{hostname.upper()}: no media id for {result_title}")
+                 continue
+
+             media_id = media_id_match.group(1)
+             api_url = f"https://{sj_host}/api/media/{media_id}/releases"
+
+             r = requests.get(api_url, headers=headers, timeout=10)
+             data = json.loads(r.content)
+
+             for season_block in data.values():
+                 for item in season_block.get("items", []):
+                     title = item.get("name").rstrip(".")
+                     if not title:
+                         continue
+
+                     if not shared_state.is_valid_release(
+                             title,
+                             request_from,
+                             search_string,
+                             season,
+                             episode
+                     ):
+                         continue
+
+                     published = convert_to_rss_date(item.get("createdAt"))
+                     if not published:
+                         debug(f"{hostname.upper()}: no published date for {title}")
+                         published = one_hour_ago
+
+                     mb = 0
+                     size = 0
+
+                     payload = urlsafe_b64encode(
+                         f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+                     ).decode("utf-8")
+
+                     link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                     releases.append({
+                         "details": {
+                             "title": title,
+                             "hostname": hostname,
+                             "imdb_id": imdb_id,
+                             "link": link,
+                             "mirror": mirror,
+                             "size": size,
+                             "date": published,
+                             "source": series_url
+                         },
+                         "type": "protected"
+                     })
+
+         except Exception as e:
+             debug(f"{hostname.upper()}: search parse error: {e}")
+             continue
+
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
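
Reviewer note: convert_to_rss_date reshapes the API's ISO-8601 createdAt stamps into RFC 822 dates as used in RSS feeds, and collapses anything unparsable to an empty string; sj_feed skips such entries, while sj_search falls back to one_hour_ago. A standalone check of that behavior (the sample timestamp is an assumption about the API's format):

    # Standalone check of convert_to_rss_date, copied from the diff above.
    from datetime import datetime

    def convert_to_rss_date(date_str):
        try:
            return datetime.fromisoformat(
                date_str.replace("Z", "+00:00")
            ).strftime("%a, %d %b %Y %H:%M:%S +0000")
        except Exception:
            return ""

    print(convert_to_rss_date("2024-05-01T12:30:00Z"))  # Wed, 01 May 2024 12:30:00 +0000
    print(repr(convert_to_rss_date(None)))              # '' -- bad input is swallowed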