quasarr-1.20.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr might be problematic.

Files changed (72)
  1. quasarr/__init__.py +460 -0
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +373 -0
  4. quasarr/api/captcha/__init__.py +1075 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +267 -0
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +467 -0
  14. quasarr/downloads/sources/__init__.py +0 -0
  15. quasarr/downloads/sources/al.py +697 -0
  16. quasarr/downloads/sources/by.py +106 -0
  17. quasarr/downloads/sources/dd.py +76 -0
  18. quasarr/downloads/sources/dj.py +7 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +65 -0
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +51 -0
  24. quasarr/downloads/sources/nx.py +105 -0
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/providers/__init__.py +0 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +20 -0
  32. quasarr/providers/html_templates.py +241 -0
  33. quasarr/providers/imdb_metadata.py +142 -0
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +917 -0
  36. quasarr/providers/notifications.py +124 -0
  37. quasarr/providers/obfuscated.py +51 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/nx.py +76 -0
  42. quasarr/providers/shared_state.py +826 -0
  43. quasarr/providers/statistics.py +154 -0
  44. quasarr/providers/version.py +118 -0
  45. quasarr/providers/web_server.py +49 -0
  46. quasarr/search/__init__.py +153 -0
  47. quasarr/search/sources/__init__.py +0 -0
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +203 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dt.py +265 -0
  53. quasarr/search/sources/dw.py +214 -0
  54. quasarr/search/sources/fx.py +223 -0
  55. quasarr/search/sources/he.py +196 -0
  56. quasarr/search/sources/mb.py +195 -0
  57. quasarr/search/sources/nk.py +188 -0
  58. quasarr/search/sources/nx.py +197 -0
  59. quasarr/search/sources/sf.py +374 -0
  60. quasarr/search/sources/sj.py +213 -0
  61. quasarr/search/sources/sl.py +246 -0
  62. quasarr/search/sources/wd.py +208 -0
  63. quasarr/storage/__init__.py +0 -0
  64. quasarr/storage/config.py +163 -0
  65. quasarr/storage/setup.py +458 -0
  66. quasarr/storage/sqlite_database.py +80 -0
  67. quasarr-1.20.6.dist-info/METADATA +304 -0
  68. quasarr-1.20.6.dist-info/RECORD +72 -0
  69. quasarr-1.20.6.dist-info/WHEEL +5 -0
  70. quasarr-1.20.6.dist-info/entry_points.txt +2 -0
  71. quasarr-1.20.6.dist-info/licenses/LICENSE +21 -0
  72. quasarr-1.20.6.dist-info/top_level.txt +1 -0
quasarr/search/sources/dw.py
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import datetime
+import re
+import time
+from base64 import urlsafe_b64encode
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.log import info, debug
+
+hostname = "dw"
+supported_mirrors = ["1fichier", "rapidgator", "ddownload", "katfile"]
+
+
+def convert_to_rss_date(date_str):
+    german_months = ["Januar", "Februar", "März", "April", "Mai", "Juni",
+                     "Juli", "August", "September", "Oktober", "November", "Dezember"]
+    english_months = ["January", "February", "March", "April", "May", "June",
+                      "July", "August", "September", "October", "November", "December"]
+
+    for german, english in zip(german_months, english_months):
+        if german in date_str:
+            date_str = date_str.replace(german, english)
+            break
+
+    parsed_date = datetime.datetime.strptime(date_str, '%d. %B %Y / %H:%M')
+    # strptime() returns a naive datetime, so '%z' would render as an empty
+    # string; append the UTC offset explicitly to form a valid RFC 822 date
+    rss_date = parsed_date.strftime('%a, %d %b %Y %H:%M:%S') + ' +0000'
+
+    return rss_date
+
+
+def extract_size(text):
+    # First try the normal pattern: number + space + unit (e.g., "1024 MB")
+    match = re.match(r"(\d+)\s+([A-Za-z]+)", text)
+    if match:
+        size = match.group(1)
+        unit = match.group(2)
+        return {"size": size, "sizeunit": unit}
+
+    # If that fails, try a pattern with just the unit (e.g., "MB")
+    unit_match = re.match(r"([A-Za-z]+)", text.strip())
+    if unit_match:
+        unit = unit_match.group(1)
+        # Fall back to 0 when the size is missing
+        return {"size": "0", "sizeunit": unit}
+
+    # If neither pattern matches, raise the original error
+    raise ValueError(f"Invalid size format: {text}")
+
+
+def dw_feed(shared_state, start_time, request_from, mirror=None):
+    releases = []
+    dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = dw
+
+    if "arr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if "Radarr" in request_from:
+        feed_type = "videos/filme/"
+    else:
+        feed_type = "videos/serien/"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
+    url = f'https://{dw}/{feed_type}'
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        request = requests.get(url, headers=headers, timeout=10).content
+        feed = BeautifulSoup(request, "html.parser")
+        articles = feed.find_all('h4')
+
+        for article in articles:
+            try:
+                source = article.a["href"]
+                title = article.a.text.strip()
+
+                try:
+                    imdb_id = re.search(r'tt\d+', str(article)).group()
+                except Exception:
+                    imdb_id = None
+
+                size_info = article.find("span").text.strip()
+                size_item = extract_size(size_info)
+                mb = shared_state.convert_to_mb(size_item)
+                size = mb * 1024 * 1024
+                date = article.parent.parent.find("span", {"class": "date updated"}).text.strip()
+                published = convert_to_rss_date(date)
+                payload = urlsafe_b64encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+            except Exception as e:
+                info(f"Error parsing {hostname.upper()} feed: {e}")
+                continue
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} feed: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+    return releases
+
+
+def dw_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+    releases = []
+    dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = dw
+
+    if "arr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if "Radarr" in request_from:
+        search_type = "videocategory=filme"
+    else:
+        search_type = "videocategory=serien"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
+    url = f'https://{dw}/?s={search_string}&{search_type}'
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        request = requests.get(url, headers=headers, timeout=10).content
+        search = BeautifulSoup(request, "html.parser")
+        results = search.find_all('h4')
+
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} search feed: {e}")
+        return releases
+
+    imdb_id = shared_state.is_imdb_id(search_string)
+
+    if results:
+        for result in results:
+            try:
+                title = result.a.text.strip()
+
+                if not shared_state.is_valid_release(title,
+                                                     request_from,
+                                                     search_string,
+                                                     season,
+                                                     episode):
+                    continue
+
+                if not imdb_id:
+                    try:
+                        imdb_id = re.search(r'tt\d+', str(result)).group()
+                    except Exception:
+                        imdb_id = None
+
+                source = result.a["href"]
+                size_info = result.find("span").text.strip()
+                size_item = extract_size(size_info)
+                mb = shared_state.convert_to_mb(size_item)
+                size = mb * 1024 * 1024
+                date = result.parent.parent.find("span", {"class": "date updated"}).text.strip()
+                published = convert_to_rss_date(date)
+                payload = urlsafe_b64encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+            except Exception as e:
+                info(f"Error parsing {hostname.upper()} search: {e}")
+                continue
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+    return releases
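
A quick sanity check of the two dw.py helpers above (a minimal sketch; the input strings are illustrative):

    # German feed dates are translated month-by-month, then parsed and re-emitted as RFC 822
    print(convert_to_rss_date("12. März 2024 / 20:15"))
    # -> "Tue, 12 Mar 2024 20:15:00 +0000"

    # extract_size() degrades gracefully when the number is missing
    print(extract_size("1024 MB"))  # {'size': '1024', 'sizeunit': 'MB'}
    print(extract_size("MB"))       # {'size': '0', 'sizeunit': 'MB'}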
quasarr/search/sources/fx.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+import time
+from base64 import urlsafe_b64encode
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.log import info, debug
+
+hostname = "fx"
+supported_mirrors = ["rapidgator"]
+
+
+def extract_size(text):
+    match = re.match(r"(\d+)\s*([A-Za-z]+)", text)
+    if match:
+        size = match.group(1)
+        unit = match.group(2)
+        return {"size": size, "sizeunit": unit}
+    else:
+        raise ValueError(f"Invalid size format: {text}")
+
+
+def fx_feed(shared_state, start_time, request_from, mirror=None):
+    releases = []
+
+    fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
+
+    if "arr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
+    password = fx.split(".")[0]
+    url = f'https://{fx}/'
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        request = requests.get(url, headers=headers, timeout=10).content
+        feed = BeautifulSoup(request, "html.parser")
+        items = feed.find_all("article")
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} feed: {e}")
+        return releases
+
+    if items:
+        for item in items:
+            try:
+                article = BeautifulSoup(str(item), "html.parser")
+                try:
+                    source = article.find('h2', class_='entry-title').a["href"]
+                    # re.escape() keeps dots in the configured hostname from matching arbitrary characters
+                    titles = article.find_all("a", href=re.compile(r"(filecrypt|safe\." + re.escape(fx) + ")"))
+                except Exception:
+                    continue
+                # i indexes the size entry that parallels each title
+                for i, title in enumerate(titles):
+                    link = title["href"]
+                    title = shared_state.sanitize_title(title.text)
+
+                    try:
+                        imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
+                        imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
+                    except Exception:
+                        imdb_id = None
+
+                    try:
+                        size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
+                            i].next.next.text.replace("|", "").strip()
+                        size_item = extract_size(size_info)
+                        mb = shared_state.convert_to_mb(size_item)
+                        size = mb * 1024 * 1024
+                        payload = urlsafe_b64encode(
+                            f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+                        link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+                    except Exception:
+                        continue
+
+                    try:
+                        dates = article.find_all("time")
+                        for date in dates:
+                            published = date["datetime"]
+                    except Exception:
+                        continue
+
+                    releases.append({
+                        "details": {
+                            "title": title,
+                            "hostname": hostname.lower(),
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": source
+                        },
+                        "type": "protected"
+                    })
+
+            except Exception as e:
+                info(f"Error parsing {hostname.upper()} feed: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+    return releases
+
+
+def fx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+    releases = []
+    fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = fx.split(".")[0]
+
+    if "arr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
+    url = f'https://{fx}/?s={search_string}'
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        request = requests.get(url, headers=headers, timeout=10).content
+        search = BeautifulSoup(request, "html.parser")
+        results = search.find_all('h2', class_='entry-title')
+
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} search: {e}")
+        return releases
+
+    if results:
+        for result in results:
+            try:
+                result_source = result.a["href"]
+                request = requests.get(result_source, headers=headers, timeout=10).content
+                feed = BeautifulSoup(request, "html.parser")
+                items = feed.find_all("article")
+            except Exception as e:
+                info(f"Error loading {hostname.upper()} search result: {e}")
+                return releases
+
+            for item in items:
+                try:
+                    article = BeautifulSoup(str(item), "html.parser")
+                    try:
+                        titles = article.find_all("a", href=re.compile(r"filecrypt\."))
+                    except Exception:
+                        continue
+                    # i indexes the size entry that parallels each title
+                    for i, title in enumerate(titles):
+                        link = title["href"]
+                        title = shared_state.sanitize_title(title.text)
+
+                        if not shared_state.is_valid_release(title,
+                                                             request_from,
+                                                             search_string,
+                                                             season,
+                                                             episode):
+                            continue
+
+                        try:
+                            imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
+                            imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
+                        except Exception:
+                            imdb_id = None
+
+                        try:
+                            size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
+                                i].next.next.text.replace("|", "").strip()
+                            size_item = extract_size(size_info)
+                            mb = shared_state.convert_to_mb(size_item)
+                            size = mb * 1024 * 1024
+                            payload = urlsafe_b64encode(
+                                f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+                            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+                        except Exception:
+                            continue
+
+                        try:
+                            dates = article.find_all("time")
+                            for date in dates:
+                                published = date["datetime"]
+                        except Exception:
+                            continue
+
+                        releases.append({
+                            "details": {
+                                "title": title,
+                                "hostname": hostname.lower(),
+                                "imdb_id": imdb_id,
+                                "link": link,
+                                "mirror": mirror,
+                                "size": size,
+                                "date": published,
+                                "source": result_source
+                            },
+                            "type": "protected"
+                        })
+
+                except Exception as e:
+                    info(f"Error parsing {hostname.upper()} search: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+    return releases
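
Both dw.py and fx.py pack release metadata into a urlsafe-base64 payload consumed by Quasarr's /download/ endpoint. A minimal round-trip sketch (field order taken from the code above; the values are hypothetical and the decoder inside Quasarr itself may differ):

    from base64 import urlsafe_b64encode, urlsafe_b64decode

    # Hypothetical values for illustration only
    fields = ["Some.Movie.2024.1080p", "https://example.org/release", "rapidgator",
              "4096", "example-password", "tt1234567"]
    payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")

    title, source, mirror, mb, password, imdb_id = \
        urlsafe_b64decode(payload).decode("utf-8").split("|")
    # Caveat: a "|" inside any field would break this naive split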
quasarr/search/sources/he.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timedelta
+from html import unescape
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "he"
+supported_mirrors = ["rapidgator", "nitroflare"]
+
+
+def parse_posted_ago(txt):
+    try:
+        m = re.search(r"(\d+)\s*(sec|min|hour|day|week|month|year)s?", txt, re.IGNORECASE)
+        if not m:
+            return ''
+        value = int(m.group(1))
+        unit = m.group(2).lower()
+        now = datetime.utcnow()
+        if unit.startswith('sec'):
+            delta = timedelta(seconds=value)
+        elif unit.startswith('min'):
+            delta = timedelta(minutes=value)
+        elif unit.startswith('hour'):
+            delta = timedelta(hours=value)
+        elif unit.startswith('day'):
+            delta = timedelta(days=value)
+        elif unit.startswith('week'):
+            delta = timedelta(weeks=value)
+        elif unit.startswith('month'):
+            delta = timedelta(days=30 * value)
+        else:
+            delta = timedelta(days=365 * value)
+        return (now - delta).strftime("%a, %d %b %Y %H:%M:%S +0000")
+    except Exception:
+        return ''
+
+
+def extract_size(text: str) -> dict:
+    match = re.search(r"(\d+(?:[.,]\d+)?)\s*([A-Za-z]+)", text)
+    if match:
+        size = match.group(1).replace(',', '.')
+        unit = match.group(2)
+        return {"size": size, "sizeunit": unit}
+    return {"size": "0", "sizeunit": "MB"}
+
+
+def he_feed(*args, **kwargs):
+    return he_search(*args, **kwargs)
+
+
+def he_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if "arr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if "radarr" in request_from.lower():
+        tag = "movies"
+    else:
+        tag = "tv-shows"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by {hostname}.')
+        return releases
+
+    source_search = ""
+    if search_string != "":
+        imdb_id = shared_state.is_imdb_id(search_string)
+        if imdb_id:
+            local_title = get_localized_title(shared_state, imdb_id, 'en')
+            if not local_title:
+                info(f"{hostname}: no title for IMDb {imdb_id}")
+                return releases
+            source_search = local_title
+        else:
+            return releases
+        source_search = unescape(source_search)
+    else:
+        imdb_id = None
+
+    url = f'https://{host}/tag/{tag}/'
+
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+    params = {"s": source_search}
+
+    try:
+        r = requests.get(url, headers=headers, params=params, timeout=10)
+        soup = BeautifulSoup(r.content, 'html.parser')
+        results = soup.find_all('div', class_='item')
+    except Exception as e:
+        info(f"{hostname}: search load error: {e}")
+        return releases
+
+    if not results:
+        return releases
+
+    for result in results:
+        try:
+            data = result.find('div', class_='data')
+            if not data:
+                continue
+
+            headline = data.find('h5')
+            if not headline:
+                continue
+
+            a = headline.find('a', href=True)
+            if not a:
+                continue
+
+            source = a['href'].strip()
+
+            head_title = a.get_text(strip=True)
+            if not head_title:
+                continue
+
+            head_split = head_title.split(" – ")
+            title = head_split[0].strip()
+
+            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                continue
+
+            size_item = extract_size(head_split[1].strip())
+            mb = shared_state.convert_to_mb(size_item)
+
+            size = mb * 1024 * 1024
+
+            published = None
+            p_meta = data.find('p', class_='meta')
+            if p_meta:
+                posted_span = None
+                for sp in p_meta.find_all('span'):
+                    txt = sp.get_text(' ', strip=True)
+                    if txt.lower().startswith('posted') or 'ago' in txt.lower():
+                        posted_span = txt
+                        break
+
+                if posted_span:
+                    published = parse_posted_ago(posted_span)
+
+            if published is None:
+                continue
+
+            release_imdb_id = None
+            try:
+                r = requests.get(source, headers=headers, timeout=10)
+                soup = BeautifulSoup(r.content, 'html.parser')
+                imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
+                if imdb_link:
+                    release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
+                    if imdb_id and release_imdb_id != imdb_id:
+                        debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
+                        continue
+                else:
+                    debug(f"{hostname}: imdb link not found for title {title}")
+            except Exception as e:
+                debug(f"{hostname}: failed to determine imdb_id for title {title}: {e}")
+                continue
+
+            password = None
+            payload = urlsafe_b64encode(
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
+            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname,
+                    "imdb_id": release_imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+        except Exception as e:
+            debug(f"{hostname}: error parsing search result: {e}")
+            continue
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    return releases
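
For reference, parse_posted_ago() in he.py converts the site's relative "posted ... ago" strings into RFC 822 dates anchored at the current UTC time. A sketch of its behavior (output naturally depends on when it runs):

    print(parse_posted_ago("Posted 3 hours ago"))
    # e.g. "Mon, 01 Jan 2024 09:00:00 +0000" if run at 12:00 UTC on that date
    print(parse_posted_ago("no relative timestamp"))  # -> '' (empty string when nothing matches)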