quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff shows the content of two publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.

Potentially problematic release.
Files changed (77)
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
quasarr/search/sources/dd.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import html
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timezone
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session
+
+hostname = "dd"
+supported_mirrors = ["ironfiles", "rapidgator", "filefactory"]
+
+
+def convert_to_rss_date(unix_timestamp):
+    parsed_date = datetime.fromtimestamp(unix_timestamp, tz=timezone.utc)
+    rss_date = parsed_date.strftime('%a, %d %b %Y %H:%M:%S %z')
+
+    return rss_date
+
+
+def extract_size(size_in_bytes):
+    return {"size": size_in_bytes, "sizeunit": "B"}
+
+
+def dd_feed(*args, **kwargs):
+    return dd_search(*args, **kwargs)
+
+
+def dd_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+    releases = []
+    dd = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = dd
+
+    if "arr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    dd_session = retrieve_and_validate_session(shared_state)
+    if not dd_session:
+        info(f"Could not retrieve valid session for {dd}")
+        return releases
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
+    imdb_id = shared_state.is_imdb_id(search_string)
+    if imdb_id:
+        search_string = get_localized_title(shared_state, imdb_id, 'en')
+        if not search_string:
+            info(f"Could not extract title from IMDb-ID {imdb_id}")
+            return releases
+        search_string = html.unescape(search_string)
+
+    qualities = [
+        "disk-480p",
+        "web-480p",
+        "movie-480p-x265",
+        "disk-1080p-x265",
+        "web-1080p",
+        "web-1080p-x265",
+        "web-2160p-x265-hdr",
+        "movie-1080p-x265",
+        "movie-2160p-webdl-x265-hdr"
+    ]
+
+    headers = {
+        'User-Agent': shared_state.values["user_agent"],
+    }
+
+    try:
+        release_list = []
+        for page in range(0, 100, 20):
+            url = f"https://{dd}/index/search/keyword/{search_string}/qualities/{','.join(qualities)}/from/{page}/search"
+
+            releases_on_page = dd_session.get(url, headers=headers, timeout=10).json()
+            if releases_on_page:
+                release_list.extend(releases_on_page)
+
+        for release in release_list:
+            try:
+                if release.get("fake"):
+                    debug(
+                        f"Release {release.get('release')} marked as fake. Invalidating {hostname.upper()} session...")
+                    create_and_persist_session(shared_state)
+                    return []
+                else:
+                    title = release.get("release")
+
+                    if not shared_state.is_valid_release(title,
+                                                         request_from,
+                                                         search_string,
+                                                         season,
+                                                         episode):
+                        continue
+
+                    imdb_id = release.get("imdbid", None)
+
+                    source = f"https://{dd}/"
+                    size_item = extract_size(release.get("size"))
+                    mb = shared_state.convert_to_mb(size_item) * 1024 * 1024
+                    published = convert_to_rss_date(release.get("when"))
+                    payload = urlsafe_b64encode(
+                        f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
+                    link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                    releases.append({
+                        "details": {
+                            "title": title,
+                            "hostname": hostname.lower(),
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": mb,
+                            "date": published,
+                            "source": source
+                        },
+                        "type": "protected"
+                    })
+            except Exception as e:
+                info(f"Error parsing {hostname.upper()} feed: {e}")
+                continue
+
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} feed: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+    return releases
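
Each of these source modules hands Radarr/Sonarr a `link` that points back at Quasarr's own `/download/` endpoint, with all release metadata packed into one pipe-delimited, base64url-encoded `payload`. A minimal standalone sketch of that round-trip — `decode_payload` is a hypothetical inverse for illustration (the real decoding lives in Quasarr's download handler), and a naive split would break if a field ever contained a literal `|`:

    from base64 import urlsafe_b64decode, urlsafe_b64encode

    def encode_payload(title, source, mirror, mb, password, imdb_id):
        # Same pipe-delimited layout the dd/dj/dl modules build;
        # note mirror=None serializes as the string "None", as above.
        raw = f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
        return urlsafe_b64encode(raw.encode("utf-8")).decode("utf-8")

    def decode_payload(payload):
        # Hypothetical inverse, assuming no field contains a literal '|'
        title, source, mirror, mb, password, imdb_id = urlsafe_b64decode(
            payload.encode("utf-8")).decode("utf-8").split("|")
        return {"title": title, "source": source, "mirror": mirror,
                "size": mb, "password": password, "imdb_id": imdb_id}

    payload = encode_payload("Movie.2160p.WEB.x265", "https://dd.example/",
                             None, 1073741824, "dd.example", "tt1234567")
    assert decode_payload(payload)["title"] == "Movie.2160p.WEB.x265"
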
quasarr/search/sources/dj.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import json
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timedelta
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "dj"
+
+
+def convert_to_rss_date(date_str):
+    try:
+        return datetime.fromisoformat(
+            date_str.replace("Z", "+00:00")
+        ).strftime("%a, %d %b %Y %H:%M:%S +0000")
+    except Exception:
+        return ""
+
+
+def dj_feed(shared_state, start_time, request_from, mirror=None):
+    releases = []
+
+    if "sonarr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+    password = sj_host
+
+    url = f"https://{sj_host}/api/releases/latest/0"
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+
+    try:
+        r = requests.get(url, headers=headers, timeout=10)
+        data = json.loads(r.content)
+    except Exception as e:
+        info(f"{hostname.upper()}: feed load error: {e}")
+        return releases
+
+    for release in data:
+        try:
+            title = release.get("name").rstrip(".")
+            if not title:
+                continue
+
+            published = convert_to_rss_date(release.get("createdAt"))
+            if not published:
+                continue
+
+            media = release.get("_media", {})
+            slug = media.get("slug")
+            if not slug:
+                continue
+
+            series_url = f"https://{sj_host}/serie/{slug}"
+
+            mb = 0
+            size = 0
+            imdb_id = None
+
+            payload = urlsafe_b64encode(
+                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+            ).decode("utf-8")
+
+            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname,
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": series_url
+                },
+                "type": "protected"
+            })
+
+        except Exception as e:
+            debug(f"{hostname.upper()}: feed parse error: {e}")
+            continue
+
+    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    return releases
+
+
+def dj_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+    releases = []
+
+    if "sonarr" not in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+    password = sj_host
+
+    imdb_id = shared_state.is_imdb_id(search_string)
+    if not imdb_id:
+        return releases
+
+    localized_title = get_localized_title(shared_state, imdb_id, "de")
+    if not localized_title:
+        info(f"{hostname.upper()}: no localized title for IMDb {imdb_id}")
+        return releases
+
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+    search_url = f"https://{sj_host}/serie/search"
+    params = {"q": localized_title}
+
+    try:
+        r = requests.get(search_url, headers=headers, params=params, timeout=10)
+        soup = BeautifulSoup(r.content, "html.parser")
+        results = soup.find_all("a", href=re.compile(r"^/serie/"))
+    except Exception as e:
+        info(f"{hostname.upper()}: search load error: {e}")
+        return releases
+
+    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+    sanitized_search_string = shared_state.sanitize_string(localized_title)
+
+    for result in results:
+        try:
+            result_title = result.get_text(strip=True)
+
+            sanitized_title = shared_state.sanitize_string(result_title)
+
+            if not re.search(
+                    rf"\b{re.escape(sanitized_search_string)}\b",
+                    sanitized_title
+            ):
+                debug(
+                    f"Search string '{localized_title}' doesn't match '{result_title}'"
+                )
+                continue
+
+            debug(
+                f"Matched search string '{localized_title}' with result '{result_title}'"
+            )
+
+            series_url = f"https://{sj_host}{result['href']}"
+
+            r = requests.get(series_url, headers=headers, timeout=10)
+            media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
+            if not media_id_match:
+                debug(f"{hostname.upper()}: no media id for {result_title}")
+                continue
+
+            media_id = media_id_match.group(1)
+            api_url = f"https://{sj_host}/api/media/{media_id}/releases"
+
+            r = requests.get(api_url, headers=headers, timeout=10)
+            data = json.loads(r.content)
+
+            for season_block in data.values():
+                for item in season_block.get("items", []):
+                    title = item.get("name").rstrip(".")
+                    if not title:
+                        continue
+
+                    if not shared_state.is_valid_release(
+                            title,
+                            request_from,
+                            search_string,
+                            season,
+                            episode
+                    ):
+                        continue
+
+                    published = convert_to_rss_date(item.get("createdAt"))
+                    if not published:
+                        debug(f"{hostname.upper()}: no published date for {title}")
+                        published = one_hour_ago
+
+                    mb = 0
+                    size = 0
+
+                    payload = urlsafe_b64encode(
+                        f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+                    ).decode("utf-8")
+
+                    link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                    releases.append({
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": size,
+                            "date": published,
+                            "source": series_url
+                        },
+                        "type": "protected"
+                    })
+
+        except Exception as e:
+            debug(f"{hostname.upper()}: search parse error: {e}")
+            continue
+
+    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    return releases
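
Note that this module's `convert_to_rss_date` hardcodes a `+0000` suffix, so it yields a correct RFC 2822 date only when the API's `createdAt` timestamps are UTC — which the `Z` replacement assumes. A quick standalone check of that behavior, with the function copied verbatim from the hunk above:

    from datetime import datetime

    def convert_to_rss_date(date_str):
        try:
            return datetime.fromisoformat(
                date_str.replace("Z", "+00:00")
            ).strftime("%a, %d %b %Y %H:%M:%S +0000")
        except Exception:
            return ""

    print(convert_to_rss_date("2025-01-05T12:34:56Z"))  # Sun, 05 Jan 2025 12:34:56 +0000
    print(convert_to_rss_date("not-a-date"))            # "" (the feed entry is skipped)
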
quasarr/search/sources/dl.py
@@ -0,0 +1,354 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime
+from html import unescape
+
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+from quasarr.providers.sessions.dl import retrieve_and_validate_session, invalidate_session, fetch_via_requests_session
+
+hostname = "dl"
+
+RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
+CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
+XXX_REGEX = re.compile(r"\.xxx\.", re.I)
+
+
+def convert_to_rss_date(iso_date_str):
+    """
+    Convert ISO format datetime to RSS date format.
+    DL date format: '2025-12-15T20:43:06+0100'
+    Returns: 'Sun, 15 Dec 2025 20:43:06 +0100'
+    Falls back to current time if conversion fails.
+    """
+    if not iso_date_str:
+        return datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+    try:
+        dt_obj = datetime.fromisoformat(iso_date_str)
+        return dt_obj.strftime("%a, %d %b %Y %H:%M:%S %z")
+    except Exception:
+        return datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+
+def normalize_title_for_sonarr(title):
+    """
+    Normalize title for Sonarr by replacing spaces with dots.
+    """
+    title = title.replace(' ', '.')
+    title = re.sub(r'\s*-\s*', '-', title)
+    title = re.sub(r'\.\-\.', '-', title)
+    title = re.sub(r'\.{2,}', '.', title)
+    title = title.strip('.')
+    return title
+
+
+def dl_feed(shared_state, start_time, request_from, mirror=None):
+    """
+    Parse the correct forum and return releases.
+    """
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if "lazylibrarian" in request_from.lower():
+        forum = "magazine-zeitschriften.72"
+    elif "radarr" in request_from.lower():
+        forum = "hd.8"
+    else:
+        forum = "hd.14"
+
+    if not host:
+        debug(f"{hostname}: hostname not configured")
+        return releases
+
+    try:
+        sess = retrieve_and_validate_session(shared_state)
+        if not sess:
+            info(f"Could not retrieve valid session for {host}")
+            return releases
+
+        forum_url = f'https://www.{host}/forums/{forum}/?order=post_date&direction=desc'
+        response = sess.get(forum_url, timeout=30)
+
+        if response.status_code != 200:
+            info(f"{hostname}: Forum request failed with {response.status_code}")
+            return releases
+
+        soup = BeautifulSoup(response.content, 'html.parser')
+
+        # Find all thread items in the forum
+        items = soup.select('div.structItem.structItem--thread')
+
+        if not items:
+            debug(f"{hostname}: No entries found in Forum")
+            return releases
+
+        for item in items:
+            try:
+                # Extract title from the thread
+                title_elem = item.select_one('div.structItem-title a')
+                if not title_elem:
+                    continue
+
+                title = title_elem.get_text(strip=True)
+                if not title:
+                    continue
+
+                title = unescape(title)
+                title = normalize_title_for_sonarr(title)
+
+                # Extract thread URL
+                thread_url = title_elem.get('href')
+                if not thread_url:
+                    continue
+
+                # Make sure URL is absolute
+                if thread_url.startswith('/'):
+                    thread_url = f"https://www.{host}{thread_url}"
+
+                # Extract date and convert to RFC 2822 format
+                date_elem = item.select_one('time.u-dt')
+                iso_date = date_elem.get('datetime', '') if date_elem else ''
+                published = convert_to_rss_date(iso_date)
+
+                mb = 0
+                imdb_id = None
+                password = ""
+
+                payload = urlsafe_b64encode(
+                    f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                ).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                releases.append({
+                    "details": {
+                        "title": title,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": mb * 1024 * 1024,
+                        "date": published,
+                        "source": thread_url
+                    },
+                    "type": "protected"
+                })
+
+            except Exception as e:
+                debug(f"{hostname}: error parsing Forum item: {e}")
+                continue
+
+    except Exception as e:
+        info(f"{hostname}: Forum feed error: {e}")
+        invalidate_session(shared_state)
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    return releases
+
+
+def _replace_umlauts(text):
+    replacements = {
+        'ä': 'ae',
+        'ö': 'oe',
+        'ü': 'ue',
+        'Ä': 'Ae',
+        'Ö': 'Oe',
+        'Ü': 'Ue',
+        'ß': 'ss'
+    }
+
+    for umlaut, replacement in replacements.items():
+        text = text.replace(umlaut, replacement)
+
+    return text
+
+
+def _search_single_page(shared_state, host, search_string, search_id, page_num, imdb_id, mirror, request_from, season,
+                        episode):
+    """
+    Search a single page. Called once per page by dl_search's sequential pagination loop.
+    """
+    page_releases = []
+
+    search_string = _replace_umlauts(search_string)
+
+    try:
+        if page_num == 1:
+            search_params = {
+                'keywords': search_string,
+                'c[title_only]': 1
+            }
+            search_url = f'https://www.{host}/search/search'
+        else:
+            if not search_id:
+                return page_releases, None
+
+            search_params = {
+                'page': page_num,
+                'q': search_string,
+                'o': 'relevance'
+            }
+            search_url = f'https://www.{host}/search/{search_id}/'
+
+        search_response = fetch_via_requests_session(shared_state, method="GET",
+                                                     target_url=search_url,
+                                                     get_params=search_params,
+                                                     timeout=10)
+
+        if search_response.status_code != 200:
+            debug(f"{hostname}: [Page {page_num}] returned status {search_response.status_code}")
+            return page_releases, None
+
+        # Extract search ID from first page
+        extracted_search_id = None
+        if page_num == 1:
+            match = re.search(r'/search/(\d+)/', search_response.url)
+            if match:
+                extracted_search_id = match.group(1)
+                debug(f"{hostname}: [Page 1] Extracted search ID: {extracted_search_id}")
+
+        soup = BeautifulSoup(search_response.text, 'html.parser')
+        result_items = soup.select('li.block-row')
+
+        if not result_items:
+            debug(f"{hostname}: [Page {page_num}] found 0 results")
+            return page_releases, extracted_search_id
+
+        debug(f"{hostname}: [Page {page_num}] found {len(result_items)} results")
+
+        for item in result_items:
+            try:
+                title_elem = item.select_one('h3.contentRow-title a')
+                if not title_elem:
+                    continue
+
+                title = title_elem.get_text(separator=' ', strip=True)
+                title = re.sub(r'\s+', ' ', title)
+                title = unescape(title)
+                title_normalized = normalize_title_for_sonarr(title)
+
+                # Filter: Skip if no resolution or codec info (unless LazyLibrarian)
+                if 'lazylibrarian' not in request_from.lower():
+                    if not (RESOLUTION_REGEX.search(title_normalized) or CODEC_REGEX.search(title_normalized)):
+                        continue
+
+                # Filter: Skip XXX content unless explicitly searched for
+                if XXX_REGEX.search(title_normalized) and 'xxx' not in search_string.lower():
+                    continue
+
+                thread_url = title_elem.get('href')
+                if thread_url.startswith('/'):
+                    thread_url = f"https://www.{host}{thread_url}"
+
+                if not shared_state.is_valid_release(title_normalized, request_from, search_string, season, episode):
+                    continue
+
+                # Extract date and convert to RFC 2822 format
+                date_elem = item.select_one('time.u-dt')
+                iso_date = date_elem.get('datetime', '') if date_elem else ''
+                published = convert_to_rss_date(iso_date)
+
+                mb = 0
+                password = ""
+
+                payload = urlsafe_b64encode(
+                    f"{title_normalized}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                ).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                page_releases.append({
+                    "details": {
+                        "title": title_normalized,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": mb * 1024 * 1024,
+                        "date": published,
+                        "source": thread_url
+                    },
+                    "type": "protected"
+                })
+
+            except Exception as e:
+                debug(f"{hostname}: [Page {page_num}] error parsing item: {e}")
+
+        return page_releases, extracted_search_id
+
+    except Exception as e:
+        info(f"{hostname}: [Page {page_num}] error: {e}")
+        return page_releases, None
+
+
+def dl_search(shared_state, start_time, request_from, search_string,
+              mirror=None, season=None, episode=None):
+    """
+    Search with sequential pagination (max 5 pages) to find best quality releases.
+    Stops searching if a page returns 0 results.
+    """
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    imdb_id = shared_state.is_imdb_id(search_string)
+    if imdb_id:
+        title = get_localized_title(shared_state, imdb_id, 'de')
+        if not title:
+            info(f"{hostname}: no title for IMDb {imdb_id}")
+            return releases
+        search_string = title
+
+    search_string = unescape(search_string)
+    max_pages = 5
+
+    debug(
+        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
+
+    try:
+        sess = retrieve_and_validate_session(shared_state)
+        if not sess:
+            info(f"Could not retrieve valid session for {host}")
+            return releases
+
+        search_id = None
+
+        # Sequential search through pages
+        for page_num in range(1, max_pages + 1):
+            page_releases, extracted_search_id = _search_single_page(
+                shared_state, host, search_string, search_id, page_num,
+                imdb_id, mirror, request_from, season, episode
+            )
+
+            # Update search_id from first page
+            if page_num == 1:
+                search_id = extracted_search_id
+                if not search_id:
+                    info(f"{hostname}: Could not extract search ID, stopping pagination")
+                    break
+
+            # Add releases from this page
+            releases.extend(page_releases)
+            debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
+
+            # Stop if this page returned 0 results
+            if len(page_releases) == 0:
+                debug(f"{hostname}: [Page {page_num}] returned 0 results, stopping pagination")
+                break
+
+    except Exception as e:
+        info(f"{hostname}: search error: {e}")
+        invalidate_session(shared_state)
+
+    debug(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+
+    return releases
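
`normalize_title_for_sonarr` and `_replace_umlauts` carry most of the title hygiene in this module. A standalone check, with both functions condensed from the hunk above:

    import re

    def _replace_umlauts(text):
        # Transliterate German umlauts before the forum search
        for umlaut, replacement in {'ä': 'ae', 'ö': 'oe', 'ü': 'ue',
                                    'Ä': 'Ae', 'Ö': 'Oe', 'Ü': 'Ue',
                                    'ß': 'ss'}.items():
            text = text.replace(umlaut, replacement)
        return text

    def normalize_title_for_sonarr(title):
        # Spaces become dots; '.-.' collapses to '-'; dot runs collapse to one dot
        title = title.replace(' ', '.')
        title = re.sub(r'\s*-\s*', '-', title)
        title = re.sub(r'\.\-\.', '-', title)
        title = re.sub(r'\.{2,}', '.', title)
        return title.strip('.')

    print(normalize_title_for_sonarr("Some Show - S01E02  1080p"))  # Some.Show-S01E02.1080p
    print(_replace_umlauts("Männer über Bord"))                     # Maenner ueber Bord
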