quasarr-2.4.8-py3-none-any.whl → quasarr-2.4.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quasarr/__init__.py +134 -70
- quasarr/api/__init__.py +40 -31
- quasarr/api/arr/__init__.py +116 -108
- quasarr/api/captcha/__init__.py +262 -137
- quasarr/api/config/__init__.py +76 -46
- quasarr/api/packages/__init__.py +138 -102
- quasarr/api/sponsors_helper/__init__.py +29 -16
- quasarr/api/statistics/__init__.py +19 -19
- quasarr/downloads/__init__.py +165 -72
- quasarr/downloads/linkcrypters/al.py +35 -18
- quasarr/downloads/linkcrypters/filecrypt.py +107 -52
- quasarr/downloads/linkcrypters/hide.py +5 -6
- quasarr/downloads/packages/__init__.py +342 -177
- quasarr/downloads/sources/al.py +191 -100
- quasarr/downloads/sources/by.py +31 -13
- quasarr/downloads/sources/dd.py +27 -14
- quasarr/downloads/sources/dj.py +1 -3
- quasarr/downloads/sources/dl.py +126 -71
- quasarr/downloads/sources/dt.py +11 -5
- quasarr/downloads/sources/dw.py +28 -14
- quasarr/downloads/sources/he.py +32 -24
- quasarr/downloads/sources/mb.py +19 -9
- quasarr/downloads/sources/nk.py +14 -10
- quasarr/downloads/sources/nx.py +8 -18
- quasarr/downloads/sources/sf.py +45 -20
- quasarr/downloads/sources/sj.py +1 -3
- quasarr/downloads/sources/sl.py +9 -5
- quasarr/downloads/sources/wd.py +32 -12
- quasarr/downloads/sources/wx.py +35 -21
- quasarr/providers/auth.py +42 -37
- quasarr/providers/cloudflare.py +28 -30
- quasarr/providers/hostname_issues.py +2 -1
- quasarr/providers/html_images.py +2 -2
- quasarr/providers/html_templates.py +22 -14
- quasarr/providers/imdb_metadata.py +149 -80
- quasarr/providers/jd_cache.py +131 -39
- quasarr/providers/log.py +1 -1
- quasarr/providers/myjd_api.py +260 -196
- quasarr/providers/notifications.py +53 -41
- quasarr/providers/obfuscated.py +9 -4
- quasarr/providers/sessions/al.py +71 -55
- quasarr/providers/sessions/dd.py +21 -14
- quasarr/providers/sessions/dl.py +30 -19
- quasarr/providers/sessions/nx.py +23 -14
- quasarr/providers/shared_state.py +292 -141
- quasarr/providers/statistics.py +75 -43
- quasarr/providers/utils.py +33 -27
- quasarr/providers/version.py +45 -14
- quasarr/providers/web_server.py +10 -5
- quasarr/search/__init__.py +30 -18
- quasarr/search/sources/al.py +124 -73
- quasarr/search/sources/by.py +110 -59
- quasarr/search/sources/dd.py +57 -35
- quasarr/search/sources/dj.py +69 -48
- quasarr/search/sources/dl.py +159 -100
- quasarr/search/sources/dt.py +110 -74
- quasarr/search/sources/dw.py +121 -61
- quasarr/search/sources/fx.py +108 -62
- quasarr/search/sources/he.py +78 -49
- quasarr/search/sources/mb.py +96 -48
- quasarr/search/sources/nk.py +80 -50
- quasarr/search/sources/nx.py +91 -62
- quasarr/search/sources/sf.py +171 -106
- quasarr/search/sources/sj.py +69 -48
- quasarr/search/sources/sl.py +115 -71
- quasarr/search/sources/wd.py +67 -44
- quasarr/search/sources/wx.py +188 -123
- quasarr/storage/config.py +65 -52
- quasarr/storage/setup.py +238 -140
- quasarr/storage/sqlite_database.py +10 -4
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
- quasarr-2.4.9.dist-info/RECORD +81 -0
- quasarr-2.4.8.dist-info/RECORD +0 -81
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
- {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/dl.py
CHANGED
@@ -10,10 +10,14 @@ from html import unescape
 
 from bs4 import BeautifulSoup
 
-from quasarr.providers.hostname_issues import …
+from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
-from quasarr.providers.log import …
-from quasarr.providers.sessions.dl import …
+from quasarr.providers.log import debug, info
+from quasarr.providers.sessions.dl import (
+    fetch_via_requests_session,
+    invalidate_session,
+    retrieve_and_validate_session,
+)
 
 hostname = "dl"
 
@@ -43,11 +47,11 @@ def normalize_title_for_sonarr(title):
     """
     Normalize title for Sonarr by replacing spaces with dots.
     """
-    title = title.replace(…
-    title = re.sub(r…
-    title = re.sub(r…
-    title = re.sub(r…
-    title = title.strip(…
+    title = title.replace(" ", ".")
+    title = re.sub(r"\s*-\s*", "-", title)
+    title = re.sub(r"\.\-\.", "-", title)
+    title = re.sub(r"\.{2,}", ".", title)
+    title = title.strip(".")
     return title
 
 
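A concrete input makes the reformatted normalization above easier to follow. This is a standalone sketch of the same four steps with a hypothetical sample title, not code taken from the wheel:

```python
import re

def normalize_title_for_sonarr(title):
    # Dots for spaces, tidy dashes, collapse repeated dots, trim edge dots.
    title = title.replace(" ", ".")
    title = re.sub(r"\s*-\s*", "-", title)
    title = re.sub(r"\.\-\.", "-", title)
    title = re.sub(r"\.{2,}", ".", title)
    return title.strip(".")

print(normalize_title_for_sonarr("Some Show - S01E02  German 1080p WEB x264"))
# Some.Show-S01E02.German.1080p.WEB.x264
```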
@@ -75,14 +79,14 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
             info(f"Could not retrieve valid session for {host}")
             return releases
 
-        forum_url = f…
+        forum_url = f"https://www.{host}/forums/{forum}/?order=post_date&direction=desc"
         r = sess.get(forum_url, timeout=30)
         r.raise_for_status()
 
-        soup = BeautifulSoup(r.content, …
+        soup = BeautifulSoup(r.content, "html.parser")
 
         # Find all thread items in the forum
-        items = soup.select(…
+        items = soup.select("div.structItem.structItem--thread")
 
         if not items:
             debug(f"{hostname}: No entries found in Forum")
@@ -91,11 +95,11 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
         for item in items:
             try:
                 # Extract title from the thread
-                title_elem = item.select_one(…
+                title_elem = item.select_one("div.structItem-title a")
                 if not title_elem:
                     continue
 
-                title = …
+                title = "".join(title_elem.strings)
                 if not title:
                     continue
 
@@ -103,17 +107,17 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
                 title = normalize_title_for_sonarr(title)
 
                 # Extract thread URL
-                thread_url = title_elem.get(…
+                thread_url = title_elem.get("href")
                 if not thread_url:
                     continue
 
                 # Make sure URL is absolute
-                if thread_url.startswith(…
+                if thread_url.startswith("/"):
                     thread_url = f"https://www.{host}{thread_url}"
 
                 # Extract date and convert to RFC 2822 format
-                date_elem = item.select_one(…
-                iso_date = date_elem.get(…
+                date_elem = item.select_one("time.u-dt")
+                iso_date = date_elem.get("datetime", "") if date_elem else ""
                 published = convert_to_rss_date(iso_date)
 
                 mb = 0
@@ -121,23 +125,27 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
                 password = ""
 
                 payload = urlsafe_b64encode(
-                    f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(…
+                    f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                releases.append(
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
+                releases.append(
+                    {
+                        "details": {
+                            "title": title,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": mb * 1024 * 1024,
+                            "date": published,
+                            "source": thread_url,
+                        },
+                        "type": "protected",
+                    }
+                )
 
             except Exception as e:
                 debug(f"{hostname}: error parsing Forum item: {e}")
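As this hunk shows, every feed entry is serialized into one pipe-delimited string, urlsafe-base64 encoded, and exposed to the Arr client as a /download/?payload=... link on Quasarr's internal address. A minimal sketch of that round trip with hypothetical sample values (the package's own download handler that later consumes the payload is not part of this hunk):

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

# Hypothetical values, in the same field order as the encode() call above.
title = "Some.Show.S01E02.German.1080p.WEB.x264"
thread_url = "https://www.example.invalid/threads/12345/"
mirror, mb, password, imdb_id, hostname = None, 0, "", "tt1234567", "dl"

payload = urlsafe_b64encode(
    f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode("utf-8")
).decode("utf-8")

# Splitting on the delimiter recovers the individual fields again.
fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
print(fields[0], fields[6])  # title and source hostname
```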
@@ -145,7 +153,9 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
 
     except Exception as e:
         info(f"{hostname}: Forum feed error: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+        )
         invalidate_session(shared_state)
 
     elapsed = time.time() - start_time
@@ -158,13 +168,13 @@ def dl_feed(shared_state, start_time, request_from, mirror=None):
 
 def _replace_umlauts(text):
     replacements = {
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        "ä": "ae",
+        "ö": "oe",
+        "ü": "ue",
+        "Ä": "Ae",
+        "Ö": "Oe",
+        "Ü": "Ue",
+        "ß": "ss",
     }
 
     for umlaut, replacement in replacements.items():
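The reformatted mapping transliterates German umlauts and ß to their ASCII digraphs before titles are matched. A quick standalone illustration with a hypothetical input string:

```python
def replace_umlauts(text):
    # Same mapping as the replacements dict in the hunk above.
    replacements = {
        "ä": "ae", "ö": "oe", "ü": "ue",
        "Ä": "Ae", "Ö": "Oe", "Ü": "Ue", "ß": "ss",
    }
    for umlaut, replacement in replacements.items():
        text = text.replace(umlaut, replacement)
    return text

print(replace_umlauts("Über Grünkohl und Süßes"))
# Ueber Gruenkohl und Suesses
```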
@@ -173,8 +183,18 @@ def _replace_umlauts(text):
     return text
 
 
-def _search_single_page(…
-    …
+def _search_single_page(
+    shared_state,
+    host,
+    search_string,
+    search_id,
+    page_num,
+    imdb_id,
+    mirror,
+    request_from,
+    season,
+    episode,
+):
     """
     Search a single page. This function is called in parallel for each page.
     """
@@ -184,41 +204,41 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
 
     try:
         if page_num == 1:
-            search_params = {
-                …
-                'c[title_only]': 1
-            }
-            search_url = f'https://www.{host}/search/search'
+            search_params = {"keywords": search_string, "c[title_only]": 1}
+            search_url = f"https://www.{host}/search/search"
         else:
             if not search_id:
                 return page_releases, None
 
-            search_params = {
-                …
-                'q': search_string,
-                'o': 'relevance'
-            }
-            search_url = f'https://www.{host}/search/{search_id}/'
+            search_params = {"page": page_num, "q": search_string, "o": "relevance"}
+            search_url = f"https://www.{host}/search/{search_id}/"
 
-        search_response = fetch_via_requests_session(
-            …
-            …
-            …
+        search_response = fetch_via_requests_session(
+            shared_state,
+            method="GET",
+            target_url=search_url,
+            get_params=search_params,
+            timeout=10,
+        )
 
         if search_response.status_code != 200:
-            debug(…
+            debug(
+                f"{hostname}: [Page {page_num}] returned status {search_response.status_code}"
+            )
             return page_releases, None
 
         # Extract search ID from first page
         extracted_search_id = None
         if page_num == 1:
-            match = re.search(r…
+            match = re.search(r"/search/(\d+)/", search_response.url)
             if match:
                 extracted_search_id = match.group(1)
-                debug(…
+                debug(
+                    f"{hostname}: [Page 1] Extracted search ID: {extracted_search_id}"
+                )
 
-        soup = BeautifulSoup(search_response.text, …
-        result_items = soup.select(…
+        soup = BeautifulSoup(search_response.text, "html.parser")
+        result_items = soup.select("li.block-row")
 
         if not result_items:
             debug(f"{hostname}: [Page {page_num}] found 0 results")
@@ -228,40 +248,48 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
 
         for item in result_items:
             try:
-                title_elem = item.select_one(…
+                title_elem = item.select_one("h3.contentRow-title a")
                 if not title_elem:
                     continue
 
                 # Skip "Wird gesucht" threads
-                label = item.select_one(…
-                if label and …
+                label = item.select_one(".contentRow-minor .label")
+                if label and "wird gesucht" in label.get_text(strip=True).lower():
                     continue
 
-                title = …
+                title = "".join(title_elem.strings)
 
-                title = re.sub(r…
+                title = re.sub(r"\s+", " ", title)
                 title = unescape(title)
                 title_normalized = normalize_title_for_sonarr(title)
 
                 # Filter: Skip if no resolution or codec info (unless LazyLibrarian)
-                if …
-                    if not (…
+                if "lazylibrarian" not in request_from.lower():
+                    if not (
+                        RESOLUTION_REGEX.search(title_normalized)
+                        or CODEC_REGEX.search(title_normalized)
+                    ):
                         continue
 
                 # Filter: Skip XXX content unless explicitly searched for
-                if …
+                if (
+                    XXX_REGEX.search(title_normalized)
+                    and "xxx" not in search_string.lower()
+                ):
                     continue
 
-                thread_url = title_elem.get(…
-                if thread_url.startswith(…
+                thread_url = title_elem.get("href")
+                if thread_url.startswith("/"):
                     thread_url = f"https://www.{host}{thread_url}"
 
-                if not shared_state.is_valid_release(…
+                if not shared_state.is_valid_release(
+                    title_normalized, request_from, search_string, season, episode
+                ):
                     continue
 
                 # Extract date and convert to RFC 2822 format
-                date_elem = item.select_one(…
-                iso_date = date_elem.get(…
+                date_elem = item.select_one("time.u-dt")
+                iso_date = date_elem.get("datetime", "") if date_elem else ""
                 published = convert_to_rss_date(iso_date)
 
                 mb = 0
@@ -269,23 +297,26 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
 
                 payload = urlsafe_b64encode(
                     f"{title_normalized}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}|{hostname}".encode(
-                        "utf-8"…
+                        "utf-8"
+                    )
                 ).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
 
-                page_releases.append(
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
-                    …
+                page_releases.append(
+                    {
+                        "details": {
+                            "title": title_normalized,
+                            "hostname": hostname,
+                            "imdb_id": imdb_id,
+                            "link": link,
+                            "mirror": mirror,
+                            "size": mb * 1024 * 1024,
+                            "date": published,
+                            "source": thread_url,
+                        },
+                        "type": "protected",
+                    }
+                )
 
             except Exception as e:
                 debug(f"{hostname}: [Page {page_num}] error parsing item: {e}")
@@ -294,12 +325,21 @@ def _search_single_page(shared_state, host, search_string, search_id, page_num,
 
     except Exception as e:
         info(f"{hostname}: [Page {page_num}] error: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         return page_releases, None
 
 
-def dl_search(…
-    …
+def dl_search(
+    shared_state,
+    start_time,
+    request_from,
+    search_string,
+    mirror=None,
+    season=None,
+    episode=None,
+):
     """
     Search with sequential pagination to find best quality releases.
     Stops searching if a page returns 0 results or 10 seconds have elapsed.
@@ -309,7 +349,7 @@ def dl_search(shared_state, start_time, request_from, search_string,
 
     imdb_id = shared_state.is_imdb_id(search_string)
     if imdb_id:
-        title = get_localized_title(shared_state, imdb_id, …
+        title = get_localized_title(shared_state, imdb_id, "de")
         if not title:
             info(f"{hostname}: no title for IMDb {imdb_id}")
             return releases
@@ -319,7 +359,8 @@ def dl_search(shared_state, start_time, request_from, search_string,
     max_search_duration = 7
 
     debug(
-        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - max {max_search_duration}s"…
+        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - max {max_search_duration}s"
+    )
 
     try:
         sess = retrieve_and_validate_session(shared_state)
@@ -336,32 +377,50 @@ def dl_search(shared_state, start_time, request_from, search_string,
             page_num += 1
 
             page_releases, extracted_search_id = _search_single_page(
-                shared_state,
-                …
+                shared_state,
+                host,
+                search_string,
+                search_id,
+                page_num,
+                imdb_id,
+                mirror,
+                request_from,
+                season,
+                episode,
             )
 
             # Update search_id from first page
             if page_num == 1:
                 search_id = extracted_search_id
                 if not search_id:
-                    info(…
+                    info(
+                        f"{hostname}: Could not extract search ID, stopping pagination"
+                    )
                     break
 
             # Add releases from this page
             releases.extend(page_releases)
-            debug(…
+            debug(
+                f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases"
+            )
 
             # Stop if this page returned 0 results
             if len(page_releases) == 0:
-                debug(…
+                debug(
+                    f"{hostname}: [Page {page_num}] returned 0 results, stopping pagination"
+                )
                 break
 
     except Exception as e:
         info(f"{hostname}: search error: {e}")
-        mark_hostname_issue(…
+        mark_hostname_issue(
+            hostname, "search", str(e) if "e" in dir() else "Error occurred"
+        )
         invalidate_session(shared_state)
 
-    debug(…
+    debug(
+        f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}"
+    )
 
     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
|