quasarr 2.1.4__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release

This version of quasarr might be problematic.

Files changed (57)
  1. quasarr/api/__init__.py +94 -23
  2. quasarr/api/captcha/__init__.py +0 -12
  3. quasarr/api/config/__init__.py +22 -11
  4. quasarr/api/packages/__init__.py +32 -43
  5. quasarr/api/statistics/__init__.py +15 -15
  6. quasarr/downloads/__init__.py +9 -1
  7. quasarr/downloads/packages/__init__.py +6 -6
  8. quasarr/downloads/sources/al.py +6 -0
  9. quasarr/downloads/sources/by.py +29 -20
  10. quasarr/downloads/sources/dd.py +9 -1
  11. quasarr/downloads/sources/dl.py +3 -0
  12. quasarr/downloads/sources/dt.py +16 -7
  13. quasarr/downloads/sources/dw.py +22 -17
  14. quasarr/downloads/sources/he.py +11 -6
  15. quasarr/downloads/sources/mb.py +9 -3
  16. quasarr/downloads/sources/nk.py +9 -3
  17. quasarr/downloads/sources/nx.py +21 -17
  18. quasarr/downloads/sources/sf.py +21 -13
  19. quasarr/downloads/sources/sl.py +10 -2
  20. quasarr/downloads/sources/wd.py +18 -9
  21. quasarr/downloads/sources/wx.py +7 -11
  22. quasarr/providers/auth.py +1 -1
  23. quasarr/providers/cloudflare.py +1 -1
  24. quasarr/providers/hostname_issues.py +63 -0
  25. quasarr/providers/html_images.py +1 -18
  26. quasarr/providers/html_templates.py +104 -12
  27. quasarr/providers/obfuscated.py +11 -11
  28. quasarr/providers/sessions/al.py +27 -11
  29. quasarr/providers/sessions/dd.py +12 -4
  30. quasarr/providers/sessions/dl.py +19 -11
  31. quasarr/providers/sessions/nx.py +12 -4
  32. quasarr/providers/version.py +1 -1
  33. quasarr/search/sources/al.py +12 -1
  34. quasarr/search/sources/by.py +15 -4
  35. quasarr/search/sources/dd.py +22 -3
  36. quasarr/search/sources/dj.py +12 -1
  37. quasarr/search/sources/dl.py +12 -6
  38. quasarr/search/sources/dt.py +17 -4
  39. quasarr/search/sources/dw.py +15 -4
  40. quasarr/search/sources/fx.py +19 -6
  41. quasarr/search/sources/he.py +15 -2
  42. quasarr/search/sources/mb.py +15 -4
  43. quasarr/search/sources/nk.py +15 -2
  44. quasarr/search/sources/nx.py +15 -4
  45. quasarr/search/sources/sf.py +25 -8
  46. quasarr/search/sources/sj.py +14 -1
  47. quasarr/search/sources/sl.py +17 -2
  48. quasarr/search/sources/wd.py +15 -4
  49. quasarr/search/sources/wx.py +16 -18
  50. quasarr/storage/setup.py +150 -35
  51. {quasarr-2.1.4.dist-info → quasarr-2.2.0.dist-info}/METADATA +6 -3
  52. quasarr-2.2.0.dist-info/RECORD +82 -0
  53. {quasarr-2.1.4.dist-info → quasarr-2.2.0.dist-info}/WHEEL +1 -1
  54. quasarr-2.1.4.dist-info/RECORD +0 -81
  55. {quasarr-2.1.4.dist-info → quasarr-2.2.0.dist-info}/entry_points.txt +0 -0
  56. {quasarr-2.1.4.dist-info → quasarr-2.2.0.dist-info}/licenses/LICENSE +0 -0
  57. {quasarr-2.1.4.dist-info → quasarr-2.2.0.dist-info}/top_level.txt +0 -0
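
The change that threads through all of the search sources below is the new quasarr/providers/hostname_issues.py provider (+63 lines): every feed and search path now calls mark_hostname_issue(hostname, kind, message) when a request or parse step fails, and clear_hostname_issue(hostname) once releases come back. The module itself is not part of this excerpt, so the following is only a minimal in-memory sketch that matches the call sites visible in the hunks, not the actual implementation; the get_hostname_issues() accessor is likewise hypothetical.

# Hypothetical sketch of quasarr/providers/hostname_issues.py. The real module
# is not included in this excerpt; only the call signatures
# mark_hostname_issue(hostname, kind, message) and clear_hostname_issue(hostname)
# are evident from the changed search sources.
import threading
import time

_lock = threading.Lock()
_issues = {}  # hostname -> {"type": ..., "message": ..., "timestamp": ...}


def mark_hostname_issue(hostname, issue_type, message):
    # Remember the most recent feed/search failure for this hostname.
    with _lock:
        _issues[hostname] = {
            "type": issue_type,     # "feed" or "search"
            "message": message,     # stringified exception
            "timestamp": time.time(),
        }


def clear_hostname_issue(hostname):
    # Forget the stored issue once the hostname delivers results again.
    with _lock:
        _issues.pop(hostname, None)


def get_hostname_issues():
    # Hypothetical accessor: snapshot of all currently flagged hostnames.
    with _lock:
        return dict(_issues)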

quasarr/search/sources/dt.py

@@ -13,6 +13,7 @@ from urllib.parse import quote_plus
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -75,8 +76,9 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
     headers = {'User-Agent': shared_state.values["user_agent"]}

     try:
-        resp = requests.get(url, headers=headers, timeout=10).content
-        feed = BeautifulSoup(resp, "html.parser")
+        r = requests.get(url, headers=headers, timeout=30)
+        r.raise_for_status()
+        feed = BeautifulSoup(r.content, "html.parser")

         for article in feed.find_all('article'):
             try:
@@ -117,6 +119,7 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):

             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed: {e}")
+                mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
                 continue

             releases.append({
@@ -135,9 +138,13 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):

     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")

     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -188,8 +195,9 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
         )
         headers = {"User-Agent": shared_state.values["user_agent"]}

-        resp = requests.get(url, headers=headers, timeout=10).content
-        page = BeautifulSoup(resp, "html.parser")
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        page = BeautifulSoup(r.content, "html.parser")

         for article in page.find_all("article"):
             try:
@@ -241,6 +249,7 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None

             except Exception as e:
                 info(f"Error parsing {hostname.upper()} search item: {e}")
+                mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
                 continue

             releases.append({
@@ -259,7 +268,11 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None

     except Exception as e:
         info(f"Error loading {hostname.upper()} search page: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")

     elapsed = time.time() - start_time
     debug(f"Search time: {elapsed:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases
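
The hunks that follow (dw, fx, he, mb, nk, nx, sf and sj, going by their hostname variables and function names) apply the same three-part pattern as dt.py above: r.raise_for_status() after each request so HTTP error codes surface as exceptions, a longer 30-second timeout for feed requests while searches generally stay at 10 seconds, and mark_hostname_issue() in the error handlers paired with clear_hostname_issue() once releases are returned. Condensed into a single illustration (not code taken from the package), the shape of the change is:

# Illustration of the recurring 2.2.0 pattern, not code from the package.
import requests
from bs4 import BeautifulSoup

from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue


def example_feed(hostname, url, headers):
    releases = []
    try:
        r = requests.get(url, headers=headers, timeout=30)  # feeds: 30s; searches keep 10s
        r.raise_for_status()                                 # new: HTTP errors raise instead of parsing an error page
        soup = BeautifulSoup(r.content, "html.parser")
        # ... site-specific parsing appends dicts to `releases` ...
    except Exception as e:
        mark_hostname_issue(hostname, "feed", str(e))        # record the failure for this hostname
    if releases:
        clear_hostname_issue(hostname)                       # success clears any stored issue
    return releases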

quasarr/search/sources/dw.py

@@ -10,6 +10,7 @@ from base64 import urlsafe_b64encode
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug

 hostname = "dw"
@@ -77,8 +78,9 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
     }

     try:
-        request = requests.get(url, headers=headers, timeout=10).content
-        feed = BeautifulSoup(request, "html.parser")
+        r = requests.get(url, headers=headers, timeout=30)
+        r.raise_for_status()
+        feed = BeautifulSoup(r.content, "html.parser")
         articles = feed.find_all('h4')

         for article in articles:
@@ -102,6 +104,7 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed: {e}")
+                mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
                 continue

             releases.append({
@@ -120,10 +123,13 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):

     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -151,12 +157,14 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
     }

     try:
-        request = requests.get(url, headers=headers, timeout=10).content
-        search = BeautifulSoup(request, "html.parser")
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        search = BeautifulSoup(r.content, "html.parser")
         results = search.find_all('h4')

     except Exception as e:
         info(f"Error loading {hostname.upper()} search feed: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
         return releases

     imdb_id = shared_state.is_imdb_id(search_string)
@@ -191,6 +199,7 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
         except Exception as e:
             info(f"Error parsing {hostname.upper()} search: {e}")
+            mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
             continue

         releases.append({
@@ -210,4 +219,6 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases

quasarr/search/sources/fx.py

@@ -9,6 +9,7 @@ from base64 import urlsafe_b64encode
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.log import info, debug

 hostname = "fx"
@@ -46,11 +47,13 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):
     }

     try:
-        request = requests.get(url, headers=headers, timeout=10).content
-        feed = BeautifulSoup(request, "html.parser")
+        r = requests.get(url, headers=headers, timeout=30)
+        r.raise_for_status()
+        feed = BeautifulSoup(r.content, "html.parser")
         items = feed.find_all("article")
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
         return releases

     if items:
@@ -109,10 +112,13 @@ def fx_feed(shared_state, start_time, request_from, mirror=None):

             except Exception as e:
                 info(f"Error parsing {hostname.upper()} feed: {e}")
+                mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -136,23 +142,27 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None
     }

     try:
-        request = requests.get(url, headers=headers, timeout=10).content
-        search = BeautifulSoup(request, "html.parser")
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        search = BeautifulSoup(r.content, "html.parser")
         results = search.find('h2', class_='entry-title')

     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
         return releases

     if results:
         for result in results:
             try:
                 result_source = result["href"]
-                request = requests.get(result_source, headers=headers, timeout=10).content
-                feed = BeautifulSoup(request, "html.parser")
+                result_r = requests.get(result_source, headers=headers, timeout=10)
+                result_r.raise_for_status()
+                feed = BeautifulSoup(result_r.content, "html.parser")
                 items = feed.find_all("article")
             except Exception as e:
                 info(f"Error loading {hostname.upper()} feed: {e}")
+                mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
                 return releases

             for item in items:
@@ -216,8 +226,11 @@ def fx_search(shared_state, start_time, request_from, search_string, mirror=None

                 except Exception as e:
                     info(f"Error parsing {hostname.upper()} search: {e}")
+                    mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases

quasarr/search/sources/he.py

@@ -11,6 +11,7 @@ from html import unescape
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -90,6 +91,13 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
     else:
         imdb_id = None

+    if not source_search:
+        search_type = "feed"
+        timeout=30
+    else:
+        search_type = "search"
+        timeout = 10
+
     if season:
         source_search += f" S{int(season):02d}"

@@ -102,11 +110,13 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N
     params = {"s": source_search}

     try:
-        r = requests.get(url, headers=headers, params=params, timeout=10)
+        r = requests.get(url, headers=headers, params=params, timeout=timeout)
+        r.raise_for_status()
         soup = BeautifulSoup(r.content, 'html.parser')
         results = soup.find_all('div', class_='item')
     except Exception as e:
-        info(f"{hostname}: search load error: {e}")
+        info(f"{hostname}: {search_type} load error: {e}")
+        mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
         return releases

     if not results:
@@ -199,4 +209,7 @@ def he_search(shared_state, start_time, request_from, search_string="", mirror=N

     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases
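
Unlike the sources with separate *_feed() and *_search() functions, he.py (and nk.py below) appears to serve both paths through a single entry point, so 2.2.0 infers the mode from whether a search term was passed and derives the timeout and the issue label from it. Factored out, the selection amounts to something like this (the helper name is illustrative, not from the package):

# Illustrative helper only; he.py and nk.py inline this logic rather than
# defining a function for it.
def _mode_and_timeout(source_search):
    if not source_search:
        return "feed", 30   # empty search string means the feed is being built
    return "search", 10     # real searches use the shorter 10-second timeout

# Inside he_search()/nk_search() the result drives both the request and the issue label:
#   search_type, timeout = _mode_and_timeout(source_search)
#   r = requests.get(url, headers=headers, params=params, timeout=timeout)
#   ...
#   mark_hostname_issue(hostname, search_type, str(e))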

quasarr/search/sources/mb.py

@@ -12,6 +12,7 @@ from urllib.parse import quote_plus
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -151,13 +152,18 @@ def mb_feed(shared_state, start_time, request_from, mirror=None):
     url = f"https://{mb}/category/{section}/"
     headers = {'User-Agent': shared_state.values["user_agent"]}
     try:
-        html_doc = requests.get(url, headers=headers, timeout=10).content
-        soup = BeautifulSoup(html_doc, "html.parser")
+        r = requests.get(url, headers=headers, timeout=30)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.content, "html.parser")
         releases = _parse_posts(soup, shared_state, password, mirror_filter=mirror)
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
         releases = []
     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -181,8 +187,9 @@ def mb_search(shared_state, start_time, request_from, search_string, mirror=None
     url = f"https://{mb}/?s={q}&id=20&post_type=post"
     headers = {'User-Agent': shared_state.values["user_agent"]}
     try:
-        html_doc = requests.get(url, headers=headers, timeout=10).content
-        soup = BeautifulSoup(html_doc, "html.parser")
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        soup = BeautifulSoup(r.content, "html.parser")
         releases = _parse_posts(
             soup, shared_state, password, mirror_filter=mirror,
             is_search=True, request_from=request_from,
@@ -190,6 +197,10 @@ def mb_search(shared_state, start_time, request_from, search_string, mirror=None
         )
     except Exception as e:
         info(f"Error loading {hostname.upper()} search: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
         releases = []
     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases

quasarr/search/sources/nk.py

@@ -12,6 +12,7 @@ from urllib.parse import urljoin
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -81,6 +82,13 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
     else:
         imdb_id = None

+    if not source_search:
+        search_type = "feed"
+        timeout = 30
+    else:
+        search_type = "search"
+        timeout = 10
+
     if season:
         source_search += f" S{int(season):02d}"

@@ -92,11 +100,13 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
     data = {"search": source_search}

     try:
-        r = requests.post(url, headers=headers, data=data, timeout=20)
+        r = requests.post(url, headers=headers, data=data, timeout=timeout)
+        r.raise_for_status()
         soup = BeautifulSoup(r.content, 'html.parser')
         results = soup.find_all('div', class_='article-right')
     except Exception as e:
-        info(f"{hostname}: search load error: {e}")
+        info(f"{hostname}: {search_type} load error: {e}")
+        mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
         return releases

     if not results:
@@ -191,4 +201,7 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N

     elapsed = time.time() - start_time
     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases

quasarr/search/sources/nx.py

@@ -8,6 +8,7 @@ from base64 import urlsafe_b64encode

 import requests

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -38,10 +39,12 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):
     }

     try:
-        response = requests.get(url, headers, timeout=10)
-        feed = response.json()
+        r = requests.get(url, headers, timeout=30)
+        r.raise_for_status()
+        feed = r.json()
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
         return releases

     items = feed['result']['list']
@@ -91,10 +94,13 @@ def nx_feed(shared_state, start_time, request_from, mirror=None):

         except Exception as e:
             info(f"Error parsing {hostname.upper()} feed: {e}")
+            mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -129,10 +135,12 @@ def nx_search(shared_state, start_time, request_from, search_string, mirror=None
     }

     try:
-        response = requests.get(url, headers, timeout=10)
-        feed = response.json()
+        r = requests.get(url, headers, timeout=10)
+        r.raise_for_status()
+        feed = r.json()
     except Exception as e:
         info(f"Error loading {hostname.upper()} search: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
         return releases

     items = feed['result']['releases']
@@ -190,8 +198,11 @@ def nx_search(shared_state, start_time, request_from, search_string, mirror=None

         except Exception as e:
             info(f"Error parsing {hostname.upper()} search: {e}")
+            mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases

quasarr/search/sources/sf.py

@@ -10,6 +10,7 @@ from datetime import datetime, timedelta

 import requests

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -91,6 +92,7 @@ def parse_mirrors(base_url, entry):
         }
     except Exception as e:
         info(f"Error parsing mirrors: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")

     return mirrors

@@ -122,12 +124,14 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):
         date -= timedelta(days=1)

     try:
-        response = requests.get(f"https://{sf}/updates/{formatted_date}#list", headers, timeout=10)
+        r = requests.get(f"https://{sf}/updates/{formatted_date}#list", headers, timeout=30)
+        r.raise_for_status()
     except Exception as e:
         info(f"Error loading {hostname.upper()} feed: {e} for {formatted_date}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
         return releases

-    content = BeautifulSoup(response.text, "html.parser")
+    content = BeautifulSoup(r.text, "html.parser")
     items = content.find_all("div", {"class": "row"}, style=re.compile("order"))

     for item in items:
@@ -175,10 +179,13 @@ def sf_feed(shared_state, start_time, request_from, mirror=None):

         except Exception as e:
             info(f"Error parsing {hostname.upper()} feed: {e}")
+            mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -220,10 +227,12 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
     headers = {'User-Agent': shared_state.values["user_agent"]}

     try:
-        response = requests.get(url, headers=headers, timeout=10)
-        feed = response.json()
+        r = requests.get(url, headers=headers, timeout=10)
+        r.raise_for_status()
+        feed = r.json()
     except Exception as e:
         info(f"Error loading {hostname.upper()} search: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
         return releases

     results = feed.get('result', [])
@@ -257,12 +266,15 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
         # load series page
         series_url = f"https://{sf}/{series_id}"
         try:
-            series_page = requests.get(series_url, headers=headers, timeout=10).text
+            r = requests.get(series_url, headers=headers, timeout=10)
+            r.raise_for_status()
+            series_page = r.text
             imdb_link = BeautifulSoup(series_page, "html.parser").find("a", href=re.compile(r"imdb\.com"))
             imdb_id = re.search(r'tt\d+', str(imdb_link)).group() if imdb_link else None
             season_id = re.findall(r"initSeason\('(.+?)\',", series_page)[0]
-        except Exception:
+        except Exception as e:
             debug(f"Failed to load or parse series page for {series_id}")
+            mark_hostname_issue(hostname, "search", str(e))
             continue

         # fetch API HTML
@@ -270,14 +282,16 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None
         api_url = f'https://{sf}/api/v1/{season_id}/season/ALL?lang=ALL&_={epoch}'
         debug(f"Requesting SF API URL: {api_url}")
         try:
-            api_resp = requests.get(api_url, headers=headers, timeout=10)
-            resp_json = api_resp.json()
+            r = requests.get(api_url, headers=headers, timeout=10)
+            r.raise_for_status()
+            resp_json = r.json()
             if resp_json.get('error'):
                 info(f"SF API error for series '{series_id}' at URL {api_url}: {resp_json.get('message')}")
                 continue
             data_html = resp_json.get("html", "")
         except Exception as e:
             info(f"Error loading SF API for {series_id} at {api_url}: {e}")
+            mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
             continue

         # cache content and imdb_id
@@ -373,4 +387,7 @@ def sf_search(shared_state, start_time, request_from, search_string, mirror=None

     elapsed_time = time.time() - start_time
     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases

quasarr/search/sources/sj.py

@@ -11,6 +11,7 @@ from datetime import datetime, timedelta
 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
 from quasarr.providers.imdb_metadata import get_localized_title
 from quasarr.providers.log import info, debug

@@ -40,10 +41,12 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
     headers = {"User-Agent": shared_state.values["user_agent"]}

     try:
-        r = requests.get(url, headers=headers, timeout=10)
+        r = requests.get(url, headers=headers, timeout=30)
+        r.raise_for_status()
         data = json.loads(r.content)
     except Exception as e:
         info(f"{hostname.upper()}: feed load error: {e}")
+        mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
         return releases

     for release in data:
@@ -92,6 +95,9 @@ def sj_feed(shared_state, start_time, request_from, mirror=None):
             continue

     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases


@@ -120,10 +126,12 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None

     try:
         r = requests.get(search_url, headers=headers, params=params, timeout=10)
+        r.raise_for_status()
         soup = BeautifulSoup(r.content, "html.parser")
         results = soup.find_all("a", href=re.compile(r"^/serie/"))
     except Exception as e:
         info(f"{hostname.upper()}: search load error: {e}")
+        mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
         return releases

     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
@@ -151,6 +159,7 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
             series_url = f"https://{sj_host}{result['href']}"

             r = requests.get(series_url, headers=headers, timeout=10)
+            r.raise_for_status()
             media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
             if not media_id_match:
                 debug(f"{hostname.upper()}: no media id for {result_title}")
@@ -160,6 +169,7 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
             api_url = f"https://{sj_host}/api/media/{media_id}/releases"

             r = requests.get(api_url, headers=headers, timeout=10)
+            r.raise_for_status()
             data = json.loads(r.content)

             for season_block in data.values():
@@ -210,4 +220,7 @@ def sj_search(shared_state, start_time, request_from, search_string, mirror=None
             continue

     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+
+    if releases:
+        clear_hostname_issue(hostname)
     return releases