quasarr 1.20.8__py3-none-any.whl → 1.21.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of quasarr has been flagged as potentially problematic.

@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import base64
+import pickle
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.log import info, debug
+
+hostname = "dl"
+
+
+def create_and_persist_session(shared_state):
+    """
+    Create and persist a session using user and password.
+
+    Args:
+        shared_state: Shared state object
+
+    Returns:
+        requests.Session or None
+    """
+    cfg = shared_state.values["config"]("Hostnames")
+    host = cfg.get(hostname)
+    credentials_cfg = shared_state.values["config"](hostname.upper())
+
+    user = credentials_cfg.get("user")
+    password = credentials_cfg.get("password")
+
+    if not user or not password:
+        info(f'Missing credentials for: "{hostname}" - user and password are required')
+        return None
+
+    sess = requests.Session()
+
+    # Set user agent
+    ua = shared_state.values["user_agent"]
+    sess.headers.update({'User-Agent': ua})
+
+    try:
+        # Step 1: Get login page to retrieve CSRF token
+        login_page_url = f'https://www.{host}/login/'
+        login_page = sess.get(login_page_url, timeout=30)
+
+        if login_page.status_code != 200:
+            info(f'Failed to load login page for: "{hostname}" - Status {login_page.status_code}')
+            return None
+
+        # Extract CSRF token from login form
+        soup = BeautifulSoup(login_page.text, 'html.parser')
+        csrf_input = soup.find('input', {'name': '_xfToken'})
+
+        if not csrf_input or not csrf_input.get('value'):
+            info(f'Could not find CSRF token on login page for: "{hostname}"')
+            return None
+
+        csrf_token = csrf_input['value']
+
+        # Step 2: Submit login form
+        login_data = {
+            'login': user,
+            'password': password,
+            '_xfToken': csrf_token,
+            'remember': '1',
+            '_xfRedirect': f'https://www.{host}/'
+        }
+
+        login_url = f'https://www.{host}/login/login'
+        login_response = sess.post(login_url, data=login_data, timeout=30)
+
+        # Step 3: Verify login success
+        # Check if we're logged in by accessing the main page
+        verify_response = sess.get(f'https://www.{host}/', timeout=30)
+
+        if 'data-logged-in="true"' not in verify_response.text:
+            info(f'Login verification failed for: "{hostname}" - invalid credentials or login failed')
+            return None
+
+        info(f'Session successfully created for: "{hostname}" using user/password')
+    except Exception as e:
+        info(f'Failed to create session for: "{hostname}" - {e}')
+        return None
+
+    # Persist session to database
+    blob = pickle.dumps(sess)
+    token = base64.b64encode(blob).decode("utf-8")
+    shared_state.values["database"]("sessions").update_store(hostname, token)
+
+    return sess
+
+
+def retrieve_and_validate_session(shared_state):
+    """
+    Retrieve session from database or create a new one.
+
+    Args:
+        shared_state: Shared state object
+
+    Returns:
+        requests.Session or None
+    """
+    db = shared_state.values["database"]("sessions")
+    token = db.retrieve(hostname)
+    if not token:
+        return create_and_persist_session(shared_state)
+
+    try:
+        blob = base64.b64decode(token.encode("utf-8"))
+        sess = pickle.loads(blob)
+        if not isinstance(sess, requests.Session):
+            raise ValueError("Not a Session")
+    except Exception as e:
+        debug(f"{hostname}: session load failed: {e}")
+        return create_and_persist_session(shared_state)
+
+    return sess
+
+
+def invalidate_session(shared_state):
+    """
+    Invalidate the current session.
+
+    Args:
+        shared_state: Shared state object
+    """
+    db = shared_state.values["database"]("sessions")
+    db.delete(hostname)
+    debug(f'Session for "{hostname}" marked as invalid!')
+
+
+def _persist_session_to_db(shared_state, sess):
+    """
+    Serialize & store the given requests.Session into the database under `hostname`.
+
+    Args:
+        shared_state: Shared state object
+        sess: requests.Session to persist
+    """
+    blob = pickle.dumps(sess)
+    token = base64.b64encode(blob).decode("utf-8")
+    shared_state.values["database"]("sessions").update_store(hostname, token)
+
+
+def fetch_via_requests_session(shared_state, method: str, target_url: str, post_data: dict = None, get_params: dict = None, timeout: int = 30):
+    """
+    Execute request using the session.
+
+    Args:
+        shared_state: Shared state object
+        method: "GET" or "POST"
+        target_url: URL to fetch
+        post_data: POST data (for POST requests)
+        get_params: URL parameters (for GET requests)
+        timeout: Request timeout in seconds
+
+    Returns:
+        Response object
+    """
+    sess = retrieve_and_validate_session(shared_state)
+    if not sess:
+        raise Exception(f"Could not retrieve valid session for {hostname}")
+
+    # Execute request
+    if method.upper() == "GET":
+        resp = sess.get(target_url, params=get_params, timeout=timeout)
+    else:  # POST
+        resp = sess.post(target_url, data=post_data, timeout=timeout)
+
+    # Re-persist cookies, since the site might have modified them during the request
+    _persist_session_to_db(shared_state, sess)
+
+    return resp
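
For orientation, the new sessions module above persists the logged-in requests.Session by pickling it and base64-encoding the bytes before writing them to the sessions database, and reverses the process on load. A minimal, self-contained sketch of that round-trip (a plain dict stands in for Quasarr's database helper, which is an assumption made purely for illustration):

```python
import base64
import pickle

import requests

store = {}  # hypothetical stand-in for shared_state.values["database"]("sessions")

# Persist: same scheme as _persist_session_to_db()
sess = requests.Session()
sess.headers.update({"User-Agent": "Quasarr"})
store["dl"] = base64.b64encode(pickle.dumps(sess)).decode("utf-8")

# Restore: same scheme as retrieve_and_validate_session()
restored = pickle.loads(base64.b64decode(store["dl"].encode("utf-8")))
assert isinstance(restored, requests.Session)  # headers and cookies survive the round-trip
```

Unpickling implies trusting whatever sits in that store; the module guards against corrupt entries with the isinstance check and falls back to a fresh login when loading fails.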
@@ -188,6 +188,7 @@ def connect_device():
 
 def get_device():
     attempts = 0
+    last_backoff_change = 0  # Track when we last changed backoff strategy
 
     while True:
         try:
@@ -199,14 +200,30 @@ def get_device():
 
         update("device", False)
 
-        if attempts % 10 == 0:
-            info(
-                f"WARNING: {attempts} consecutive JDownloader connection errors. Please check your credentials!")
-        time.sleep(3)
+        # Determine sleep time based on failure count
+        if attempts <= 10:
+            # First 10 failures: 3 seconds
+            sleep_time = 3
+            if attempts == 10:
+                info(f"WARNING: {attempts} consecutive JDownloader connection errors. Switching to 1-minute intervals.")
+        elif attempts <= 15:
+            # Next 5 failures (11-15): 1 minute
+            sleep_time = 60
+            if attempts % 10 == 0:
+                info(f"WARNING: {attempts} consecutive JDownloader connection errors. Please check your credentials!")
+            if attempts == 15:
+                info(f"WARNING: Still failing after {attempts} attempts. Switching to 5-minute intervals.")
+        else:
+            # After 15 failures: 5 minutes
+            sleep_time = 300
+            if attempts % 10 == 0:
+                info(f"WARNING: {attempts} consecutive JDownloader connection errors. Please check your credentials!")
 
         if connect_device():
             break
 
+        time.sleep(sleep_time)
+
     return values["device"]
 
 
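Taken together, the new branches implement a three-step backoff schedule driven solely by the consecutive-failure counter. A small standalone sketch of that schedule (a hypothetical helper written for illustration, not part of the release):

```python
def backoff_seconds(attempts: int) -> int:
    """Mirror the sleep intervals introduced in the hunk above."""
    if attempts <= 10:
        return 3    # first 10 failures: retry every 3 seconds
    if attempts <= 15:
        return 60   # failures 11-15: retry once per minute
    return 300      # beyond 15 failures: retry every 5 minutes


# attempts 1-10 -> 3 s, 11-15 -> 60 s, 16+ -> 300 s
assert [backoff_seconds(n) for n in (1, 10, 11, 15, 16)] == [3, 3, 60, 60, 300]
```
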
@@ -608,7 +625,6 @@ def is_valid_release(title: str,
         debug(f"Skipping {title!r} as it doesn't match sanitized search string: {search_string!r}")
         return False
 
-
     # if it's a movie search, don't allow any TV show titles (check for NO season or episode tags in the title)
     if is_movie_search:
         if not MOVIE_REGEX.match(title):
@@ -8,7 +8,7 @@ import requests
 
 
 def get_version():
-    return "1.20.8"
+    return "1.21.1"
 
 
 def get_latest_version():
@@ -10,6 +10,7 @@ from quasarr.search.sources.al import al_feed, al_search
 from quasarr.search.sources.by import by_feed, by_search
 from quasarr.search.sources.dd import dd_search, dd_feed
 from quasarr.search.sources.dj import dj_search, dj_feed
+from quasarr.search.sources.dl import dl_search, dl_feed
 from quasarr.search.sources.dt import dt_feed, dt_search
 from quasarr.search.sources.dw import dw_feed, dw_search
 from quasarr.search.sources.fx import fx_feed, fx_search
@@ -21,6 +22,7 @@ from quasarr.search.sources.sf import sf_feed, sf_search
 from quasarr.search.sources.sj import sj_search, sj_feed
 from quasarr.search.sources.sl import sl_feed, sl_search
 from quasarr.search.sources.wd import wd_feed, wd_search
+from quasarr.search.sources.wx import wx_feed, wx_search
 
 
 def get_search_results(shared_state, request_from, imdb_id="", search_phrase="", mirror=None, season="", episode=""):
@@ -34,6 +36,7 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
     al = shared_state.values["config"]("Hostnames").get("al")
     by = shared_state.values["config"]("Hostnames").get("by")
     dd = shared_state.values["config"]("Hostnames").get("dd")
+    dl = shared_state.values["config"]("Hostnames").get("dl")
     dt = shared_state.values["config"]("Hostnames").get("dt")
     dj = shared_state.values["config"]("Hostnames").get("dj")
     dw = shared_state.values["config"]("Hostnames").get("dw")
@@ -46,6 +49,7 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
     sj = shared_state.values["config"]("Hostnames").get("sj")
     sl = shared_state.values["config"]("Hostnames").get("sl")
     wd = shared_state.values["config"]("Hostnames").get("wd")
+    wx = shared_state.values["config"]("Hostnames").get("wx")
 
     start_time = time.time()
 
@@ -56,6 +60,7 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
         (al, al_search),
         (by, by_search),
         (dd, dd_search),
+        (dl, dl_search),
         (dt, dt_search),
         (dj, dj_search),
         (dw, dw_search),
@@ -68,11 +73,13 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
         (sj, sj_search),
         (sl, sl_search),
         (wd, wd_search),
+        (wx, wx_search),
     ]
 
     # LazyLibrarian uses search_phrase for searches
     phrase_map = [
         (by, by_search),
+        (dl, dl_search),
         (dt, dt_search),
         (nx, nx_search),
         (sl, sl_search),
@@ -85,6 +92,7 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
         (by, by_feed),
         (dd, dd_feed),
         (dj, dj_feed),
+        (dl, dl_feed),
         (dt, dt_feed),
         (dw, dw_feed),
         (fx, fx_feed),
@@ -96,6 +104,7 @@ def get_search_results(shared_state, request_from, imdb_id="", search_phrase="",
         (sj, sj_feed),
         (sl, sl_feed),
         (wd, wd_feed),
+        (wx, wx_feed),
     ]
 
     if imdb_id:  # only Radarr/Sonarr are using imdb_id
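
The registration hunks above follow the existing pattern: each source's hostname is read from the Hostnames config (resolving to None when unset) and paired with its search or feed callable, so only configured sources take part in a query. A simplified, hypothetical sketch of that dispatch idea (not the actual get_search_results body):

```python
# Hypothetical illustration of the (hostname, callable) map pattern used above.
def run_configured_sources(source_map, *args, **kwargs):
    results = []
    for host_value, func in source_map:
        if host_value:  # unconfigured hostnames resolve to None and are skipped
            results.extend(func(*args, **kwargs))
    return results
```
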
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+import time
+from base64 import urlsafe_b64encode
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from datetime import datetime
+from html import unescape
+
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+from quasarr.providers.sessions.dl import retrieve_and_validate_session, invalidate_session, fetch_via_requests_session
+
+hostname = "dl"
+supported_mirrors = []
+
+
+def normalize_title_for_sonarr(title):
+    """
+    Normalize title for Sonarr by replacing spaces with dots.
+    """
+    title = title.replace(' ', '.')
+    title = re.sub(r'\s*-\s*', '-', title)
+    title = re.sub(r'\.\-\.', '-', title)
+    title = re.sub(r'\.{2,}', '.', title)
+    title = title.strip('.')
+    return title
+
+
+def dl_feed(shared_state, start_time, request_from, mirror=None):
+    """
+    Parse the correct forum and return releases.
+    """
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if "lazylibrarian" in request_from.lower():
+        forum = "magazine-zeitschriften.72"
+    elif "radarr" in request_from.lower():
+        forum = "hd.8"
+    else:
+        forum = "hd.14"
+
+    if not host:
+        debug(f"{hostname}: hostname not configured")
+        return releases
+
+    try:
+        sess = retrieve_and_validate_session(shared_state)
+        if not sess:
+            info(f"Could not retrieve valid session for {host}")
+            return releases
+
+        forum_url = f'https://www.{host}/forums/{forum}/?order=post_date&direction=desc'
+        response = sess.get(forum_url, timeout=30)
+
+        if response.status_code != 200:
+            info(f"{hostname}: Forum request failed with {response.status_code}")
+            return releases
+
+        soup = BeautifulSoup(response.content, 'html.parser')
+
+        # Find all thread items in the forum
+        items = soup.select('div.structItem.structItem--thread')
+
+        if not items:
+            debug(f"{hostname}: No entries found in Forum")
+            return releases
+
+        for item in items:
+            try:
+                # Extract title from the thread
+                title_elem = item.select_one('div.structItem-title a')
+                if not title_elem:
+                    continue
+
+                title = title_elem.get_text(strip=True)
+                if not title:
+                    continue
+
+                title = unescape(title)
+                title = normalize_title_for_sonarr(title)
+
+                # Extract thread URL
+                thread_url = title_elem.get('href')
+                if not thread_url:
+                    continue
+
+                # Make sure URL is absolute
+                if thread_url.startswith('/'):
+                    thread_url = f"https://www.{host}{thread_url}"
+
+                # Extract date and convert to RFC 2822 format
+                date_str = None
+                date_elem = item.select_one('time.u-dt')
+                if date_elem:
+                    iso_date = date_elem.get('datetime', '')
+                    if iso_date:
+                        try:
+                            # Parse ISO format and convert to RFC 2822
+                            dt = datetime.fromisoformat(iso_date.replace('Z', '+00:00'))
+                            date_str = dt.strftime("%a, %d %b %Y %H:%M:%S %z")
+                        except Exception:
+                            date_str = None
+
+                # Fallback: use current time if no date found
+                if not date_str:
+                    date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %z")
+
+                mb = 0
+                imdb_id = None
+                password = ""
+
+                payload = urlsafe_b64encode(
+                    f"{title}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                ).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                releases.append({
+                    "details": {
+                        "title": title,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": mb * 1024 * 1024,
+                        "date": date_str,
+                        "source": thread_url
+                    },
+                    "type": "protected"
+                })
+
+            except Exception as e:
+                debug(f"{hostname}: error parsing Forum item: {e}")
+                continue
+
+    except Exception as e:
+        info(f"{hostname}: Forum feed error: {e}")
+        invalidate_session(shared_state)
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    return releases
+
+
+def _replace_umlauts(text):
+    replacements = {
+        'ä': 'ae',
+        'ö': 'oe',
+        'ü': 'ue',
+        'Ä': 'Ae',
+        'Ö': 'Oe',
+        'Ü': 'Ue',
+        'ß': 'ss'
+    }
+
+    for umlaut, replacement in replacements.items():
+        text = text.replace(umlaut, replacement)
+
+    return text
+
+
+def _search_single_page(shared_state, host, search_string, search_id, page_num, imdb_id, mirror, request_from, season,
+                        episode):
+    """
+    Search a single page. This function is called in parallel for each page.
+    """
+    page_releases = []
+
+    search_string = _replace_umlauts(search_string)
+
+    try:
+        if page_num == 1:
+            search_params = {
+                'keywords': search_string,
+                'c[title_only]': 1
+            }
+            search_url = f'https://www.{host}/search/search'
+        else:
+            if not search_id:
+                return page_releases, None
+
+            search_params = {
+                'page': page_num,
+                'q': search_string,
+                'o': 'relevance'
+            }
+            search_url = f'https://www.{host}/search/{search_id}/'
+
+        search_response = fetch_via_requests_session(shared_state, method="GET",
+                                                     target_url=search_url,
+                                                     get_params=search_params,
+                                                     timeout=10)
+
+        if search_response.status_code != 200:
+            debug(f"{hostname}: [Page {page_num}] returned status {search_response.status_code}")
+            return page_releases, None
+
+        # Extract search ID from first page
+        extracted_search_id = None
+        if page_num == 1:
+            match = re.search(r'/search/(\d+)/', search_response.url)
+            if match:
+                extracted_search_id = match.group(1)
+                debug(f"{hostname}: [Page 1] Extracted search ID: {extracted_search_id}")
+
+        soup = BeautifulSoup(search_response.text, 'html.parser')
+        result_items = soup.select('li.block-row')
+
+        if not result_items:
+            debug(f"{hostname}: [Page {page_num}] found 0 results")
+            return page_releases, extracted_search_id
+
+        debug(f"{hostname}: [Page {page_num}] found {len(result_items)} results")
+
+        for item in result_items:
+            try:
+                title_elem = item.select_one('h3.contentRow-title a')
+                if not title_elem:
+                    continue
+
+                title = title_elem.get_text(separator=' ', strip=True)
+                title = re.sub(r'\s+', ' ', title)
+                title = unescape(title)
+                title_normalized = normalize_title_for_sonarr(title)
+
+                thread_url = title_elem.get('href')
+                if thread_url.startswith('/'):
+                    thread_url = f"https://www.{host}{thread_url}"
+
+                if not shared_state.is_valid_release(title_normalized, request_from, search_string, season, episode):
+                    continue
+
+                minor_info = item.select_one('div.contentRow-minor')
+                date_str = ""
+                if minor_info:
+                    date_elem = minor_info.select_one('time.u-dt')
+                    if date_elem:
+                        date_str = date_elem.get('datetime', '')
+
+                # Fallback: use current time if no date found
+                if not date_str:
+                    date_str = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+                mb = 0
+                password = ""
+
+                payload = urlsafe_b64encode(
+                    f"{title_normalized}|{thread_url}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
+                ).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                page_releases.append({
+                    "details": {
+                        "title": title_normalized,
+                        "hostname": hostname,
+                        "imdb_id": imdb_id,
+                        "link": link,
+                        "mirror": mirror,
+                        "size": mb * 1024 * 1024,
+                        "date": date_str,
+                        "source": thread_url
+                    },
+                    "type": "protected"
+                })
+
+            except Exception as e:
+                debug(f"{hostname}: [Page {page_num}] error parsing item: {e}")
+
+        return page_releases, extracted_search_id
+
+    except Exception as e:
+        info(f"{hostname}: [Page {page_num}] error: {e}")
+        return page_releases, None
+
+
+def dl_search(shared_state, start_time, request_from, search_string,
+              mirror=None, season=None, episode=None):
+    """
+    Search with sequential pagination (max 5 pages) to find best quality releases.
+    Stops searching if a page returns 0 results.
+    """
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    imdb_id = shared_state.is_imdb_id(search_string)
+    if imdb_id:
+        title = get_localized_title(shared_state, imdb_id, 'de')
+        if not title:
+            info(f"{hostname}: no title for IMDb {imdb_id}")
+            return releases
+        search_string = title
+
+    search_string = unescape(search_string)
+    max_pages = 5
+
+    debug(
+        f"{hostname}: Starting sequential paginated search for '{search_string}' (Season: {season}, Episode: {episode}) - up to {max_pages} pages")
+
+    try:
+        sess = retrieve_and_validate_session(shared_state)
+        if not sess:
+            info(f"Could not retrieve valid session for {host}")
+            return releases
+
+        search_id = None
+
+        # Sequential search through pages
+        for page_num in range(1, max_pages + 1):
+            page_releases, extracted_search_id = _search_single_page(
+                shared_state, host, search_string, search_id, page_num,
+                imdb_id, mirror, request_from, season, episode
+            )
+
+            # Update search_id from first page
+            if page_num == 1:
+                search_id = extracted_search_id
+                if not search_id:
+                    info(f"{hostname}: Could not extract search ID, stopping pagination")
+                    break
+
+            # Add releases from this page
+            releases.extend(page_releases)
+            debug(f"{hostname}: [Page {page_num}] completed with {len(page_releases)} valid releases")
+
+            # Stop if this page returned 0 results
+            if len(page_releases) == 0:
+                debug(f"{hostname}: [Page {page_num}] returned 0 results, stopping pagination")
+                break
+
+    except Exception as e:
+        info(f"{hostname}: search error: {e}")
+        invalidate_session(shared_state)
+
+    debug(f"{hostname}: FINAL - Found {len(releases)} valid releases - providing to {request_from}")
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+
+    return releases
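
Both dl_feed and dl_search hand each hit to Quasarr's /download endpoint as a URL-safe base64 payload of pipe-separated fields (title|link|mirror|size_mb|password|imdb_id). A minimal sketch of that encoding and a matching decode (the decoder and example values are illustrative, not Quasarr's actual handler):

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

# Encode as in dl_feed()/dl_search(); a mirror of None becomes the string "None".
fields = ("Some.Release.2024.German.1080p", "https://www.example.invalid/threads/123/", "None", "0", "", "")
payload = urlsafe_b64encode("|".join(fields).encode("utf-8")).decode("utf-8")

# Illustrative decode on the receiving side (assumes the title contains no "|").
title, link, mirror, size_mb, password, imdb_id = (
    urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
)
assert title == "Some.Release.2024.German.1080p" and size_mb == "0"
```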