quasarr 2.4.8__py3-none-any.whl → 2.4.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. quasarr/__init__.py +134 -70
  2. quasarr/api/__init__.py +40 -31
  3. quasarr/api/arr/__init__.py +116 -108
  4. quasarr/api/captcha/__init__.py +262 -137
  5. quasarr/api/config/__init__.py +76 -46
  6. quasarr/api/packages/__init__.py +138 -102
  7. quasarr/api/sponsors_helper/__init__.py +29 -16
  8. quasarr/api/statistics/__init__.py +19 -19
  9. quasarr/downloads/__init__.py +165 -72
  10. quasarr/downloads/linkcrypters/al.py +35 -18
  11. quasarr/downloads/linkcrypters/filecrypt.py +107 -52
  12. quasarr/downloads/linkcrypters/hide.py +5 -6
  13. quasarr/downloads/packages/__init__.py +342 -177
  14. quasarr/downloads/sources/al.py +191 -100
  15. quasarr/downloads/sources/by.py +31 -13
  16. quasarr/downloads/sources/dd.py +27 -14
  17. quasarr/downloads/sources/dj.py +1 -3
  18. quasarr/downloads/sources/dl.py +126 -71
  19. quasarr/downloads/sources/dt.py +11 -5
  20. quasarr/downloads/sources/dw.py +28 -14
  21. quasarr/downloads/sources/he.py +32 -24
  22. quasarr/downloads/sources/mb.py +19 -9
  23. quasarr/downloads/sources/nk.py +14 -10
  24. quasarr/downloads/sources/nx.py +8 -18
  25. quasarr/downloads/sources/sf.py +45 -20
  26. quasarr/downloads/sources/sj.py +1 -3
  27. quasarr/downloads/sources/sl.py +9 -5
  28. quasarr/downloads/sources/wd.py +32 -12
  29. quasarr/downloads/sources/wx.py +35 -21
  30. quasarr/providers/auth.py +42 -37
  31. quasarr/providers/cloudflare.py +28 -30
  32. quasarr/providers/hostname_issues.py +2 -1
  33. quasarr/providers/html_images.py +2 -2
  34. quasarr/providers/html_templates.py +22 -14
  35. quasarr/providers/imdb_metadata.py +149 -80
  36. quasarr/providers/jd_cache.py +131 -39
  37. quasarr/providers/log.py +1 -1
  38. quasarr/providers/myjd_api.py +260 -196
  39. quasarr/providers/notifications.py +53 -41
  40. quasarr/providers/obfuscated.py +9 -4
  41. quasarr/providers/sessions/al.py +71 -55
  42. quasarr/providers/sessions/dd.py +21 -14
  43. quasarr/providers/sessions/dl.py +30 -19
  44. quasarr/providers/sessions/nx.py +23 -14
  45. quasarr/providers/shared_state.py +292 -141
  46. quasarr/providers/statistics.py +75 -43
  47. quasarr/providers/utils.py +33 -27
  48. quasarr/providers/version.py +45 -14
  49. quasarr/providers/web_server.py +10 -5
  50. quasarr/search/__init__.py +30 -18
  51. quasarr/search/sources/al.py +124 -73
  52. quasarr/search/sources/by.py +110 -59
  53. quasarr/search/sources/dd.py +57 -35
  54. quasarr/search/sources/dj.py +69 -48
  55. quasarr/search/sources/dl.py +159 -100
  56. quasarr/search/sources/dt.py +110 -74
  57. quasarr/search/sources/dw.py +121 -61
  58. quasarr/search/sources/fx.py +108 -62
  59. quasarr/search/sources/he.py +78 -49
  60. quasarr/search/sources/mb.py +96 -48
  61. quasarr/search/sources/nk.py +80 -50
  62. quasarr/search/sources/nx.py +91 -62
  63. quasarr/search/sources/sf.py +171 -106
  64. quasarr/search/sources/sj.py +69 -48
  65. quasarr/search/sources/sl.py +115 -71
  66. quasarr/search/sources/wd.py +67 -44
  67. quasarr/search/sources/wx.py +188 -123
  68. quasarr/storage/config.py +65 -52
  69. quasarr/storage/setup.py +238 -140
  70. quasarr/storage/sqlite_database.py +10 -4
  71. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
  72. quasarr-2.4.9.dist-info/RECORD +81 -0
  73. quasarr-2.4.8.dist-info/RECORD +0 -81
  74. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
  75. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
  76. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
quasarr/search/sources/mb.py
@@ -12,9 +12,9 @@ from urllib.parse import quote_plus
  import requests
  from bs4 import BeautifulSoup

- from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
+ from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
  from quasarr.providers.imdb_metadata import get_localized_title
- from quasarr.providers.log import info, debug
+ from quasarr.providers.log import debug, info

  hostname = "mb"
  supported_mirrors = ["rapidgator", "ddownload"]
@@ -25,8 +25,18 @@ IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)")

  # map German month names to numbers
  GERMAN_MONTHS = {
-     'Januar': '01', 'Februar': '02', 'März': '03', 'April': '04', 'Mai': '05', 'Juni': '06',
-     'Juli': '07', 'August': '08', 'September': '09', 'Oktober': '10', 'November': '11', 'Dezember': '12'
+     "Januar": "01",
+     "Februar": "02",
+     "März": "03",
+     "April": "04",
+     "Mai": "05",
+     "Juni": "06",
+     "Juli": "07",
+     "August": "08",
+     "September": "09",
+     "Oktober": "10",
+     "November": "11",
+     "Dezember": "12",
  }


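The month table above exists because `datetime.strptime`'s `%B` directive only matches month names in the current locale, so German feed dates are mapped to numeric months before parsing. A minimal standalone sketch of the approach, using the regex and formats from the hunks below (`parse_german_date` is an illustrative name, not a function in the package):

```python
import re
from datetime import datetime

GERMAN_MONTHS = {
    "Januar": "01", "Februar": "02", "März": "03", "April": "04",
    "Mai": "05", "Juni": "06", "Juli": "07", "August": "08",
    "September": "09", "Oktober": "10", "November": "11", "Dezember": "12",
}


def parse_german_date(date_txt):
    # Optional weekday prefix, then "3. März 2025 14:30"-style dates.
    m = re.search(r"(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})", date_txt)
    if not m:
        return None
    day, mon_name, year, hm = m.groups()
    mon = GERMAN_MONTHS.get(mon_name, "01")  # unknown months fall back to January, as in the source
    dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
    return dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")  # RSS-style pubDate


print(parse_german_date("Montag, 3. März 2025 14:30"))  # Mon, 03 Mar 2025 14:30:00 +0000
```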
@@ -42,11 +52,19 @@ def extract_size(text):
      return {"size": m.group(1), "sizeunit": m.group(2)}


- def _parse_posts(soup, shared_state, password, mirror_filter,
-                  is_search=False, request_from=None, search_string=None,
-                  season=None, episode=None):
+ def _parse_posts(
+     soup,
+     shared_state,
+     password,
+     mirror_filter,
+     is_search=False,
+     request_from=None,
+     search_string=None,
+     season=None,
+     episode=None,
+ ):
      releases = []
-     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")

      for post in soup.select("div.post"):
          try:
@@ -61,23 +79,25 @@ def _parse_posts(soup, shared_state, password, mirror_filter,
              date_txt = date_p.get_text(strip=True) if date_p else None
              published = one_hour_ago
              if date_txt:
-                 m_date = re.search(r'(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})', date_txt)
+                 m_date = re.search(
+                     r"(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})", date_txt
+                 )
                  if m_date:
                      day, mon_name, year, hm = m_date.groups()
-                     mon = GERMAN_MONTHS.get(mon_name, '01')
-                     dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
+                     mon = GERMAN_MONTHS.get(mon_name, "01")
+                     dt_obj = datetime.strptime(
+                         f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M"
+                     )
                      published = dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")

              if is_search:
-                 if not shared_state.is_valid_release(title,
-                                                      request_from,
-                                                      search_string,
-                                                      season,
-                                                      episode):
+                 if not shared_state.is_valid_release(
+                     title, request_from, search_string, season, episode
+                 ):
                      continue

                  # drop .XXX. unless user explicitly searched xxx
-                 if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
+                 if XXX_REGEX.search(title) and "xxx" not in search_string.lower():
                      continue
                  # require resolution/codec
                  if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
@@ -91,10 +111,12 @@ def _parse_posts(soup, shared_state, password, mirror_filter,
                      continue
              else:
                  mirror_candidates = []
-                 for strong in post.find_all('strong', string=re.compile(r'^Download', re.I)):
-                     link_tag = strong.find_next_sibling('a')
+                 for strong in post.find_all(
+                     "strong", string=re.compile(r"^Download", re.I)
+                 ):
+                     link_tag = strong.find_next_sibling("a")
                      if link_tag and link_tag.get_text(strip=True):
-                         host = link_tag.get_text(strip=True).split('.')[0].lower()
+                         host = link_tag.get_text(strip=True).split(".")[0].lower()
                          mirror_candidates.append(host)
                  valid = [m for m in mirror_candidates if m in supported_mirrors]
                  if not valid or (mirror_filter and mirror_filter not in valid):
@@ -102,8 +124,8 @@ def _parse_posts(soup, shared_state, password, mirror_filter,

              # extract IMDb ID
              imdb_id = None
-             for tag in post.find_all('a', href=True):
-                 m = IMDB_REGEX.search(tag['href'])
+             for tag in post.find_all("a", href=True):
+                 m = IMDB_REGEX.search(tag["href"])
                  if m:
                      imdb_id = m.group(1)
                      break
@@ -119,21 +141,25 @@ def _parse_posts(soup, shared_state, password, mirror_filter,
              payload = urlsafe_b64encode(
                  f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}|{hostname}".encode()
              ).decode()
-             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
-
-             releases.append({
-                 "details": {
-                     "title": title,
-                     "hostname": hostname,
-                     "imdb_id": imdb_id,
-                     "link": link,
-                     "mirror": mirror_filter,
-                     "size": size_bytes,
-                     "date": published,
-                     "source": source
-                 },
-                 "type": "protected"
-             })
+             link = (
+                 f"{shared_state.values['internal_address']}/download/?payload={payload}"
+             )
+
+             releases.append(
+                 {
+                     "details": {
+                         "title": title,
+                         "hostname": hostname,
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror_filter,
+                         "size": size_bytes,
+                         "date": published,
+                         "source": source,
+                     },
+                     "type": "protected",
+                 }
+             )
          except Exception as e:
              debug(f"Error parsing {hostname.upper()} post: {e}")
              continue
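The download link packs everything the `/download/` endpoint later needs into one urlsafe-base64 token of seven pipe-separated fields. A sketch of the round-trip with stand-in values (the decoding side is an assumption about how the endpoint reads the token; it is not shown in this diff):

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode

# Stand-in values for the variables used in _parse_posts above.
title = "Some.Movie.2024.German.1080p.WEB.x264-GRP"
source = "https://example.invalid/some-post"
mirror_filter, mb, password, imdb_id, hostname = "rapidgator", 4300, "pw", "tt1234567", "mb"

# Encode: seven pipe-separated fields, exactly as built in the hunk above.
payload = urlsafe_b64encode(
    f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}|{hostname}".encode()
).decode()

# Decode (presumed consumer side): split back into the same seven fields.
fields = urlsafe_b64decode(payload.encode()).decode().split("|")
assert fields == [title, source, mirror_filter, str(mb), password, imdb_id, hostname]
```

Note the scheme only round-trips cleanly as long as no field contains a literal `|`.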
@@ -144,13 +170,15 @@ def mb_feed(shared_state, start_time, request_from, mirror=None):
      mb = shared_state.values["config"]("Hostnames").get(hostname)

      if not "arr" in request_from.lower():
-         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         debug(
+             f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+         )
          return []

      password = mb
      section = "neuerscheinungen" if "Radarr" in request_from else "serie"
      url = f"https://{mb}/category/{section}/"
-     headers = {'User-Agent': shared_state.values["user_agent"]}
+     headers = {"User-Agent": shared_state.values["user_agent"]}
      try:
          r = requests.get(url, headers=headers, timeout=30)
          r.raise_for_status()
@@ -158,7 +186,9 @@ def mb_feed(shared_state, start_time, request_from, mirror=None):
          releases = _parse_posts(soup, shared_state, password, mirror_filter=mirror)
      except Exception as e:
          info(f"Error loading {hostname.upper()} feed: {e}")
-         mark_hostname_issue(hostname, "feed", str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, "feed", str(e) if "e" in dir() else "Error occurred"
+         )
          releases = []
      debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")

@@ -167,17 +197,27 @@ def mb_feed(shared_state, start_time, request_from, mirror=None):
      return releases


- def mb_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+ def mb_search(
+     shared_state,
+     start_time,
+     request_from,
+     search_string,
+     mirror=None,
+     season=None,
+     episode=None,
+ ):
      mb = shared_state.values["config"]("Hostnames").get(hostname)

      if not "arr" in request_from.lower():
-         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         debug(
+             f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+         )
          return []

      password = mb
      imdb_id = shared_state.is_imdb_id(search_string)
      if imdb_id:
-         title = get_localized_title(shared_state, imdb_id, 'de')
+         title = get_localized_title(shared_state, imdb_id, "de")
          if not title:
              info(f"Could not extract title from IMDb-ID {imdb_id}")
              return []
@@ -185,19 +225,27 @@ def mb_search(shared_state, start_time, request_from, search_string, mirror=None

      q = quote_plus(search_string)
      url = f"https://{mb}/?s={q}&id=20&post_type=post"
-     headers = {'User-Agent': shared_state.values["user_agent"]}
+     headers = {"User-Agent": shared_state.values["user_agent"]}
      try:
          r = requests.get(url, headers=headers, timeout=10)
          r.raise_for_status()
          soup = BeautifulSoup(r.content, "html.parser")
          releases = _parse_posts(
-             soup, shared_state, password, mirror_filter=mirror,
-             is_search=True, request_from=request_from,
-             search_string=search_string, season=season, episode=episode
+             soup,
+             shared_state,
+             password,
+             mirror_filter=mirror,
+             is_search=True,
+             request_from=request_from,
+             search_string=search_string,
+             season=season,
+             episode=episode,
          )
      except Exception as e:
          info(f"Error loading {hostname.upper()} search: {e}")
-         mark_hostname_issue(hostname, "search", str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, "search", str(e) if "e" in dir() else "Error occurred"
+         )
          releases = []
      debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")

quasarr/search/sources/nk.py
@@ -12,9 +12,9 @@ from urllib.parse import urljoin
  import requests
  from bs4 import BeautifulSoup

- from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
+ from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
  from quasarr.providers.imdb_metadata import get_localized_title, get_year
- from quasarr.providers.log import info, debug
+ from quasarr.providers.log import debug, info

  hostname = "nk"
  supported_mirrors = ["rapidgator", "ddownload"]
@@ -22,7 +22,12 @@ supported_mirrors = ["rapidgator", "ddownload"]

  def convert_to_rss_date(date_str: str) -> str:
      date_str = date_str.strip()
-     for fmt in ("%d. %B %Y / %H:%M", "%d.%m.%Y / %H:%M", "%d.%m.%Y - %H:%M", "%Y-%m-%d %H:%M"):
+     for fmt in (
+         "%d. %B %Y / %H:%M",
+         "%d.%m.%Y / %H:%M",
+         "%d.%m.%Y - %H:%M",
+         "%Y-%m-%d %H:%M",
+     ):
          try:
              dt = datetime.strptime(date_str, fmt)
              return dt.strftime("%a, %d %b %Y %H:%M:%S +0000")
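`convert_to_rss_date` tries each known timestamp layout in turn and returns the first successful parse as an RSS-style pubDate. Note that `%B` matches month names in the current locale, so the first pattern only fits dates like `3. März 2025 / 14:30` under a German locale. A quick standalone check (the trailing fallback return is an assumption; the hunk ends before the function does):

```python
from datetime import datetime


def convert_to_rss_date(date_str: str) -> str:
    # Same cascade as the hunk above: first format that parses wins.
    date_str = date_str.strip()
    for fmt in (
        "%d. %B %Y / %H:%M",  # "%B" is locale-dependent (German month names)
        "%d.%m.%Y / %H:%M",
        "%d.%m.%Y - %H:%M",
        "%Y-%m-%d %H:%M",
    ):
        try:
            dt = datetime.strptime(date_str, fmt)
            return dt.strftime("%a, %d %b %Y %H:%M:%S +0000")
        except ValueError:
            continue
    return ""  # assumed fallback, not shown in this hunk


print(convert_to_rss_date("03.03.2025 / 14:30"))  # Mon, 03 Mar 2025 14:30:00 +0000
```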
@@ -34,33 +39,43 @@ def convert_to_rss_date(date_str: str) -> str:

  def extract_size(text: str) -> dict:
      match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
      if match:
-         size = match.group(1).replace(',', '.')
+         size = match.group(1).replace(",", ".")
          unit = match.group(2)
          return {"size": size, "sizeunit": unit}
      return {"size": "0", "sizeunit": "MB"}


  def get_release_field(res, label):
-     for li in res.select('ul.release-infos li'):
-         sp = li.find('span')
+     for li in res.select("ul.release-infos li"):
+         sp = li.find("span")
          if not sp:
-             return ''
+             return ""
          if sp.get_text(strip=True).lower() == label.lower():
-             txt = li.get_text(' ', strip=True)
-             return txt[len(sp.get_text(strip=True)):].strip()
-     return ''
+             txt = li.get_text(" ", strip=True)
+             return txt[len(sp.get_text(strip=True)) :].strip()
+     return ""


  def nk_feed(*args, **kwargs):
      return nk_search(*args, **kwargs)


- def nk_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+ def nk_search(
+     shared_state,
+     start_time,
+     request_from,
+     search_string="",
+     mirror=None,
+     season=None,
+     episode=None,
+ ):
      releases = []
      host = shared_state.values["config"]("Hostnames").get(hostname)

      if not "arr" in request_from.lower():
-         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         debug(
+             f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!'
+         )
          return releases

      if mirror and mirror not in supported_mirrors:
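`extract_size` pulls the first number-plus-unit pair out of a free-form size string and normalizes the German decimal comma, leaving unit conversion to `shared_state.convert_to_mb`. A quick standalone check of the parsing behavior:

```python
import re


def extract_size(text: str) -> dict:
    # Copied from the hunk above: first number + unit wins,
    # German decimal commas become dots.
    match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
    if match:
        size = match.group(1).replace(",", ".")
        unit = match.group(2)
        return {"size": size, "sizeunit": unit}
    return {"size": "0", "sizeunit": "MB"}


print(extract_size("1,4 GB"))      # {'size': '1.4', 'sizeunit': 'GB'}
print(extract_size("ca. 700 MB"))  # {'size': '700', 'sizeunit': 'MB'}
print(extract_size("unbekannt"))   # {'size': '0', 'sizeunit': 'MB'}
```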
@@ -71,7 +86,7 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
      if search_string != "":
          imdb_id = shared_state.is_imdb_id(search_string)
          if imdb_id:
-             local_title = get_localized_title(shared_state, imdb_id, 'de')
+             local_title = get_localized_title(shared_state, imdb_id, "de")
              if not local_title:
                  info(f"{hostname}: no title for IMDb {imdb_id}")
                  return releases
@@ -99,18 +114,20 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
          if episode:
              source_search += f"E{int(episode):02d}"

-     url = f'https://{host}/search'
+     url = f"https://{host}/search"
      headers = {"User-Agent": shared_state.values["user_agent"]}
      data = {"search": source_search}

      try:
          r = requests.post(url, headers=headers, data=data, timeout=timeout)
          r.raise_for_status()
-         soup = BeautifulSoup(r.content, 'html.parser')
-         results = soup.find_all('div', class_='article-right')
+         soup = BeautifulSoup(r.content, "html.parser")
+         results = soup.find_all("div", class_="article-right")
      except Exception as e:
          info(f"{hostname}: {search_type} load error: {e}")
-         mark_hostname_issue(hostname, search_type, str(e) if "e" in dir() else "Error occurred")
+         mark_hostname_issue(
+             hostname, search_type, str(e) if "e" in dir() else "Error occurred"
+         )
          return releases

      if not results:
@@ -118,13 +135,15 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N

      for result in results:
          try:
-             imdb_a = result.select_one('a.imdb')
-             if imdb_a and imdb_a.get('href'):
+             imdb_a = result.select_one("a.imdb")
+             if imdb_a and imdb_a.get("href"):
                  try:
-                     release_imdb_id = re.search(r'tt\d+', imdb_a['href']).group()
+                     release_imdb_id = re.search(r"tt\d+", imdb_a["href"]).group()
                      if imdb_id:
                          if release_imdb_id != imdb_id:
-                             debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
+                             debug(
+                                 f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}"
+                             )
                              continue
                  except Exception:
                      debug(f"{hostname}: could not extract IMDb ID")
@@ -133,23 +152,25 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N
                  debug(f"{hostname}: could not extract IMDb ID")
                  continue

-             a = result.find('a', class_='release-details', href=True)
+             a = result.find("a", class_="release-details", href=True)
              if not a:
                  continue

-             sub_title = result.find('span', class_='subtitle')
+             sub_title = result.find("span", class_="subtitle")
              if sub_title:
                  title = sub_title.get_text(strip=True)
              else:
                  continue

-             if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+             if not shared_state.is_valid_release(
+                 title, request_from, search_string, season, episode
+             ):
                  continue

-             source = urljoin(f'https://{host}', a['href'])
+             source = urljoin(f"https://{host}", a["href"])

              mb = 0
-             size_text = get_release_field(result, 'Größe')
+             size_text = get_release_field(result, "Größe")
              if size_text:
                  size_item = extract_size(size_text)
                  mb = shared_state.convert_to_mb(size_item)
@@ -159,45 +180,54 @@ def nk_search(shared_state, start_time, request_from, search_string="", mirror=N

              size = mb * 1024 * 1024

-             password = ''
-             mirrors_p = result.find('p', class_='mirrors')
+             password = ""
+             mirrors_p = result.find("p", class_="mirrors")
              if mirrors_p:
-                 strong = mirrors_p.find('strong')
-                 if strong and strong.get_text(strip=True).lower().startswith('passwort'):
+                 strong = mirrors_p.find("strong")
+                 if strong and strong.get_text(strip=True).lower().startswith(
+                     "passwort"
+                 ):
                      nxt = strong.next_sibling
                      if nxt:
                          val = str(nxt).strip()
                          if val:
                              password = val.split()[0]

-             date_text = ''
-             p_meta = result.find('p', class_='meta')
+             date_text = ""
+             p_meta = result.find("p", class_="meta")
              if p_meta:
-                 spans = p_meta.find_all('span')
+                 spans = p_meta.find_all("span")
                  if len(spans) >= 2:
                      date_part = spans[0].get_text(strip=True)
-                     time_part = spans[1].get_text(strip=True).replace('Uhr', '').strip()
+                     time_part = spans[1].get_text(strip=True).replace("Uhr", "").strip()
                      date_text = f"{date_part} / {time_part}"

              published = convert_to_rss_date(date_text) if date_text else ""

              payload = urlsafe_b64encode(
-                 f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode("utf-8")).decode()
-             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
-
-             releases.append({
-                 "details": {
-                     "title": title,
-                     "hostname": hostname,
-                     "imdb_id": release_imdb_id,
-                     "link": link,
-                     "mirror": mirror,
-                     "size": size,
-                     "date": published,
-                     "source": source
-                 },
-                 "type": "protected"
-             })
+                 f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}|{hostname}".encode(
+                     "utf-8"
+                 )
+             ).decode()
+             link = (
+                 f"{shared_state.values['internal_address']}/download/?payload={payload}"
+             )
+
+             releases.append(
+                 {
+                     "details": {
+                         "title": title,
+                         "hostname": hostname,
+                         "imdb_id": release_imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size,
+                         "date": published,
+                         "source": source,
+                     },
+                     "type": "protected",
+                 }
+             )
          except Exception as e:
              info(e)
              debug(f"{hostname}: error parsing search result: {e}")