quasarr 1.3.5__py3-none-any.whl → 1.20.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of quasarr has been flagged as possibly problematic.
Files changed (67)
  1. quasarr/__init__.py +157 -56
  2. quasarr/api/__init__.py +141 -36
  3. quasarr/api/arr/__init__.py +197 -78
  4. quasarr/api/captcha/__init__.py +897 -42
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +84 -22
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +237 -434
  9. quasarr/downloads/linkcrypters/al.py +237 -0
  10. quasarr/downloads/linkcrypters/filecrypt.py +178 -31
  11. quasarr/downloads/linkcrypters/hide.py +123 -0
  12. quasarr/downloads/packages/__init__.py +461 -0
  13. quasarr/downloads/sources/al.py +697 -0
  14. quasarr/downloads/sources/by.py +106 -0
  15. quasarr/downloads/sources/dd.py +6 -78
  16. quasarr/downloads/sources/dj.py +7 -0
  17. quasarr/downloads/sources/dt.py +1 -1
  18. quasarr/downloads/sources/dw.py +2 -2
  19. quasarr/downloads/sources/he.py +112 -0
  20. quasarr/downloads/sources/mb.py +47 -0
  21. quasarr/downloads/sources/nk.py +51 -0
  22. quasarr/downloads/sources/nx.py +36 -81
  23. quasarr/downloads/sources/sf.py +27 -4
  24. quasarr/downloads/sources/sj.py +7 -0
  25. quasarr/downloads/sources/sl.py +90 -0
  26. quasarr/downloads/sources/wd.py +110 -0
  27. quasarr/providers/cloudflare.py +204 -0
  28. quasarr/providers/html_images.py +20 -0
  29. quasarr/providers/html_templates.py +210 -108
  30. quasarr/providers/imdb_metadata.py +15 -2
  31. quasarr/providers/myjd_api.py +36 -5
  32. quasarr/providers/notifications.py +30 -5
  33. quasarr/providers/obfuscated.py +35 -0
  34. quasarr/providers/sessions/__init__.py +0 -0
  35. quasarr/providers/sessions/al.py +286 -0
  36. quasarr/providers/sessions/dd.py +78 -0
  37. quasarr/providers/sessions/nx.py +76 -0
  38. quasarr/providers/shared_state.py +368 -23
  39. quasarr/providers/statistics.py +154 -0
  40. quasarr/providers/version.py +60 -1
  41. quasarr/search/__init__.py +112 -36
  42. quasarr/search/sources/al.py +448 -0
  43. quasarr/search/sources/by.py +203 -0
  44. quasarr/search/sources/dd.py +17 -6
  45. quasarr/search/sources/dj.py +213 -0
  46. quasarr/search/sources/dt.py +37 -7
  47. quasarr/search/sources/dw.py +27 -47
  48. quasarr/search/sources/fx.py +27 -29
  49. quasarr/search/sources/he.py +196 -0
  50. quasarr/search/sources/mb.py +195 -0
  51. quasarr/search/sources/nk.py +188 -0
  52. quasarr/search/sources/nx.py +22 -6
  53. quasarr/search/sources/sf.py +143 -151
  54. quasarr/search/sources/sj.py +213 -0
  55. quasarr/search/sources/sl.py +246 -0
  56. quasarr/search/sources/wd.py +208 -0
  57. quasarr/storage/config.py +20 -4
  58. quasarr/storage/setup.py +224 -56
  59. quasarr-1.20.4.dist-info/METADATA +304 -0
  60. quasarr-1.20.4.dist-info/RECORD +72 -0
  61. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/WHEEL +1 -1
  62. quasarr/providers/tvmaze_metadata.py +0 -23
  63. quasarr-1.3.5.dist-info/METADATA +0 -174
  64. quasarr-1.3.5.dist-info/RECORD +0 -43
  65. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/entry_points.txt +0 -0
  66. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/licenses/LICENSE +0 -0
  67. {quasarr-1.3.5.dist-info → quasarr-1.20.4.dist-info}/top_level.txt +0 -0
quasarr/search/sources/by.py (new file)
@@ -0,0 +1,203 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import html
+ import re
+ import time
+ from base64 import urlsafe_b64encode
+ from datetime import datetime
+ from urllib.parse import quote_plus
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "by"
+ supported_mirrors = ["rapidgator", "ddownload", "nitroflare"]
+ XXX_REGEX = re.compile(r"\.xxx\.", re.I)
+ RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
+ CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
+ IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)")
+
+
+ def convert_to_rss_date(date_str):
+     """
+     BY date format: 'dd.mm.yy HH:MM', e.g. '20.07.25 17:48'
+     """
+     dt_obj = datetime.strptime(date_str, "%d.%m.%y %H:%M")
+     return dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+
+ def extract_size(text):
+     m = re.match(r"(\d+(?:[.,]\d+)?)\s*([A-Za-z]+)", text)
+     if not m:
+         raise ValueError(f"Invalid size format: {text!r}")
+     size_str = m.group(1).replace(',', '.')
+     sizeunit = m.group(2)
+     size_float = float(size_str)  # convert to float here
+     return {"size": size_float, "sizeunit": sizeunit}
+
+
+ def _parse_posts(soup, shared_state, base_url, password, mirror_filter,
+                  is_search=False, request_from=None, search_string=None,
+                  season=None, episode=None):
+     releases = []
+     if not is_search:
+         feed_container = soup.find('table', class_='AUDIO_ITEMLIST')  # it is actually called this way
+         candidates = []
+         if feed_container:
+             for tbl in feed_container.find_all('table'):
+                 if tbl.find(string=re.compile(r"Erstellt am:")):
+                     candidates.append(tbl)
+         items = candidates
+     else:
+         search_table = soup.find('table', class_='SEARCH_ITEMLIST')
+         items = []
+         if search_table:
+             items = [
+                 tr for tr in search_table.find_all('tr')
+                 if tr.find('p', class_='TITLE') and tr.find('p', class_='TITLE').find('a', href=True)
+             ]
+
+     for entry in items:
+         if entry.find('table'):
+             continue  # Skip header rows
+         try:
+             if not is_search:
+                 table = entry
+                 # title & source
+                 try:
+                     link_tag = table.find('th').find('a')
+                 except AttributeError:
+                     link_tag = table.find('a')
+                 title = link_tag.get_text(strip=True)
+                 if 'lazylibrarian' in request_from.lower():
+                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                     title = shared_state.normalize_magazine_title(title)
+                 else:
+                     title = title.replace(" ", ".")
+
+                 source = base_url + link_tag['href']
+                 # extract date and size
+                 date_str = size_str = None
+                 for row in table.find_all('tr', height=True):
+                     cols = row.find_all('td')
+                     if len(cols) == 2:
+                         label = cols[0].get_text(strip=True)
+                         val = cols[1].get_text(strip=True)
+                         if label.startswith('Erstellt am'):
+                             date_str = val
+                         elif label.startswith('Größe'):
+                             size_str = val
+                 published = convert_to_rss_date(date_str) if date_str else ''
+                 size_info = extract_size(size_str) if size_str else {'size': '0', 'sizeunit': 'MB'}
+                 mb = float(size_info['size'])
+                 size_bytes = int(mb * 1024 * 1024)
+                 imdb_id = None
+             else:
+                 row = entry
+                 title_tag = row.find('p', class_='TITLE').find('a')
+                 title = title_tag.get_text(strip=True)
+                 if 'lazylibrarian' in request_from.lower():
+                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                     title = shared_state.normalize_magazine_title(title)
+                 else:
+                     title = title.replace(" ", ".")
+                     if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
+                         continue
+
+                 if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                     continue
+                 if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
+                     continue
+
+                 source = base_url + title_tag['href']
+                 date_cell = row.find_all('td')[2]
+                 date_str = date_cell.get_text(strip=True)
+                 published = convert_to_rss_date(date_str)
+                 size_bytes = 0
+                 mb = 0
+                 imdb_id = None
+
+             payload = urlsafe_b64encode(
+                 f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
+             ).decode()
+             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+             releases.append({
+                 'details': {
+                     'title': title,
+                     'hostname': hostname,
+                     'imdb_id': imdb_id,
+                     'link': link,
+                     'mirror': mirror_filter,
+                     'size': size_bytes,
+                     'date': published,
+                     'source': source
+                 },
+                 'type': 'protected'
+             })
+         except Exception as e:
+             debug(f"Error parsing {hostname.upper()}: {e}")
+             continue
+
+     return releases
+
+
+ def by_feed(shared_state, start_time, request_from, mirror=None):
+     by = shared_state.values['config']('Hostnames').get(hostname)
+     password = by
+
+     if "lazylibrarian" in request_from.lower():
+         feed_type = "?cat=71"
+     elif "radarr" in request_from.lower():
+         feed_type = "?cat=1"
+     else:
+         feed_type = "?cat=2"
+
+     base_url = f"https://{by}"
+     url = f"{base_url}/{feed_type}"
+     headers = {'User-Agent': shared_state.values['user_agent']}
+     try:
+         html_doc = requests.get(url, headers=headers, timeout=10).content
+         soup = BeautifulSoup(html_doc, 'html.parser')
+         releases = _parse_posts(soup, shared_state, base_url, password, request_from=request_from, mirror_filter=mirror)
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} feed: {e}")
+         releases = []
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
+
+
+ def by_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     by = shared_state.values['config']('Hostnames').get(hostname)
+     password = by
+
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if imdb_id:
+         title = get_localized_title(shared_state, imdb_id, 'de')
+         if not title:
+             info(f"Could not extract title from IMDb-ID {imdb_id}")
+             return []
+         search_string = html.unescape(title)
+
+     base_url = f"https://{by}"
+     q = quote_plus(search_string)
+     url = f"{base_url}/?q={q}"
+     headers = {'User-Agent': shared_state.values['user_agent']}
+     try:
+         html_doc = requests.get(url, headers=headers, timeout=10).content
+         soup = BeautifulSoup(html_doc, 'html.parser')
+         releases = _parse_posts(
+             soup, shared_state, base_url, password, mirror_filter=mirror,
+             is_search=True, request_from=request_from,
+             search_string=search_string, season=season, episode=episode
+         )
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} search: {e}")
+         releases = []
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
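
Reviewer note: all of the new source modules funnel results through the same pattern visible above — six pipe-separated fields, urlsafe-base64-encoded into a payload for Quasarr's /download/ endpoint. A minimal sketch of the round-trip, with made-up values; the decoding side is not part of this diff and is assumed here to simply mirror the encoding:

from base64 import urlsafe_b64encode, urlsafe_b64decode

# Field order taken from the f-string in _parse_posts above; values are illustrative.
fields = ["Some.Release.2024.1080p.x264", "https://example.org/post/1",
          "rapidgator", "1024.0", "example-password", "None"]
payload = urlsafe_b64encode("|".join(fields).encode()).decode()

# Reversing the steps recovers the fields (so titles must not contain "|").
title, source, mirror, mb, password, imdb_id = (
    urlsafe_b64decode(payload.encode()).decode().split("|"))
assert title == "Some.Release.2024.1080p.x264"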
quasarr/search/sources/dd.py
@@ -7,9 +7,9 @@ import time
  from base64 import urlsafe_b64encode
  from datetime import datetime, timezone

- from quasarr.downloads.sources.dd import create_and_persist_session, retrieve_and_validate_session
  from quasarr.providers.imdb_metadata import get_localized_title
  from quasarr.providers.log import info, debug
+ from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session

  hostname = "dd"
  supported_mirrors = ["ironfiles", "rapidgator", "filefactory"]
@@ -26,9 +26,18 @@ def extract_size(size_in_bytes):
      return {"size": size_in_bytes, "sizeunit": "B"}


- def dd_search(shared_state, start_time, search_string="", mirror=None):
+ def dd_feed(*args, **kwargs):
+     return dd_search(*args, **kwargs)
+
+
+ def dd_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
      releases = []
      dd = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = dd
+
+     if not "arr" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases

      dd_session = retrieve_and_validate_session(shared_state)
      if not dd_session:
@@ -48,8 +57,6 @@ def dd_search(shared_state, start_time, search_string="", mirror=None):
          return releases
      search_string = html.unescape(search_string)

-     password = dd
-
      qualities = [
          "disk-480p",
          "web-480p",
@@ -85,7 +92,11 @@ def dd_search(shared_state, start_time, search_string="", mirror=None):
          else:
              title = release.get("release")

-         if search_string and not shared_state.search_string_in_sanitized_title(search_string, title):
+         if not shared_state.is_valid_release(title,
+                                              request_from,
+                                              search_string,
+                                              season,
+                                              episode):
              continue

          imdb_id = release.get("imdbid", None)
@@ -119,6 +130,6 @@ def dd_search(shared_state, start_time, search_string="", mirror=None):
          info(f"Error loading {hostname.upper()} feed: {e}")

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

      return releases
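
Reviewer note: dd_feed is now a pure pass-through to dd_search, so feed requests and searches share one code path, and the new request_from guard skips any caller whose identifier lacks "arr". The guard is plain substring logic, so its effect can be checked in isolation (caller names below are illustrative):

for request_from in ("Radarr/5.14", "Sonarr", "LazyLibrarian"):
    skipped = "arr" not in request_from.lower()
    print(request_from, "-> skipped" if skipped else "-> searched")
# Radarr/5.14 -> searched
# Sonarr -> searched
# LazyLibrarian -> skipped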
quasarr/search/sources/dj.py (new file)
@@ -0,0 +1,213 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import json
+ import re
+ import time
+ from base64 import urlsafe_b64encode
+ from datetime import datetime, timedelta
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "dj"
+
+
+ def convert_to_rss_date(date_str):
+     try:
+         return datetime.fromisoformat(
+             date_str.replace("Z", "+00:00")
+         ).strftime("%a, %d %b %Y %H:%M:%S +0000")
+     except Exception:
+         return ""
+
+
+ def dj_feed(shared_state, start_time, request_from, mirror=None):
+     releases = []
+
+     if "sonarr" not in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+     password = sj_host
+
+     url = f"https://{sj_host}/api/releases/latest/0"
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+
+     try:
+         r = requests.get(url, headers=headers, timeout=10)
+         data = json.loads(r.content)
+     except Exception as e:
+         info(f"{hostname.upper()}: feed load error: {e}")
+         return releases
+
+     for release in data:
+         try:
+             title = release.get("name").rstrip(".")
+             if not title:
+                 continue
+
+             published = convert_to_rss_date(release.get("createdAt"))
+             if not published:
+                 continue
+
+             media = release.get("_media", {})
+             slug = media.get("slug")
+             if not slug:
+                 continue
+
+             series_url = f"https://{sj_host}/serie/{slug}"
+
+             mb = 0
+             size = 0
+             imdb_id = None
+
+             payload = urlsafe_b64encode(
+                 f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+             ).decode("utf-8")
+
+             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+             releases.append({
+                 "details": {
+                     "title": title,
+                     "hostname": hostname,
+                     "imdb_id": imdb_id,
+                     "link": link,
+                     "mirror": mirror,
+                     "size": size,
+                     "date": published,
+                     "source": series_url
+                 },
+                 "type": "protected"
+             })
+
+         except Exception as e:
+             debug(f"{hostname.upper()}: feed parse error: {e}")
+             continue
+
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
+
+
+ def dj_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     releases = []
+
+     if "sonarr" not in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+     sj_host = shared_state.values["config"]("Hostnames").get(hostname)
+     password = sj_host
+
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if not imdb_id:
+         return releases
+
+     localized_title = get_localized_title(shared_state, imdb_id, "de")
+     if not localized_title:
+         info(f"{hostname.upper()}: no localized title for IMDb {imdb_id}")
+         return releases
+
+     headers = {"User-Agent": shared_state.values["user_agent"]}
+     search_url = f"https://{sj_host}/serie/search"
+     params = {"q": localized_title}
+
+     try:
+         r = requests.get(search_url, headers=headers, params=params, timeout=10)
+         soup = BeautifulSoup(r.content, "html.parser")
+         results = soup.find_all("a", href=re.compile(r"^/serie/"))
+     except Exception as e:
+         info(f"{hostname.upper()}: search load error: {e}")
+         return releases
+
+     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+     sanitized_search_string = shared_state.sanitize_string(localized_title)
+
+     for result in results:
+         try:
+             result_title = result.get_text(strip=True)
+
+             sanitized_title = shared_state.sanitize_string(result_title)
+
+             if not re.search(
+                     rf"\b{re.escape(sanitized_search_string)}\b",
+                     sanitized_title
+             ):
+                 debug(
+                     f"Search string '{localized_title}' doesn't match '{result_title}'"
+                 )
+                 continue
+
+             debug(
+                 f"Matched search string '{localized_title}' with result '{result_title}'"
+             )
+
+             series_url = f"https://{sj_host}{result['href']}"
+
+             r = requests.get(series_url, headers=headers, timeout=10)
+             media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
+             if not media_id_match:
+                 debug(f"{hostname.upper()}: no media id for {result_title}")
+                 continue
+
+             media_id = media_id_match.group(1)
+             api_url = f"https://{sj_host}/api/media/{media_id}/releases"
+
+             r = requests.get(api_url, headers=headers, timeout=10)
+             data = json.loads(r.content)
+
+             for season_block in data.values():
+                 for item in season_block.get("items", []):
+                     title = item.get("name").rstrip(".")
+                     if not title:
+                         continue
+
+                     if not shared_state.is_valid_release(
+                             title,
+                             request_from,
+                             search_string,
+                             season,
+                             episode
+                     ):
+                         continue
+
+                     published = convert_to_rss_date(item.get("createdAt"))
+                     if not published:
+                         debug(f"{hostname.upper()}: no published date for {title}")
+                         published = one_hour_ago
+
+                     mb = 0
+                     size = 0
+
+                     payload = urlsafe_b64encode(
+                         f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+                     ).decode("utf-8")
+
+                     link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                     releases.append({
+                         "details": {
+                             "title": title,
+                             "hostname": hostname,
+                             "imdb_id": imdb_id,
+                             "link": link,
+                             "mirror": mirror,
+                             "size": size,
+                             "date": published,
+                             "source": series_url
+                         },
+                         "type": "protected"
+                     })
+
+         except Exception as e:
+             debug(f"{hostname.upper()}: search parse error: {e}")
+             continue
+
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
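
Reviewer note: one detail worth calling out in dj.py's date handling — createdAt arrives as an ISO-8601 timestamp with a trailing "Z", which datetime.fromisoformat only accepts natively from Python 3.11 onward; the replace("Z", "+00:00") shim keeps the parse working on older interpreters. For example:

from datetime import datetime

date_str = "2025-07-20T17:48:00Z"
parsed = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
print(parsed.strftime("%a, %d %b %Y %H:%M:%S +0000"))
# Sun, 20 Jul 2025 17:48:00 +0000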
quasarr/search/sources/dt.py
@@ -57,16 +57,21 @@ def parse_published_datetime(article):

  def dt_feed(shared_state, start_time, request_from, mirror=None):
      releases = []
-     dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
-     password = dw
+     dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = dt

-     feed_type = "media/videos/" if "Radarr" in request_from else "media/tv-show/"
+     if "lazylibrarian" in request_from.lower():
+         feed_type = "learning/"
+     elif "radarr" in request_from.lower():
+         feed_type = "media/videos/"
+     else:
+         feed_type = "media/tv-show/"

      if mirror and mirror not in supported_mirrors:
          debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
          return releases

-     url = f'https://{dw}/{feed_type}'
+     url = f'https://{dt}/{feed_type}'
      headers = {'User-Agent': shared_state.values["user_agent"]}

      try:
@@ -84,6 +89,10 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
              title_raw = link_tag.text.strip()
              title = title_raw.replace(' - ', '-').replace(' ', '.').replace('(', '').replace(')', '')

+             if 'lazylibrarian' in request_from.lower():
+                 # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                 title = shared_state.normalize_magazine_title(title)
+
              try:
                  imdb_id = re.search(r'tt\d+', str(article)).group()
              except:
@@ -132,12 +141,17 @@ def dt_feed(shared_state, start_time, request_from, mirror=None):
      return releases


- def dt_search(shared_state, start_time, request_from, search_string, mirror=None):
+ def dt_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
      releases = []
      dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = dt

-     cat_id = "9" if "Radarr" in request_from else "64"
+     if "lazylibrarian" in request_from.lower():
+         cat_id = "100"
+     elif "radarr" in request_from.lower():
+         cat_id = "9"
+     else:
+         cat_id = "64"

      if mirror and mirror not in supported_mirrors:
          debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!')
@@ -185,7 +199,23 @@ def dt_search(shared_state, start_time, request_from, search_string, mirror=None
              continue
          source = link_tag["href"]
          title_raw = link_tag.text.strip()
-         title = title_raw.replace(' - ', '-').replace(' ', '.').replace('(', '').replace(')', '')
+         title = (title_raw.
+                  replace(' - ', '-').
+                  replace(' ', '.').
+                  replace('(', '').
+                  replace(')', '')
+                  )
+
+         if not shared_state.is_valid_release(title,
+                                              request_from,
+                                              search_string,
+                                              season,
+                                              episode):
+             continue
+
+         if 'lazylibrarian' in request_from.lower():
+             # lazylibrarian can only detect specific date formats / issue numbering for magazines
+             title = shared_state.normalize_magazine_title(title)

          try:
              imdb_id = re.search(r"tt\d+", str(article)).group()
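
Reviewer note: the reflowed replace chain in dt_search behaves exactly like the old one-liner it replaces; with an illustrative raw title:

title_raw = "Some Show (2024) - Complete Course"
title = (title_raw.
         replace(' - ', '-').
         replace(' ', '.').
         replace('(', '').
         replace(')', '')
         )
print(title)  # Some.Show.2024-Complete.Course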
quasarr/search/sources/dw.py
@@ -34,55 +34,22 @@ def convert_to_rss_date(date_str):


  def extract_size(text):
-     match = re.match(r"(\d+) ([A-Za-z]+)", text)
+     # First try the normal pattern: number + space + unit (e.g., "1024 MB")
+     match = re.match(r"(\d+)\s+([A-Za-z]+)", text)
      if match:
          size = match.group(1)
          unit = match.group(2)
          return {"size": size, "sizeunit": unit}
-     else:
-         raise ValueError(f"Invalid size format: {text}")
-
-
- def dw_get_download_links(shared_state, content, title):
-     try:
-         try:
-             content = BeautifulSoup(content, "html.parser")
-         except:
-             content = BeautifulSoup(str(content), "html.parser")
-         download_buttons = content.find_all("button", {"class": "show_link"})
-     except:
-         info(f"{hostname.upper()} has changed the details page. Parsing links for {title} failed!")
-         return False
-
-     dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
-     ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"
-
-     download_links = []
-     try:
-         for button in download_buttons:
-             payload = "action=show_link&link_id=" + button["value"]

-             headers = {
-                 'User-Agent': shared_state.values["user_agent"],
-             }
+     # If that fails, try pattern with just unit (e.g., "MB")
+     unit_match = re.match(r"([A-Za-z]+)", text.strip())
+     if unit_match:
+         unit = unit_match.group(1)
+         # Fall back to 0 when size is missing
+         return {"size": "0", "sizeunit": unit}

-             response = requests.post(ajax_url, payload, headers=headers, timeout=10).json()
-             if response["success"]:
-                 link = response["data"].split(",")[0]
-
-                 if dw in link:
-                     match = re.search(r'https://' + dw + r'/azn/af\.php\?v=([A-Z0-9]+)(#.*)?', link)
-                     if match:
-                         link = (f'https://filecrypt.cc/Container/{match.group(1)}'
-                                 f'.html{match.group(2) if match.group(2) else ""}')
-
-             mirror = button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
-             download_links.append([link, mirror])
-     except:
-         info(f"{hostname.upper()} has changed the site structure. Parsing links for {title} failed!")
-         pass
-
-     return download_links
+     # If neither pattern matches, raise the original error
+     raise ValueError(f"Invalid size format: {text}")


  def dw_feed(shared_state, start_time, request_from, mirror=None):
@@ -90,6 +57,10 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
      dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = dw

+     if not "arr" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
      if "Radarr" in request_from:
          feed_type = "videos/filme/"
      else:
@@ -151,16 +122,21 @@ def dw_feed(shared_state, start_time, request_from, mirror=None):
          info(f"Error loading {hostname.upper()} feed: {e}")

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

      return releases


- def dw_search(shared_state, start_time, request_from, search_string, mirror=None):
+ def dw_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
      releases = []
      dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
      password = dw

+     if not "arr" in request_from.lower():
+         debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+         return releases
+
+
      if "Radarr" in request_from:
          search_type = "videocategory=filme"
      else:
@@ -191,7 +167,11 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
      try:
          title = result.a.text.strip()

-         if not imdb_id and not shared_state.search_string_in_sanitized_title(search_string, title):
+         if not shared_state.is_valid_release(title,
+                                              request_from,
+                                              search_string,
+                                              season,
+                                              episode):
              continue

          if not imdb_id:
@@ -229,6 +209,6 @@ def dw_search(shared_state, start_time, request_from, search_string, mirror=None
          })

      elapsed_time = time.time() - start_time
-     debug(f"Time taken: {elapsed_time:.2f} seconds ({hostname.lower()})")
+     debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

      return releases
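
Reviewer note: the relaxed extract_size in dw.py now degrades gracefully when the numeric size is missing instead of aborting the whole entry. Worked examples of the three paths through the revised function above (inputs are illustrative):

extract_size("1024 MB")   # -> {"size": "1024", "sizeunit": "MB"}
extract_size("MB")        # -> {"size": "0", "sizeunit": "MB"}   (new fallback)
extract_size("12,3")      # -> raises ValueError: Invalid size format: 12,3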