quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.



Files changed (77)
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
quasarr/search/sources/sl.py (new file)
@@ -0,0 +1,246 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import datetime
+ import html
+ import re
+ import time
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import xml.etree.ElementTree as ET
+ from base64 import urlsafe_b64encode
+ from urllib.parse import quote_plus
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "sl"
+ supported_mirrors = ["nitroflare", "ddownload"]  # ignoring captcha-protected multiup/mirrorace for now
+
+
+ def extract_size(text):
+     match = re.match(r"([\d\.]+)\s*([KMGT]B)", text, re.IGNORECASE)
+     if match:
+         size = match.group(1)
+         unit = match.group(2).upper()
+         return {"size": size, "sizeunit": unit}
+     else:
+         raise ValueError(f"Invalid size format: {text}")
+
+
+ def parse_pubdate_to_iso(pubdate_str):
+     """
+     Parse an RFC-822 pubDate from RSS into an ISO8601 string with timezone.
+     """
+     dt = datetime.datetime.strptime(pubdate_str, '%a, %d %b %Y %H:%M:%S %z')
+     return dt.isoformat()
+
+
+ def sl_feed(shared_state, start_time, request_from, mirror=None):
+     releases = []
+
+     sl = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = sl
+
+     if "lazylibrarian" in request_from.lower():
+         feed_type = "ebooks"
+     elif "radarr" in request_from.lower():
+         feed_type = "movies"
+     else:
+         feed_type = "tv-shows"
+
+     if mirror and mirror not in supported_mirrors:
+         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
+         return releases
+
+     url = f'https://{sl}/{feed_type}/feed/'
+     headers = {'User-Agent': shared_state.values['user_agent']}
+
+     try:
+         xml_text = requests.get(url, headers=headers, timeout=10).text
+         root = ET.fromstring(xml_text)
+
+         for item in root.find('channel').findall('item'):
+             try:
+                 title = item.findtext('title').strip()
+                 if 'lazylibrarian' in request_from.lower():
+                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                     title = shared_state.normalize_magazine_title(title)
+
+                 source = item.findtext('link').strip()
+
+                 desc = item.findtext('description') or ''
+
+                 size_match = re.search(r"Size:\s*([\d\.]+\s*(?:GB|MB|KB|TB))", desc, re.IGNORECASE)
+                 if not size_match:
+                     debug(f"Size not found in RSS item: {title}")
+                     continue
+                 size_info = size_match.group(1).strip()
+                 size_item = extract_size(size_info)
+                 mb = shared_state.convert_to_mb(size_item)
+                 size = mb * 1024 * 1024
+
+                 pubdate = item.findtext('pubDate').strip()
+                 published = parse_pubdate_to_iso(pubdate)
+
+                 m = re.search(r"https?://www\.imdb\.com/title/(tt\d+)", desc)
+                 imdb_id = m.group(1) if m else None
+
+                 payload = urlsafe_b64encode(
+                     f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+                 ).decode("utf-8")
+                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                 releases.append({
+                     "details": {
+                         "title": title,
+                         "hostname": hostname.lower(),
+                         "imdb_id": imdb_id,
+                         "link": link,
+                         "mirror": mirror,
+                         "size": size,
+                         "date": published,
+                         "source": source
+                     },
+                     "type": "protected"
+                 })
+
+             except Exception as e:
+                 info(f"Error parsing {hostname.upper()} feed item: {e}")
+                 continue
+
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} feed: {e}")
+
+     elapsed = time.time() - start_time
+     debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+     return releases
+
+
+ def sl_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     releases = []
+     sl = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = sl
+
+     if "lazylibrarian" in request_from.lower():
+         feed_type = "ebooks"
+     elif "radarr" in request_from.lower():
+         feed_type = "movies"
+     else:
+         feed_type = "tv-shows"
+
+     if mirror and mirror not in supported_mirrors:
+         debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
+         return releases
+
+     try:
+         imdb_id = shared_state.is_imdb_id(search_string)
+         if imdb_id:
+             search_string = get_localized_title(shared_state, imdb_id, 'en') or ''
+             search_string = html.unescape(search_string)
+             if not search_string:
+                 info(f"Could not extract title from IMDb-ID {imdb_id}")
+                 return releases
+
+         # Build the list of URLs to search. For tv-shows also search the "foreign" section.
+         q = quote_plus(search_string)
+         urls = [f'https://{sl}/{feed_type}/?s={q}']
+         if feed_type == "tv-shows":
+             urls.append(f'https://{sl}/foreign/?s={q}')
+
+         headers = {"User-Agent": shared_state.values['user_agent']}
+
+         # Fetch pages in parallel (so we don't double the slow site latency)
+         def fetch(url):
+             try:
+                 debug(f"Fetching {url} ({hostname})")
+                 r = requests.get(url, headers=headers, timeout=10)
+                 r.raise_for_status()
+                 return r.text
+             except Exception as e:
+                 info(f"Error fetching {hostname} url {url}: {e}")
+                 return ''
+
+         html_texts = []
+         with ThreadPoolExecutor(max_workers=len(urls)) as tpe:
+             futures = {tpe.submit(fetch, u): u for u in urls}
+             for future in as_completed(futures):
+                 try:
+                     html_texts.append(future.result())
+                 except Exception as e:
+                     info(f"Error fetching {hostname} search page: {e}")
+
+         # Parse each result and collect unique releases (dedupe by source link)
+         seen_sources = set()
+         for html_text in html_texts:
+             if not html_text:
+                 continue
+             try:
+                 soup = BeautifulSoup(html_text, 'html.parser')
+                 posts = soup.find_all('div', class_=lambda c: c and c.startswith('post-'))
+
+                 for post in posts:
+                     try:
+                         a = post.find('h1').find('a')
+                         title = a.get_text(strip=True)
+
+                         if not shared_state.is_valid_release(title,
+                                                              request_from,
+                                                              search_string,
+                                                              season,
+                                                              episode):
+                             continue
+
+                         if 'lazylibrarian' in request_from.lower():
+                             title = shared_state.normalize_magazine_title(title)
+
+                         source = a['href']
+                         # dedupe
+                         if source in seen_sources:
+                             continue
+                         seen_sources.add(source)
+
+                         # Published date
+                         time_tag = post.find('span', {'class': 'localtime'})
+                         published = None
+                         if time_tag and time_tag.has_attr('data-lttime'):
+                             published = time_tag['data-lttime']
+                         published = published or datetime.datetime.utcnow().isoformat() + '+00:00'
+
+                         size = 0
+                         imdb_id = None
+
+                         payload = urlsafe_b64encode(
+                             f"{title}|{source}|{mirror}|0|{password}|{imdb_id}".encode('utf-8')
+                         ).decode('utf-8')
+                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+                         releases.append({
+                             "details": {
+                                 "title": title,
+                                 "hostname": hostname.lower(),
+                                 "imdb_id": imdb_id,
+                                 "link": link,
+                                 "mirror": mirror,
+                                 "size": size,
+                                 "date": published,
+                                 "source": source
+                             },
+                             "type": "protected"
+                         })
+                     except Exception as e:
+                         info(f"Error parsing {hostname.upper()} search item: {e}")
+                         continue
+             except Exception as e:
+                 info(f"Error parsing {hostname.upper()} search HTML: {e}")
+                 continue
+
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} search page: {e}")
+
+     elapsed = time.time() - start_time
+     debug(f"Search time: {elapsed:.2f}s ({hostname})")
+     return releases
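
Note on the code above: both sl.py endpoints hand the *arr client a local /download/?payload=... link rather than the raw source URL; the payload is just a urlsafe-base64 encoding of six pipe-joined fields (title|source|mirror|mb|password|imdb_id). A minimal sketch of the reverse mapping, useful when tracing what a generated link will request. decode_payload is a hypothetical helper for illustration, not part of quasarr; it assumes titles never contain "|", and fields that were None come back as the literal string "None":

    from base64 import urlsafe_b64decode

    def decode_payload(payload: str) -> dict:
        # Reverse the pipe-joined encoding built in sl_feed()/sl_search().
        # Assumption: exactly six fields, none containing "|".
        title, source, mirror, mb, password, imdb_id = (
            urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
        )
        return {
            "title": title,
            "source": source,
            "mirror": None if mirror == "None" else mirror,
            "size_mb": int(mb),
            "password": password,
            "imdb_id": None if imdb_id == "None" else imdb_id,
        }
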
quasarr/search/sources/wd.py (new file)
@@ -0,0 +1,208 @@
+ # -*- coding: utf-8 -*-
+ # Quasarr
+ # Project by https://github.com/rix1337
+
+ import html
+ import re
+ import time
+ from base64 import urlsafe_b64encode
+ from datetime import datetime, timedelta
+ from urllib.parse import quote, quote_plus
+
+ import requests
+ from bs4 import BeautifulSoup
+
+ from quasarr.providers.imdb_metadata import get_localized_title
+ from quasarr.providers.log import info, debug
+
+ hostname = "wd"
+ supported_mirrors = ["rapidgator", "ddownload", "katfile", "fikper", "turbobit"]
+
+ # regex to detect porn-tag .XXX. (case-insensitive, dots included)
+ XXX_REGEX = re.compile(r"\.xxx\.", re.I)
+ # regex to detect video resolution
+ RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
+ # regex to detect video codec tags
+ CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
+
+
+ def convert_to_rss_date(date_str):
+     """
+     date_str comes in as "02.05.2025 - 09:04"
+     Return RFC-822 style date with +0000 timezone.
+     """
+     parsed = datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
+     return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+
+ def extract_size(text):
+     """
+     e.g. "8 GB" → {"size": "8", "sizeunit": "GB"}
+     """
+     match = re.match(r"(\d+(?:\.\d+)?)\s*([A-Za-z]+)", text)
+     if not match:
+         raise ValueError(f"Invalid size format: {text!r}")
+     return {"size": match.group(1), "sizeunit": match.group(2)}
+
+
+ def _parse_rows(
+         soup,
+         shared_state,
+         url_base,
+         password,
+         mirror_filter,
+         request_from=None,
+         search_string=None,
+         season=None,
+         episode=None
+ ):
+     """
+     Walk the <table> rows, extract one release per row.
+     Only include rows with at least one supported mirror.
+     If mirror_filter provided, only include rows where mirror_filter is present.
+
+     Context detection:
+     - feed when search_string is None
+     - search when search_string is a str
+
+     Porn-filtering:
+     - feed: always drop .XXX.
+     - search: drop .XXX. unless 'xxx' in search_string (case-insensitive)
+
+     If in search context, also filter out non-video releases (ebooks, games).
+     """
+     releases = []
+     is_search = search_string is not None
+
+     one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+
+     for tr in soup.select("table.table tbody tr.lh-sm"):
+         try:
+             a = tr.find("a", class_="upload-link")
+             raw_href = a["href"]
+             href = quote(raw_href, safe="/?:=&")
+             source = f"https://{url_base}{href}"
+
+             preview_div = a.find("div", class_="preview-text")
+             date_txt = preview_div.get_text(strip=True) if preview_div else None
+             if preview_div:
+                 preview_div.extract()
+
+             title = a.get_text(strip=True)
+
+             # search context contains non-video releases (ebooks, games, etc.)
+             if is_search:
+                 if not shared_state.is_valid_release(title,
+                                                      request_from,
+                                                      search_string,
+                                                      season,
+                                                      episode):
+                     continue
+
+                 if 'lazylibrarian' in request_from.lower():
+                     # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                     title = shared_state.normalize_magazine_title(title)
+                 else:
+                     # drop .XXX. unless user explicitly searched xxx
+                     if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
+                         continue
+                     # require resolution/codec
+                     if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
+                         continue
+                     # require no spaces in title
+                     if " " in title:
+                         continue
+
+             hoster_names = tr.find("span", class_="button-warezkorb")["data-hoster-names"]
+             mirrors = [m.strip().lower() for m in hoster_names.split(",")]
+             valid = [m for m in mirrors if m in supported_mirrors]
+             if not valid or (mirror_filter and mirror_filter not in valid):
+                 continue
+
+             size_txt = tr.find("span", class_="element-size").get_text(strip=True)
+             sz = extract_size(size_txt)
+             mb = shared_state.convert_to_mb(sz)
+             size_bytes = mb * 1024 * 1024
+
+             imdb_id = None
+             published = convert_to_rss_date(date_txt) if date_txt else one_hour_ago
+
+             payload = urlsafe_b64encode(
+                 f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
+             ).decode()
+             download_link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+             releases.append({
+                 "details": {
+                     "title": title,
+                     "hostname": hostname,
+                     "imdb_id": imdb_id,
+                     "link": download_link,
+                     "mirror": mirror_filter,
+                     "size": size_bytes,
+                     "date": published,
+                     "source": source
+                 },
+                 "type": "protected"
+             })
+         except Exception as e:
+             debug(f"Error parsing {hostname.upper()} row: {e}")
+             continue
+     return releases
+
+
+ def wd_feed(shared_state, start_time, request_from, mirror=None):
+     wd = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = wd
+
+     if "lazylibrarian" in request_from.lower():
+         feed_type = "Ebooks"
+     elif "radarr" in request_from.lower():
+         feed_type = "Movies"
+     else:
+         feed_type = "Serien"
+
+     url = f"https://{wd}/{feed_type}"
+     headers = {'User-Agent': shared_state.values["user_agent"]}
+     try:
+         response = requests.get(url, headers=headers, timeout=10).content
+         soup = BeautifulSoup(response, "html.parser")
+         releases = _parse_rows(soup, shared_state, wd, password, mirror)
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} feed: {e}")
+         releases = []
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
+
+
+ def wd_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+     releases = []
+     wd = shared_state.values["config"]("Hostnames").get(hostname.lower())
+     password = wd
+
+     imdb_id = shared_state.is_imdb_id(search_string)
+     if imdb_id:
+         search_string = get_localized_title(shared_state, imdb_id, 'de')
+         if not search_string:
+             info(f"Could not extract title from IMDb-ID {imdb_id}")
+             return releases
+         search_string = html.unescape(search_string)
+
+     q = quote_plus(search_string)
+     url = f"https://{wd}/search?q={q}"
+     headers = {'User-Agent': shared_state.values["user_agent"]}
+
+     try:
+         response = requests.get(url, headers=headers, timeout=10).content
+         soup = BeautifulSoup(response, "html.parser")
+         releases = _parse_rows(
+             soup, shared_state, wd, password, mirror,
+             request_from=request_from,
+             search_string=search_string,
+             season=season, episode=episode
+         )
+     except Exception as e:
+         info(f"Error loading {hostname.upper()} search: {e}")
+         releases = []
+     debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+     return releases
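
Note on the code above: the two wd.py date/size helpers can be sanity-checked against the exact example formats quoted in their docstrings. A quick sketch, assuming the module is importable as quasarr.search.sources.wd:

    from quasarr.search.sources.wd import convert_to_rss_date, extract_size

    # "02.05.2025 - 09:04" is day.month.year, i.e. 2 May 2025
    print(convert_to_rss_date("02.05.2025 - 09:04"))
    # -> Fri, 02 May 2025 09:04:00 +0000
    print(extract_size("8 GB"))
    # -> {'size': '8', 'sizeunit': 'GB'}
    print(extract_size("1.5 GB"))
    # -> {'size': '1.5', 'sizeunit': 'GB'}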