quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of quasarr might be problematic. Click here for more details.

Files changed (77)
  1. quasarr/__init__.py +316 -42
  2. quasarr/api/__init__.py +187 -0
  3. quasarr/api/arr/__init__.py +387 -0
  4. quasarr/api/captcha/__init__.py +1189 -0
  5. quasarr/api/config/__init__.py +23 -0
  6. quasarr/api/sponsors_helper/__init__.py +166 -0
  7. quasarr/api/statistics/__init__.py +196 -0
  8. quasarr/downloads/__init__.py +319 -256
  9. quasarr/downloads/linkcrypters/__init__.py +0 -0
  10. quasarr/downloads/linkcrypters/al.py +237 -0
  11. quasarr/downloads/linkcrypters/filecrypt.py +444 -0
  12. quasarr/downloads/linkcrypters/hide.py +123 -0
  13. quasarr/downloads/packages/__init__.py +476 -0
  14. quasarr/downloads/sources/al.py +697 -0
  15. quasarr/downloads/sources/by.py +106 -0
  16. quasarr/downloads/sources/dd.py +76 -0
  17. quasarr/downloads/sources/dj.py +7 -0
  18. quasarr/downloads/sources/dl.py +199 -0
  19. quasarr/downloads/sources/dt.py +66 -0
  20. quasarr/downloads/sources/dw.py +14 -7
  21. quasarr/downloads/sources/he.py +112 -0
  22. quasarr/downloads/sources/mb.py +47 -0
  23. quasarr/downloads/sources/nk.py +54 -0
  24. quasarr/downloads/sources/nx.py +42 -83
  25. quasarr/downloads/sources/sf.py +159 -0
  26. quasarr/downloads/sources/sj.py +7 -0
  27. quasarr/downloads/sources/sl.py +90 -0
  28. quasarr/downloads/sources/wd.py +110 -0
  29. quasarr/downloads/sources/wx.py +127 -0
  30. quasarr/providers/cloudflare.py +204 -0
  31. quasarr/providers/html_images.py +22 -0
  32. quasarr/providers/html_templates.py +211 -104
  33. quasarr/providers/imdb_metadata.py +108 -3
  34. quasarr/providers/log.py +19 -0
  35. quasarr/providers/myjd_api.py +201 -40
  36. quasarr/providers/notifications.py +99 -11
  37. quasarr/providers/obfuscated.py +65 -0
  38. quasarr/providers/sessions/__init__.py +0 -0
  39. quasarr/providers/sessions/al.py +286 -0
  40. quasarr/providers/sessions/dd.py +78 -0
  41. quasarr/providers/sessions/dl.py +175 -0
  42. quasarr/providers/sessions/nx.py +76 -0
  43. quasarr/providers/shared_state.py +656 -79
  44. quasarr/providers/statistics.py +154 -0
  45. quasarr/providers/version.py +60 -1
  46. quasarr/providers/web_server.py +1 -1
  47. quasarr/search/__init__.py +144 -15
  48. quasarr/search/sources/al.py +448 -0
  49. quasarr/search/sources/by.py +204 -0
  50. quasarr/search/sources/dd.py +135 -0
  51. quasarr/search/sources/dj.py +213 -0
  52. quasarr/search/sources/dl.py +354 -0
  53. quasarr/search/sources/dt.py +265 -0
  54. quasarr/search/sources/dw.py +94 -67
  55. quasarr/search/sources/fx.py +89 -33
  56. quasarr/search/sources/he.py +196 -0
  57. quasarr/search/sources/mb.py +195 -0
  58. quasarr/search/sources/nk.py +188 -0
  59. quasarr/search/sources/nx.py +75 -21
  60. quasarr/search/sources/sf.py +374 -0
  61. quasarr/search/sources/sj.py +213 -0
  62. quasarr/search/sources/sl.py +246 -0
  63. quasarr/search/sources/wd.py +208 -0
  64. quasarr/search/sources/wx.py +337 -0
  65. quasarr/storage/config.py +39 -10
  66. quasarr/storage/setup.py +269 -97
  67. quasarr/storage/sqlite_database.py +6 -1
  68. quasarr-1.23.0.dist-info/METADATA +306 -0
  69. quasarr-1.23.0.dist-info/RECORD +77 -0
  70. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
  71. quasarr/arr/__init__.py +0 -423
  72. quasarr/captcha_solver/__init__.py +0 -284
  73. quasarr-0.1.6.dist-info/METADATA +0 -81
  74. quasarr-0.1.6.dist-info/RECORD +0 -31
  75. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
  76. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
  77. {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,337 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Quasarr
3
+ # Project by https://github.com/rix1337
4
+
5
+ import html
6
+ import time
7
+ import traceback
8
+ import warnings
9
+ from base64 import urlsafe_b64encode
10
+ from datetime import datetime
11
+
12
+ import requests
13
+ from bs4 import BeautifulSoup
14
+ from bs4 import XMLParsedAsHTMLWarning
15
+
16
+ from quasarr.providers.imdb_metadata import get_localized_title
17
+ from quasarr.providers.log import info, debug
18
+
19
+ warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning) # we dont want to use lxml
20
+
21
+ hostname = "wx"
22
+ supported_mirrors = []
23
+
24
+
25
def wx_feed(shared_state, start_time, request_from, mirror=None):
    """
    Fetch the latest releases from the site's RSS/Atom feed.

    :param shared_state: global shared state (config, user agent, internal address)
    :param start_time: timestamp used to report elapsed time at the end
    :param request_from: identifier of the calling *arr application
    :param mirror: optional mirror preference, passed through into the payload
    :return: list of release dicts of type "protected" (empty on any failure)
    """
    releases = []
    host = shared_state.values["config"]("Hostnames").get(hostname)

    # Book searches (LazyLibrarian) are not supported by this source.
    if "lazylibrarian" in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    rss_url = f'https://{host}/rss'
    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    try:
        response = requests.get(rss_url, headers=headers, timeout=10)

        if response.status_code != 200:
            info(f"{hostname.upper()}: RSS feed returned status {response.status_code}")
            return releases

        soup = BeautifulSoup(response.content, 'html.parser')
        # Atom feeds use <entry>, classic RSS uses <item> - try both.
        items = soup.find_all('entry')

        if not items:
            items = soup.find_all('item')

        if not items:
            debug(f"{hostname.upper()}: No entries found in RSS feed")
            return releases

        debug(f"{hostname.upper()}: Found {len(items)} entries in RSS feed")

        for item in items:
            try:
                title_tag = item.find('title')
                if not title_tag:
                    continue

                title = title_tag.get_text(strip=True)
                if not title:
                    continue

                # Strip HTML entities and any leaked CDATA markers, then
                # normalize spaces to dots (scene-style release naming).
                title = html.unescape(title)
                title = title.replace(']]>', '').replace('<![CDATA[', '')
                title = title.replace(' ', '.')

                # Atom: <link rel="alternate" href="..."/>; RSS: <link>url</link>
                link_tag = item.find('link', rel='alternate')
                if link_tag and link_tag.has_attr('href'):
                    source = link_tag['href']
                else:
                    link_tag = item.find('link')
                    if not link_tag:
                        continue
                    source = link_tag.get_text(strip=True)

                if not source:
                    continue

                # BUGFIX: html.parser lowercases all tag names, so RSS
                # <pubDate> must be looked up as "pubdate" - the previous
                # find('pubDate') could never match.
                pub_date = item.find('updated') or item.find('pubdate')
                if pub_date:
                    published = pub_date.get_text(strip=True)
                else:
                    # Fallback: use current time if no publication date found
                    published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")

                mb = 0  # size is not exposed by the feed
                size = 0
                imdb_id = None  # feed entries carry no IMDb id
                password = host.upper()

                # Encode all download parameters into the internal payload URL.
                payload = urlsafe_b64encode(
                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id or ''}".encode("utf-8")
                ).decode("utf-8")
                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                releases.append({
                    "details": {
                        "title": title,
                        "hostname": hostname,
                        "imdb_id": imdb_id,
                        "link": link,
                        "mirror": mirror,
                        "size": size,
                        "date": published,
                        "source": source
                    },
                    "type": "protected"
                })

            except Exception as e:
                # One malformed entry must not abort the whole feed run.
                debug(f"{hostname.upper()}: error parsing RSS entry: {e}")
                continue

    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")
        return releases

    elapsed_time = time.time() - start_time
    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

    return releases
129
+
130
+
131
def wx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    """
    Search using internal API.

    Resolves IMDb IDs to a German title first, queries the site's JSON search
    endpoint, then fetches a detail document per hit and emits one release for
    the main title plus one per entry in the detail's "releases" array.

    :param shared_state: global shared state (config, user agent, helpers)
    :param start_time: timestamp used to report elapsed time at the end
    :param request_from: identifier of the calling *arr application
    :param search_string: free-text query or an IMDb ID
    :param mirror: optional mirror preference, passed through into the payload
    :param season: optional season filter forwarded to is_valid_release
    :param episode: optional episode filter forwarded to is_valid_release
    :return: list of release dicts of type "protected" (empty on any failure)
    """
    releases = []
    host = shared_state.values["config"]("Hostnames").get(hostname)

    # Book searches (LazyLibrarian) are not supported by this source.
    if "lazylibrarian" in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    # IMDb IDs are translated to a localized (German) title before searching,
    # since the site indexes German titles.
    imdb_id = shared_state.is_imdb_id(search_string)
    if imdb_id:
        info(f"{hostname.upper()}: Received IMDb ID: {imdb_id}")
        title = get_localized_title(shared_state, imdb_id, 'de')
        if not title:
            info(f"{hostname.upper()}: no title for IMDb {imdb_id}")
            return releases
        info(f"{hostname.upper()}: Translated IMDb {imdb_id} to German title: '{title}'")
        search_string = html.unescape(title)
    else:
        info(f"{hostname.upper()}: Using search string directly: '{search_string}'")

    api_url = f'https://api.{host}/start/search'

    headers = {
        'User-Agent': shared_state.values["user_agent"],
        'Accept': 'application/json, text/plain, */*',
        'Referer': f'https://{host}/search'
    }

    # Query parameters mimic the site's own frontend search request.
    params = {
        '__LOAD_P': '',
        'per_page': 50,
        'q': search_string,
        'selectedTypes': '',
        'selectedGenres': '',
        'types': 'movie,series,anime',
        'genres': '',
        'years': '',
        'ratings': '',
        'page': 1,
        'sortBy': 'latest',
        'sortOrder': 'desc'
    }

    # Narrow the media types to what the caller can actually use.
    if "sonarr" in request_from.lower():
        params['types'] = 'series,anime'
    elif "radarr" in request_from.lower():
        params['types'] = 'movie'

    info(f"{hostname.upper()}: Searching: '{search_string}'")

    try:
        response = requests.get(api_url, headers=headers, params=params, timeout=10)

        if response.status_code != 200:
            info(f"{hostname.upper()}: Search API returned status {response.status_code}")
            return releases

        data = response.json()

        # The response envelope varies; probe the known layouts in order.
        if 'items' in data and 'data' in data['items']:
            items = data['items']['data']
        elif 'data' in data:
            items = data['data']
        elif 'results' in data:
            items = data['results']
        else:
            items = data if isinstance(data, list) else []

        info(f"{hostname.upper()}: Found {len(items)} items in search results")

        for item in items:
            try:
                uid = item.get('uid')
                if not uid:
                    debug(f"{hostname.upper()}: Item has no UID, skipping")
                    continue

                info(f"{hostname.upper()}: Fetching details for UID: {uid}")

                # One extra request per hit: the detail document carries the
                # full title, IMDb id and the per-release list.
                detail_url = f'https://api.{host}/start/d/{uid}'
                detail_response = requests.get(detail_url, headers=headers, timeout=10)

                if detail_response.status_code != 200:
                    debug(f"{hostname.upper()}: Detail API returned {detail_response.status_code} for {uid}")
                    continue

                detail_data = detail_response.json()

                if 'item' in detail_data:
                    detail_item = detail_data['item']
                else:
                    detail_item = detail_data

                # Prefer the id the caller searched by, then fall back to the
                # detail document's various id fields.
                item_imdb_id = imdb_id
                if not item_imdb_id:
                    item_imdb_id = detail_item.get('imdb_id') or detail_item.get('imdbid')
                    if not item_imdb_id and 'options' in detail_item:
                        item_imdb_id = detail_item['options'].get('imdb_id')

                source = f"https://{host}/detail/{uid}"

                main_title = detail_item.get('fulltitle') or detail_item.get('title') or detail_item.get('name')
                if main_title:
                    # Scene-style normalization: entities decoded, spaces -> dots.
                    title = html.unescape(main_title)
                    title = title.replace(' ', '.')

                    if shared_state.is_valid_release(title, request_from, search_string, season, episode):
                        published = detail_item.get('updated_at') or detail_item.get('created_at')
                        if not published:
                            published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
                        password = f"www.{host}"

                        # Size 0: the main title carries no size information.
                        payload = urlsafe_b64encode(
                            f"{title}|{source}|{mirror}|0|{password}|{item_imdb_id or ''}".encode("utf-8")
                        ).decode("utf-8")
                        link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                        releases.append({
                            "details": {
                                "title": title,
                                "hostname": hostname,
                                "imdb_id": item_imdb_id,
                                "link": link,
                                "mirror": mirror,
                                "size": 0,
                                "date": published,
                                "source": source
                            },
                            "type": "protected"
                        })

                # Emit one additional release per concrete entry in the
                # detail's "releases" array (these carry sizes and own UIDs).
                if 'releases' in detail_item and isinstance(detail_item['releases'], list):
                    info(f"{hostname.upper()}: Found {len(detail_item['releases'])} releases for {uid}")

                    for release in detail_item['releases']:
                        try:
                            release_title = release.get('fulltitle')
                            if not release_title:
                                continue

                            release_title = html.unescape(release_title)
                            release_title = release_title.replace(' ', '.')

                            if not shared_state.is_valid_release(release_title, request_from, search_string, season,
                                                                 episode):
                                debug(f"{hostname.upper()}: ✗ Release filtered out: {release_title}")
                                continue

                            # Deep-link to the specific release when it has an
                            # own UID, otherwise fall back to the detail page.
                            release_uid = release.get('uid')
                            if release_uid:
                                release_source = f"https://{host}/detail/{uid}?release={release_uid}"
                            else:
                                release_source = source

                            release_published = release.get('updated_at') or release.get(
                                'created_at') or detail_item.get('updated_at')
                            if not release_published:
                                release_published = datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
                            release_size = release.get('size', 0)
                            password = f"www.{host}"

                            payload = urlsafe_b64encode(
                                f"{release_title}|{release_source}|{mirror}|{release_size}|{password}|{item_imdb_id or ''}".encode(
                                    "utf-8")
                            ).decode("utf-8")
                            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                            releases.append({
                                "details": {
                                    "title": release_title,
                                    "hostname": hostname,
                                    "imdb_id": item_imdb_id,
                                    "link": link,
                                    "mirror": mirror,
                                    "size": release_size,
                                    "date": release_published,
                                    "source": release_source
                                },
                                "type": "protected"
                            })

                        except Exception as e:
                            # One broken release must not drop the others.
                            debug(f"{hostname.upper()}: Error parsing release: {e}")
                            continue
                else:
                    debug(f"{hostname.upper()}: No releases array found for {uid}")

            except Exception as e:
                # One broken search hit must not abort the whole search.
                debug(f"{hostname.upper()}: Error processing item: {e}")
                debug(f"{hostname.upper()}: {traceback.format_exc()}")
                continue

        info(f"{hostname.upper()}: Returning {len(releases)} total releases")

    except Exception as e:
        info(f"Error in {hostname.upper()} search: {e}")

        debug(f"{hostname.upper()}: {traceback.format_exc()}")
        return releases

    elapsed_time = time.time() - start_time
    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

    return releases
quasarr/storage/config.py CHANGED
@@ -11,24 +11,53 @@ from Cryptodome.Cipher import AES
11
11
  from Cryptodome.Random import get_random_bytes
12
12
  from Cryptodome.Util.Padding import pad
13
13
 
14
- from quasarr.storage.sqlite_database import DataBase
15
14
  from quasarr.providers import shared_state
15
+ from quasarr.storage.sqlite_database import DataBase
16
16
 
17
17
 
18
18
  class Config(object):
19
19
  _DEFAULT_CONFIG = {
20
+ 'API': [
21
+ ("key", "secret", ""),
22
+ ],
20
23
  'JDownloader': [
21
24
  ("user", "secret", ""),
22
25
  ("password", "secret", ""),
23
26
  ("device", "str", ""),
24
27
  ],
25
28
  'Hostnames': [
26
- ("fx", "secret", ""),
27
- ("sf", "secret", ""),
29
+ ("al", "secret", ""),
30
+ ("by", "secret", ""),
31
+ ("dd", "secret", ""),
32
+ ("dj", "secret", ""),
33
+ ("dl", "secret", ""),
34
+ ("dt", "secret", ""),
28
35
  ("dw", "secret", ""),
29
- ("ff", "secret", ""),
36
+ ("fx", "secret", ""),
37
+ ("he", "secret", ""),
38
+ ("mb", "secret", ""),
39
+ ("nk", "secret", ""),
30
40
  ("nx", "secret", ""),
31
- ("dd", "secret", "")
41
+ ("sf", "secret", ""),
42
+ ("sj", "secret", ""),
43
+ ("sl", "secret", ""),
44
+ ("wd", "secret", ""),
45
+ ("wx", "secret", "")
46
+ ],
47
+ 'FlareSolverr': [
48
+ ("url", "str", ""),
49
+ ],
50
+ 'AL': [
51
+ ("user", "secret", ""),
52
+ ("password", "secret", "")
53
+ ],
54
+ 'DD': [
55
+ ("user", "secret", ""),
56
+ ("password", "secret", "")
57
+ ],
58
+ 'DL': [
59
+ ("user", "secret", ""),
60
+ ("password", "secret", "")
32
61
  ],
33
62
  'NX': [
34
63
  ("user", "secret", ""),
@@ -47,10 +76,10 @@ class Config(object):
47
76
  self._section) or self._set_default_config(self._section)
48
77
  self.__config__ = self._read_config(self._section)
49
78
  except configparser.DuplicateSectionError:
50
- print('Doppelte Sektion in der Konfigurationsdatei.')
79
+ print('Duplicate Section in Config File')
51
80
  raise
52
- except:
53
- print('Ein unbekannter Fehler in der Konfigurationsdatei ist aufgetreten.')
81
+ except Exception as e:
82
+ print(f'Unknown error while reading config file: {e}')
54
83
  raise
55
84
 
56
85
  def _set_default_config(self, section):
@@ -124,11 +153,11 @@ def get_clean_hostnames(shared_state):
124
153
  if strg and '/' in strg:
125
154
  strg = strg.replace('https://', '').replace('http://', '')
126
155
  strg = re.findall(r'([a-z-.]*\.[a-z]*)', strg)[0]
127
- hostnames.save(host, string)
156
+ hostnames.save(host, strg)
128
157
  if strg and re.match(r'.*[A-Z].*', strg):
129
158
  hostnames.save(host, strg.lower())
130
159
  if strg:
131
- print(f'{host.upper()}: "{strg}"')
160
+ print(f'Using "{strg}" as hostname for "{host}"')
132
161
  return strg
133
162
 
134
163
  for name in shared_state.values["sites"]: