quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- quasarr/__init__.py +316 -42
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +387 -0
- quasarr/api/captcha/__init__.py +1189 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +319 -256
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +476 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dl.py +199 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +14 -7
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +54 -0
- quasarr/downloads/sources/nx.py +42 -83
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/downloads/sources/wx.py +127 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +22 -0
- quasarr/providers/html_templates.py +211 -104
- quasarr/providers/imdb_metadata.py +108 -3
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +201 -40
- quasarr/providers/notifications.py +99 -11
- quasarr/providers/obfuscated.py +65 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/dl.py +175 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +656 -79
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +60 -1
- quasarr/providers/web_server.py +1 -1
- quasarr/search/__init__.py +144 -15
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +204 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dl.py +354 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +94 -67
- quasarr/search/sources/fx.py +89 -33
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +75 -21
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/search/sources/wx.py +337 -0
- quasarr/storage/config.py +39 -10
- quasarr/storage/setup.py +269 -97
- quasarr/storage/sqlite_database.py +6 -1
- quasarr-1.23.0.dist-info/METADATA +306 -0
- quasarr-1.23.0.dist-info/RECORD +77 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
- quasarr/arr/__init__.py +0 -423
- quasarr/captcha_solver/__init__.py +0 -284
- quasarr-0.1.6.dist-info/METADATA +0 -81
- quasarr-0.1.6.dist-info/RECORD +0 -31
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
quasarr/search/sources/fx.py
CHANGED
@@ -3,14 +3,20 @@
 # Project by https://github.com/rix1337

 import re
+import time
 from base64 import urlsafe_b64encode

 import requests
 from bs4 import BeautifulSoup

+from quasarr.providers.log import info, debug
+
+hostname = "fx"
+supported_mirrors = ["rapidgator"]
+

 def extract_size(text):
-    match = re.match(r"(\d+)([A-Za-z]+)", text)
+    match = re.match(r"(\d+)\s*([A-Za-z]+)", text)
     if match:
         size = match.group(1)
         unit = match.group(2)
@@ -19,10 +25,20 @@ def extract_size(text):
     raise ValueError(f"Invalid size format: {text}")


-def fx_feed(shared_state):
+def fx_feed(shared_state, start_time, request_from, mirror=None):
     releases = []

-    fx = shared_state.values["config"]("Hostnames").get(
+    fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
+
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases

     password = fx.split(".")[0]
     url = f'https://{fx}/'
@@ -31,11 +47,11 @@ def fx_feed(shared_state):
     }

     try:
-        request = requests.get(url, headers=headers).content
+        request = requests.get(url, headers=headers, timeout=10).content
         feed = BeautifulSoup(request, "html.parser")
-        items = feed.
+        items = feed.find_all("article")
     except Exception as e:
-
+        info(f"Error loading {hostname.upper()} feed: {e}")
         return releases

     if items:
@@ -44,28 +60,34 @@ def fx_feed(shared_state):
                 article = BeautifulSoup(str(item), "html.parser")
                 try:
                     source = article.find('h2', class_='entry-title').a["href"]
-                    titles = article.
+                    titles = article.find_all("a", href=re.compile("(filecrypt|safe." + fx + ")"))
                 except:
                     continue
                 i = 0
                 for title in titles:
                     link = title["href"]
-                    title = (title.text
-
+                    title = shared_state.sanitize_title(title.text)
+
+                    try:
+                        imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
+                        imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
+                    except:
+                        imdb_id = None

                     try:
-                        size_info = article.
+                        size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
                             i].next.next.text.replace("|", "").strip()
                         size_item = extract_size(size_info)
                         mb = shared_state.convert_to_mb(size_item)
                         size = mb * 1024 * 1024
-                        payload = urlsafe_b64encode(
+                        payload = urlsafe_b64encode(
+                            f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
                         link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                     except:
                         continue

                     try:
-                        dates = article.
+                        dates = article.find_all("time")
                         for date in dates:
                             published = date["datetime"]
                     except:
@@ -73,8 +95,11 @@ def fx_feed(shared_state):

                     releases.append({
                         "details": {
-                            "title":
+                            "title": title,
+                            "hostname": hostname.lower(),
+                            "imdb_id": imdb_id,
                             "link": link,
+                            "mirror": mirror,
                             "size": size,
                             "date": published,
                             "source": source
@@ -83,68 +108,93 @@ def fx_feed(shared_state):
                     })

             except Exception as e:
-
+                info(f"Error parsing {hostname.upper()} feed: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

     return releases


-def fx_search(shared_state, imdb_id):
+def fx_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
     releases = []
+    fx = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = fx.split(".")[0]

-
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases

-
-
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
+    url = f'https://{fx}/?s={search_string}'
     headers = {
         'User-Agent': shared_state.values["user_agent"],
     }

     try:
-        request = requests.get(url, headers=headers).content
+        request = requests.get(url, headers=headers, timeout=10).content
         search = BeautifulSoup(request, "html.parser")
         results = search.find('h2', class_='entry-title')

     except Exception as e:
-
+        info(f"Error loading {hostname.upper()} feed: {e}")
         return releases

     if results:
         for result in results:
-            result_source = result["href"]
             try:
-
+                result_source = result["href"]
+                request = requests.get(result_source, headers=headers, timeout=10).content
                 feed = BeautifulSoup(request, "html.parser")
-                items = feed.
+                items = feed.find_all("article")
             except Exception as e:
-
+                info(f"Error loading {hostname.upper()} feed: {e}")
                 return releases

             for item in items:
                 try:
                     article = BeautifulSoup(str(item), "html.parser")
                     try:
-                        titles = article.
+                        titles = article.find_all("a", href=re.compile(r"filecrypt\."))
                     except:
                         continue
                     i = 0
                     for title in titles:
                         link = title["href"]
-                        title = (title.text
-
+                        title = shared_state.sanitize_title(title.text)
+
+                        if not shared_state.is_valid_release(title,
+                                                             request_from,
+                                                             search_string,
+                                                             season,
+                                                             episode):
+                            continue
+
+                        try:
+                            imdb_link = article.find("a", href=re.compile(r"imdb\.com"))
+                            imdb_id = re.search(r'tt\d+', str(imdb_link)).group()
+                        except:
+                            imdb_id = None
+
                         try:
-                            size_info = article.
+                            size_info = article.find_all("strong", text=re.compile(r"(size|größe)", re.IGNORECASE))[
                                 i].next.next.text.replace("|", "").strip()
                             size_item = extract_size(size_info)
                             mb = shared_state.convert_to_mb(size_item)
                             size = mb * 1024 * 1024
-                            payload = urlsafe_b64encode(
-                                "utf-8")
+                            payload = urlsafe_b64encode(
+                                f"{title}|{link}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
                             link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
                         except:
                             continue

                         try:
-                            dates = article.
+                            dates = article.find_all("time")
                             for date in dates:
                                 published = date["datetime"]
                         except:
@@ -152,8 +202,11 @@ def fx_search(shared_state, imdb_id):

                         releases.append({
                             "details": {
-                                "title":
+                                "title": title,
+                                "hostname": hostname.lower(),
+                                "imdb_id": imdb_id,
                                 "link": link,
+                                "mirror": mirror,
                                 "size": size,
                                 "date": published,
                                 "source": result_source
@@ -162,6 +215,9 @@ def fx_search(shared_state, imdb_id):
                         })

                 except Exception as e:
-
+                    info(f"Error parsing {hostname.upper()} search: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

     return releases
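Both fx_feed and fx_search now serialize the release metadata into a single download payload: a pipe-separated string of title, link, mirror, size in MB, password, and IMDb ID, base64url-encoded into the /download/ URL. A minimal decoding sketch under that assumption (the decode_payload helper below is illustrative only and not part of the package):

    # Hypothetical helper (not in quasarr): reverses the payload format
    # "title|link|mirror|mb|password|imdb_id" built in fx_feed/fx_search above.
    from base64 import urlsafe_b64decode

    def decode_payload(payload):
        fields = urlsafe_b64decode(payload.encode("utf-8")).decode("utf-8").split("|")
        title, link, mirror, mb, password, imdb_id = fields
        # Values that were None at encode time arrive as the literal string "None".
        return {
            "title": title,
            "link": link,
            "mirror": None if mirror == "None" else mirror,
            "mb": mb,
            "password": password,
            "imdb_id": None if imdb_id == "None" else imdb_id,
        }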
quasarr/search/sources/he.py
ADDED

@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timedelta
+from html import unescape
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "he"
+supported_mirrors = ["rapidgator", "nitroflare"]
+
+
+def parse_posted_ago(txt):
+    try:
+        m = re.search(r"(\d+)\s*(sec|min|hour|day|week|month|year)s?", txt, re.IGNORECASE)
+        if not m:
+            return ''
+        value = int(m.group(1))
+        unit = m.group(2).lower()
+        now = datetime.utcnow()
+        if unit.startswith('sec'):
+            delta = timedelta(seconds=value)
+        elif unit.startswith('min'):
+            delta = timedelta(minutes=value)
+        elif unit.startswith('hour'):
+            delta = timedelta(hours=value)
+        elif unit.startswith('day'):
+            delta = timedelta(days=value)
+        elif unit.startswith('week'):
+            delta = timedelta(weeks=value)
+        elif unit.startswith('month'):
+            delta = timedelta(days=30 * value)
+        else:
+            delta = timedelta(days=365 * value)
+        return (datetime.utcnow() - delta).strftime("%a, %d %b %Y %H:%M:%S +0000")
+    except Exception:
+        return ''
+
+
+def extract_size(text: str) -> dict:
+    match = re.search(r"(\d+(?:[\.,]\d+)?)\s*([A-Za-z]+)", text)
+    if match:
+        size = match.group(1).replace(',', '.')
+        unit = match.group(2)
+        return {"size": size, "sizeunit": unit}
+    return {"size": "0", "sizeunit": "MB"}
+
+
+def he_feed(*args, **kwargs):
+    return he_search(*args, **kwargs)
+
+
+def he_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
+    releases = []
+    host = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+    if "radarr" in request_from.lower():
+        tag = "movies"
+    else:
+        tag = "tv-shows"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by {hostname}.')
+        return releases
+
+    source_search = ""
+    if search_string != "":
+        imdb_id = shared_state.is_imdb_id(search_string)
+        if imdb_id:
+            local_title = get_localized_title(shared_state, imdb_id, 'en')
+            if not local_title:
+                info(f"{hostname}: no title for IMDb {imdb_id}")
+                return releases
+            source_search = local_title
+        else:
+            return releases
+        source_search = unescape(source_search)
+    else:
+        imdb_id = None
+
+    url = f'https://{host}/tag/{tag}/'
+
+    headers = {"User-Agent": shared_state.values["user_agent"]}
+    params = {"s": source_search}
+
+    try:
+        r = requests.get(url, headers=headers, params=params, timeout=10)
+        soup = BeautifulSoup(r.content, 'html.parser')
+        results = soup.find_all('div', class_='item')
+    except Exception as e:
+        info(f"{hostname}: search load error: {e}")
+        return releases
+
+    if not results:
+        return releases
+
+    for result in results:
+        try:
+            data = result.find('div', class_='data')
+            if not data:
+                continue
+
+            headline = data.find('h5')
+            if not headline:
+                continue
+
+            a = headline.find('a', href=True)
+            if not a:
+                continue
+
+            source = a['href'].strip()
+
+            head_title = a.get_text(strip=True)
+            if not head_title:
+                continue
+
+            head_split = head_title.split(" – ")
+            title = head_split[0].strip()
+
+            if not shared_state.is_valid_release(title, request_from, search_string, season, episode):
+                continue
+
+            size_item = extract_size(head_split[1].strip())
+            mb = shared_state.convert_to_mb(size_item)
+
+            size = mb * 1024 * 1024
+
+            published = None
+            p_meta = data.find('p', class_='meta')
+            if p_meta:
+                posted_span = None
+                for sp in p_meta.find_all('span'):
+                    txt = sp.get_text(' ', strip=True)
+                    if txt.lower().startswith('posted') or 'ago' in txt.lower():
+                        posted_span = txt
+                        break
+
+                if posted_span:
+                    published = parse_posted_ago(posted_span)
+
+            if published is None:
+                continue
+
+            release_imdb_id = None
+            try:
+                r = requests.get(source, headers=headers, timeout=10)
+                soup = BeautifulSoup(r.content, 'html.parser')
+                imdb_link = soup.find('a', href=re.compile(r"imdb\.com/title/tt\d+", re.IGNORECASE))
+                if imdb_link:
+                    release_imdb_id = re.search(r'tt\d+', imdb_link['href']).group()
+                    if imdb_id and release_imdb_id != imdb_id:
+                        debug(f"{hostname}: IMDb ID mismatch: expected {imdb_id}, found {release_imdb_id}")
+                        continue
+                else:
+                    debug(f"{hostname}: imdb link not found for title {title}")
+            except Exception as e:
+                debug(f"{hostname}: failed to determine imdb_id for title {title}")
+                continue
+
+            password = None
+            payload = urlsafe_b64encode(
+                f"{title}|{source}|{mirror}|{mb}|{password}|{release_imdb_id}".encode("utf-8")).decode()
+            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname,
+                    "imdb_id": release_imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+        except Exception as e:
+            debug(f"{hostname}: error parsing search result: {e}")
+            continue
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    return releases
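The parse_posted_ago() helper above turns the site's relative "posted … ago" strings into RFC-822-style RSS dates anchored at the current UTC time, and falls back to an empty string when nothing matches. A rough usage sketch, assuming quasarr 1.23.0 is installed so the module is importable (the sample inputs are made up):

    from quasarr.search.sources.he import parse_posted_ago

    # "3 days" is subtracted from utcnow(); the exact output depends on when it runs.
    print(parse_posted_ago("Posted 3 days ago"))  # e.g. "Fri, 03 Jan 2025 10:00:00 +0000"
    print(parse_posted_ago("just now"))           # "" (no number/unit pair to match)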
quasarr/search/sources/mb.py
ADDED

@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import html
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import datetime, timedelta
+from urllib.parse import quote_plus
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "mb"
+supported_mirrors = ["rapidgator", "ddownload"]
+XXX_REGEX = re.compile(r"\.xxx\.", re.I)
+RESOLUTION_REGEX = re.compile(r"\d{3,4}p", re.I)
+CODEC_REGEX = re.compile(r"x264|x265|h264|h265|hevc|avc", re.I)
+IMDB_REGEX = re.compile(r"imdb\.com/title/(tt\d+)")
+
+# map German month names to numbers
+GERMAN_MONTHS = {
+    'Januar': '01', 'Februar': '02', 'März': '03', 'April': '04', 'Mai': '05', 'Juni': '06',
+    'Juli': '07', 'August': '08', 'September': '09', 'Oktober': '10', 'November': '11', 'Dezember': '12'
+}
+
+
+def convert_to_rss_date(date_str):
+    parsed = datetime.strptime(date_str, "%d.%m.%Y - %H:%M")
+    return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+
+def extract_size(text):
+    m = re.match(r"(\d+(?:\.\d+)?)\s*([A-Za-z]+)", text)
+    if not m:
+        raise ValueError(f"Invalid size format: {text!r}")
+    return {"size": m.group(1), "sizeunit": m.group(2)}
+
+
+def _parse_posts(soup, shared_state, password, mirror_filter,
+                 is_search=False, request_from=None, search_string=None,
+                 season=None, episode=None):
+    releases = []
+    one_hour_ago = (datetime.now() - timedelta(hours=1)).strftime('%Y-%m-%d %H:%M:%S')
+
+    for post in soup.select("div.post"):
+        try:
+            # title & source
+            h1 = post.find("h1")
+            a = h1.find("a")
+            source = a["href"].strip()
+            title = a.get_text(strip=True)
+
+            # parse date
+            date_p = post.find("p", class_="date_x")
+            date_txt = date_p.get_text(strip=True) if date_p else None
+            published = one_hour_ago
+            if date_txt:
+                m_date = re.search(r'(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})', date_txt)
+                if m_date:
+                    day, mon_name, year, hm = m_date.groups()
+                    mon = GERMAN_MONTHS.get(mon_name, '01')
+                    dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
+                    published = dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000")
+
+            if is_search:
+                if not shared_state.is_valid_release(title,
+                                                     request_from,
+                                                     search_string,
+                                                     season,
+                                                     episode):
+                    continue
+
+                # drop .XXX. unless user explicitly searched xxx
+                if XXX_REGEX.search(title) and 'xxx' not in search_string.lower():
+                    continue
+                # require resolution/codec
+                if not (RESOLUTION_REGEX.search(title) or CODEC_REGEX.search(title)):
+                    continue
+                # require no spaces in title
+                if " " in title:
+                    continue
+
+                # can't check for mirrors in search context
+                if mirror_filter and mirror_filter not in supported_mirrors:
+                    continue
+            else:
+                mirror_candidates = []
+                for strong in post.find_all('strong', string=re.compile(r'^Download', re.I)):
+                    link_tag = strong.find_next_sibling('a')
+                    if link_tag and link_tag.get_text(strip=True):
+                        host = link_tag.get_text(strip=True).split('.')[0].lower()
+                        mirror_candidates.append(host)
+                valid = [m for m in mirror_candidates if m in supported_mirrors]
+                if not valid or (mirror_filter and mirror_filter not in valid):
+                    continue
+
+            # extract IMDb ID
+            imdb_id = None
+            for tag in post.find_all('a', href=True):
+                m = IMDB_REGEX.search(tag['href'])
+                if m:
+                    imdb_id = m.group(1)
+                    break
+
+            # size extraction
+            mb = size_bytes = 0
+            size_match = re.search(r"Größe:\s*([\d\.]+)\s*([GMK]B)", post.get_text())
+            if size_match:
+                sz = {"size": size_match.group(1), "sizeunit": size_match.group(2)}
+                mb = shared_state.convert_to_mb(sz)
+                size_bytes = mb * 1024 * 1024
+
+            payload = urlsafe_b64encode(
+                f"{title}|{source}|{mirror_filter}|{mb}|{password}|{imdb_id}".encode()
+            ).decode()
+            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname,
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror_filter,
+                    "size": size_bytes,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+        except Exception as e:
+            debug(f"Error parsing {hostname.upper()} post: {e}")
+            continue
+    return releases
+
+
+def mb_feed(shared_state, start_time, request_from, mirror=None):
+    mb = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return []
+
+    password = mb
+    section = "neuerscheinungen" if "Radarr" in request_from else "serie"
+    url = f"https://{mb}/category/{section}/"
+    headers = {'User-Agent': shared_state.values["user_agent"]}
+    try:
+        html_doc = requests.get(url, headers=headers, timeout=10).content
+        soup = BeautifulSoup(html_doc, "html.parser")
+        releases = _parse_posts(soup, shared_state, password, mirror_filter=mirror)
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} feed: {e}")
+        releases = []
+    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    return releases
+
+
+def mb_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+    mb = shared_state.values["config"]("Hostnames").get(hostname)
+
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return []
+
+    password = mb
+    imdb_id = shared_state.is_imdb_id(search_string)
+    if imdb_id:
+        title = get_localized_title(shared_state, imdb_id, 'de')
+        if not title:
+            info(f"Could not extract title from IMDb-ID {imdb_id}")
+            return []
+        search_string = html.unescape(title)
+
+    q = quote_plus(search_string)
+    url = f"https://{mb}/?s={q}&id=20&post_type=post"
+    headers = {'User-Agent': shared_state.values["user_agent"]}
+    try:
+        html_doc = requests.get(url, headers=headers, timeout=10).content
+        soup = BeautifulSoup(html_doc, "html.parser")
+        releases = _parse_posts(
+            soup, shared_state, password, mirror_filter=mirror,
+            is_search=True, request_from=request_from,
+            search_string=search_string, season=season, episode=episode
+        )
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} search: {e}")
+        releases = []
+    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
+    return releases
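The date handling inside _parse_posts() above rebuilds an RSS timestamp from the blog's German-language date strings via the GERMAN_MONTHS map, defaulting to roughly one hour ago when no date is found. A standalone sketch of that conversion with a made-up sample string, mirroring the regex and month map from the diff:

    import re
    from datetime import datetime

    GERMAN_MONTHS = {
        'Januar': '01', 'Februar': '02', 'März': '03', 'April': '04', 'Mai': '05', 'Juni': '06',
        'Juli': '07', 'August': '08', 'September': '09', 'Oktober': '10', 'November': '11', 'Dezember': '12'
    }

    date_txt = "Montag, 5. Januar 2024 14:30"  # hypothetical post date
    m = re.search(r'(?:\w+, )?(\d{1,2})\.\s*(\w+)\s+(\d{4})\s+(\d{2}:\d{2})', date_txt)
    if m:
        day, mon_name, year, hm = m.groups()
        mon = GERMAN_MONTHS.get(mon_name, '01')
        dt_obj = datetime.strptime(f"{day}.{mon}.{year} {hm}", "%d.%m.%Y %H:%M")
        print(dt_obj.strftime("%a, %d %b %Y %H:%M:%S +0000"))
        # prints "Fri, 05 Jan 2024 14:30:00 +0000" (weekday/month names per the C locale)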