quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of quasarr might be problematic.
- quasarr/__init__.py +316 -42
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +387 -0
- quasarr/api/captcha/__init__.py +1189 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +319 -256
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +476 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dl.py +199 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +14 -7
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +54 -0
- quasarr/downloads/sources/nx.py +42 -83
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/downloads/sources/wx.py +127 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +22 -0
- quasarr/providers/html_templates.py +211 -104
- quasarr/providers/imdb_metadata.py +108 -3
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +201 -40
- quasarr/providers/notifications.py +99 -11
- quasarr/providers/obfuscated.py +65 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/dl.py +175 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +656 -79
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +60 -1
- quasarr/providers/web_server.py +1 -1
- quasarr/search/__init__.py +144 -15
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +204 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dl.py +354 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +94 -67
- quasarr/search/sources/fx.py +89 -33
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +75 -21
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/search/sources/wx.py +337 -0
- quasarr/storage/config.py +39 -10
- quasarr/storage/setup.py +269 -97
- quasarr/storage/sqlite_database.py +6 -1
- quasarr-1.23.0.dist-info/METADATA +306 -0
- quasarr-1.23.0.dist-info/RECORD +77 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
- quasarr/arr/__init__.py +0 -423
- quasarr/captcha_solver/__init__.py +0 -284
- quasarr-0.1.6.dist-info/METADATA +0 -81
- quasarr-0.1.6.dist-info/RECORD +0 -31
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
quasarr/search/sources/dt.py
ADDED
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+# Quasarr
+# Project by https://github.com/rix1337
+
+import datetime
+import html
+import re
+import time
+from base64 import urlsafe_b64encode
+from datetime import timezone, timedelta
+from urllib.parse import quote_plus
+
+import requests
+from bs4 import BeautifulSoup
+
+from quasarr.providers.imdb_metadata import get_localized_title
+from quasarr.providers.log import info, debug
+
+hostname = "dt"
+supported_mirrors = ["rapidgator", "nitroflare", "ddownload"]
+
+
+def extract_size(text):
+    match = re.match(r"([\d\.]+)\s*([KMGT]B)", text, re.IGNORECASE)
+    if match:
+        size = match.group(1)
+        unit = match.group(2).upper()
+        return {"size": size, "sizeunit": unit}
+    else:
+        raise ValueError(f"Invalid size format: {text}")
+
+
+def parse_published_datetime(article):
+    date_box = article.find('div', class_='mr-2 shadow-sm1 text-center')
+    mon = date_box.find('small').text.strip()
+    day = date_box.find('h4').text.strip()
+    year = date_box.find('h6').text.strip()
+    month_num = datetime.datetime.strptime(mon, '%b').month
+
+    time_icon = article.select_one('i.fa-clock-o')
+    if time_icon:
+        # its parent <span> contains e.g. "19:12"
+        raw = time_icon.parent.get_text(strip=True)
+        m = re.search(r'(\d{1,2}:\d{2})', raw)
+        if m:
+            hh, mm = map(int, m.group(1).split(':'))
+        else:
+            hh, mm = 0, 0
+    else:
+        hh, mm = 0, 0
+
+    # this timezone is fixed to CET+1 and might be wrong
+    cet = timezone(timedelta(hours=1))
+    dt = datetime.datetime(int(year), month_num, int(day), hh, mm, tzinfo=cet)
+    return dt.isoformat()
+
+
+def dt_feed(shared_state, start_time, request_from, mirror=None):
+    releases = []
+    dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = dt
+
+    if "lazylibrarian" in request_from.lower():
+        feed_type = "learning/"
+    elif "radarr" in request_from.lower():
+        feed_type = "media/videos/"
+    else:
+        feed_type = "media/tv-show/"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
+        return releases
+
+    url = f'https://{dt}/{feed_type}'
+    headers = {'User-Agent': shared_state.values["user_agent"]}
+
+    try:
+        resp = requests.get(url, headers=headers, timeout=10).content
+        feed = BeautifulSoup(resp, "html.parser")
+
+        for article in feed.find_all('article'):
+            try:
+                link_tag = article.select_one('h4.font-weight-bold a')
+                if not link_tag:
+                    debug(f"Link tag not found in article: {article} at {hostname.upper()}")
+                    continue
+
+                source = link_tag['href']
+                title_raw = link_tag.text.strip()
+                title = title_raw.replace(' - ', '-').replace(' ', '.').replace('(', '').replace(')', '')
+
+                if 'lazylibrarian' in request_from.lower():
+                    # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                    title = shared_state.normalize_magazine_title(title)
+
+                try:
+                    imdb_id = re.search(r'tt\d+', str(article)).group()
+                except:
+                    imdb_id = None
+
+                body_text = article.find('div', class_='card-body').get_text(" ")
+                size_match = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
+                if not size_match:
+                    debug(f"Size not found in article: {article} at {hostname.upper()}")
+                    continue
+                size_info = size_match.group(1).strip()
+                size_item = extract_size(size_info)
+                mb = shared_state.convert_to_mb(size_item)
+                size = mb * 1024 * 1024
+
+                published = parse_published_datetime(article)
+
+                payload = urlsafe_b64encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
+                ).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            except Exception as e:
+                info(f"Error parsing {hostname.upper()} feed: {e}")
+                continue
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} feed: {e}")
+
+    elapsed = time.time() - start_time
+    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
+    return releases
+
+
+def dt_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
+    releases = []
+    dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
+    password = dt
+
+    if "lazylibrarian" in request_from.lower():
+        cat_id = "100"
+    elif "radarr" in request_from.lower():
+        cat_id = "9"
+    else:
+        cat_id = "64"
+
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!')
+        return releases
+
+    try:
+        imdb_id = shared_state.is_imdb_id(search_string)
+        if imdb_id:
+            search_string = get_localized_title(shared_state, imdb_id, 'en')
+            if not search_string:
+                info(f"Could not extract title from IMDb-ID {imdb_id}")
+                return releases
+            search_string = html.unescape(search_string)
+
+        q = quote_plus(search_string)
+
+        url = (
+            f"https://{dt}/index.php?"
+            f"do=search&"
+            f"subaction=search&"
+            f"search_start=0&"
+            f"full_search=1&"
+            f"story={q}&"
+            f"catlist%5B%5D={cat_id}&"
+            f"sortby=date&"
+            f"resorder=desc&"
+            f"titleonly=3&"
+            f"searchuser=&"
+            f"beforeafter=after&"
+            f"searchdate=0&"
+            f"replyless=0&"
+            f"replylimit=0&"
+            f"showposts=0"
+        )
+        headers = {"User-Agent": shared_state.values["user_agent"]}
+
+        resp = requests.get(url, headers=headers, timeout=10).content
+        page = BeautifulSoup(resp, "html.parser")
+
+        for article in page.find_all("article"):
+            try:
+                link_tag = article.select_one("h4.font-weight-bold a")
+                if not link_tag:
+                    debug(f"No title link in search-article: {article}")
+                    continue
+                source = link_tag["href"]
+                title_raw = link_tag.text.strip()
+                title = (title_raw.
+                         replace(' - ', '-').
+                         replace(' ', '.').
+                         replace('(', '').
+                         replace(')', '')
+                         )
+
+                if not shared_state.is_valid_release(title,
+                                                     request_from,
+                                                     search_string,
+                                                     season,
+                                                     episode):
+                    continue
+
+                if 'lazylibrarian' in request_from.lower():
+                    # lazylibrarian can only detect specific date formats / issue numbering for magazines
+                    title = shared_state.normalize_magazine_title(title)
+
+                try:
+                    imdb_id = re.search(r"tt\d+", str(article)).group()
+                except:
+                    imdb_id = None
+
+                body_text = article.find("div", class_="card-body").get_text(" ")
+                m = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
+                if not m:
+                    debug(f"Size not found in search-article: {title_raw}")
+                    continue
+                size_item = extract_size(m.group(1).strip())
+                mb = shared_state.convert_to_mb(size_item)
+                size = mb * 1024 * 1024
+
+                published = parse_published_datetime(article)
+
+                payload = urlsafe_b64encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
+                    .encode("utf-8")
+                ).decode("utf-8")
+                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
+
+            except Exception as e:
+                info(f"Error parsing {hostname.upper()} search item: {e}")
+                continue
+
+            releases.append({
+                "details": {
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
+                    "link": link,
+                    "mirror": mirror,
+                    "size": size,
+                    "date": published,
+                    "source": source
+                },
+                "type": "protected"
+            })
+
+    except Exception as e:
+        info(f"Error loading {hostname.upper()} search page: {e}")
+
+    elapsed = time.time() - start_time
+    debug(f"Search time: {elapsed:.2f}s ({hostname})")
+    return releases
quasarr/search/sources/dw.py
CHANGED
@@ -4,11 +4,17 @@
 
 import datetime
 import re
+import time
 from base64 import urlsafe_b64encode
 
 import requests
 from bs4 import BeautifulSoup
 
+from quasarr.providers.log import info, debug
+
+hostname = "dw"
+supported_mirrors = ["1fichier", "rapidgator", "ddownload", "katfile"]
+
 
 def convert_to_rss_date(date_str):
     german_months = ["Januar", "Februar", "März", "April", "Mai", "Juni",
@@ -28,98 +34,84 @@ def convert_to_rss_date(date_str):
 
 
 def extract_size(text):
-
+    # First try the normal pattern: number + space + unit (e.g., "1024 MB")
+    match = re.match(r"(\d+)\s+([A-Za-z]+)", text)
     if match:
         size = match.group(1)
         unit = match.group(2)
         return {"size": size, "sizeunit": unit}
-    else:
-        raise ValueError(f"Invalid size format: {text}")
-
-
-def dw_get_download_links(shared_state, content, title):
-    try:
-        try:
-            content = BeautifulSoup(content, "html.parser")
-        except:
-            content = BeautifulSoup(str(content), "html.parser")
-        download_buttons = content.findAll("button", {"class": "show_link"})
-    except:
-        print("DW hat die Detail-Seite angepasst. Parsen von Download-Links für " + title + " nicht möglich!")
-        return False
-
-    dw = shared_state.values["config"]("Hostnames").get("dw")
-    ajax_url = "https://" + dw + "/wp-admin/admin-ajax.php"
-
-    download_links = []
-    try:
-        for button in download_buttons:
-            payload = "action=show_link&link_id=" + button["value"]
-
-            headers = {
-                'User-Agent': shared_state.values["user_agent"],
-            }
 
-
-
-
+    # If that fails, try pattern with just unit (e.g., "MB")
+    unit_match = re.match(r"([A-Za-z]+)", text.strip())
+    if unit_match:
+        unit = unit_match.group(1)
+        # Fall back to 0 when size is missing
+        return {"size": "0", "sizeunit": unit}
 
-
-
-            if match:
-                link = (f'https://filecrypt.cc/Container/{match.group(1)}'
-                        f'.html{match.group(2) if match.group(2) else ""}')
+    # If neither pattern matches, raise the original error
+    raise ValueError(f"Invalid size format: {text}")
 
-            hoster = button.nextSibling.img["src"].split("/")[-1].replace(".png", "")
-            download_links.append([link, hoster])
-    except:
-        print("DW site has been updated. Parsing download links not possible!")
-        pass
 
-
-
-
-def dw_feed(shared_state, request_from):
+def dw_feed(shared_state, start_time, request_from, mirror=None):
     releases = []
-    dw = shared_state.values["config"]("Hostnames").get(
+    dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = dw
 
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
     if "Radarr" in request_from:
         feed_type = "videos/filme/"
     else:
         feed_type = "videos/serien/"
 
+    if mirror and mirror not in supported_mirrors:
+        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
+              ' Skipping search!')
+        return releases
+
     url = f'https://{dw}/{feed_type}'
     headers = {
         'User-Agent': shared_state.values["user_agent"],
     }
 
     try:
-        request = requests.get(url, headers=headers).content
+        request = requests.get(url, headers=headers, timeout=10).content
         feed = BeautifulSoup(request, "html.parser")
-        articles = feed.
+        articles = feed.find_all('h4')
 
         for article in articles:
             try:
                 source = article.a["href"]
                 title = article.a.text.strip()
+
+                try:
+                    imdb_id = re.search(r'tt\d+', str(article)).group()
+                except:
+                    imdb_id = None
+
                 size_info = article.find("span").text.strip()
                 size_item = extract_size(size_info)
-                mb = shared_state.convert_to_mb(size_item)
+                mb = shared_state.convert_to_mb(size_item)
+                size = mb * 1024 * 1024
                 date = article.parent.parent.find("span", {"class": "date updated"}).text.strip()
                 published = convert_to_rss_date(date)
-                payload = urlsafe_b64encode(
-                    "utf-8")
+                payload = urlsafe_b64encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
             except Exception as e:
-
+                info(f"Error parsing {hostname.upper()} feed: {e}")
                 continue
 
             releases.append({
                 "details": {
-                    "title":
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
                     "link": link,
-                    "
+                    "mirror": mirror,
+                    "size": size,
                     "date": published,
                     "source": source
                 },
@@ -127,61 +119,96 @@ def dw_feed(shared_state, request_from):
             })
 
     except Exception as e:
-
+        info(f"Error loading {hostname.upper()} feed: {e}")
+
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
 
     return releases
 
 
-def dw_search(shared_state, request_from,
+def dw_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
     releases = []
-    dw = shared_state.values["config"]("Hostnames").get(
+    dw = shared_state.values["config"]("Hostnames").get(hostname.lower())
     password = dw
 
+    if not "arr" in request_from.lower():
+        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
+        return releases
+
+
     if "Radarr" in request_from:
         search_type = "videocategory=filme"
     else:
         search_type = "videocategory=serien"
 
-
+    if mirror and mirror not in ["1fichier", "rapidgator", "ddownload", "katfile"]:
+        debug(f'Mirror "{mirror}" not not supported by {hostname.upper()}. Skipping search!')
+        return releases
+
+    url = f'https://{dw}/?s={search_string}&{search_type}'
     headers = {
         'User-Agent': shared_state.values["user_agent"],
     }
 
     try:
-        request = requests.get(url, headers=headers).content
+        request = requests.get(url, headers=headers, timeout=10).content
        search = BeautifulSoup(request, "html.parser")
-        results = search.
+        results = search.find_all('h4')
 
     except Exception as e:
-
+        info(f"Error loading {hostname.upper()} search feed: {e}")
         return releases
 
+    imdb_id = shared_state.is_imdb_id(search_string)
+
     if results:
         for result in results:
             try:
-                source = result.a["href"]
                 title = result.a.text.strip()
+
+                if not shared_state.is_valid_release(title,
+                                                     request_from,
+                                                     search_string,
+                                                     season,
+                                                     episode):
+                    continue
+
+                if not imdb_id:
+                    try:
+                        imdb_id = re.search(r'tt\d+', str(result)).group()
+                    except:
+                        imdb_id = None
+
+                source = result.a["href"]
                 size_info = result.find("span").text.strip()
                 size_item = extract_size(size_info)
-                mb = shared_state.convert_to_mb(size_item)
+                mb = shared_state.convert_to_mb(size_item)
+                size = mb * 1024 * 1024
                 date = result.parent.parent.find("span", {"class": "date updated"}).text.strip()
                 published = convert_to_rss_date(date)
-                payload = urlsafe_b64encode(
-                    "utf-8")
+                payload = urlsafe_b64encode(
+                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
                 link = f"{shared_state.values['internal_address']}/download/?payload={payload}"
             except Exception as e:
-
+                info(f"Error parsing {hostname.upper()} search: {e}")
                 continue
 
             releases.append({
                 "details": {
-                    "title":
+                    "title": title,
+                    "hostname": hostname.lower(),
+                    "imdb_id": imdb_id,
                     "link": link,
-                    "
+                    "mirror": mirror,
+                    "size": size,
                     "date": published,
                     "source": source
                 },
                 "type": "protected"
             })
 
+    elapsed_time = time.time() - start_time
+    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")
+
     return releases