quasarr 1.20.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of quasarr might be problematic. Click here for more details.
- quasarr/__init__.py +460 -0
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +373 -0
- quasarr/api/captcha/__init__.py +1075 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +267 -0
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +467 -0
- quasarr/downloads/sources/__init__.py +0 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +65 -0
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +51 -0
- quasarr/downloads/sources/nx.py +105 -0
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/providers/__init__.py +0 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +20 -0
- quasarr/providers/html_templates.py +241 -0
- quasarr/providers/imdb_metadata.py +142 -0
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +917 -0
- quasarr/providers/notifications.py +124 -0
- quasarr/providers/obfuscated.py +51 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +826 -0
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +118 -0
- quasarr/providers/web_server.py +49 -0
- quasarr/search/__init__.py +153 -0
- quasarr/search/sources/__init__.py +0 -0
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +203 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +214 -0
- quasarr/search/sources/fx.py +223 -0
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +197 -0
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/storage/__init__.py +0 -0
- quasarr/storage/config.py +163 -0
- quasarr/storage/setup.py +458 -0
- quasarr/storage/sqlite_database.py +80 -0
- quasarr-1.20.6.dist-info/METADATA +304 -0
- quasarr-1.20.6.dist-info/RECORD +72 -0
- quasarr-1.20.6.dist-info/WHEEL +5 -0
- quasarr-1.20.6.dist-info/entry_points.txt +2 -0
- quasarr-1.20.6.dist-info/licenses/LICENSE +21 -0
- quasarr-1.20.6.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Quasarr
|
|
3
|
+
# Project by https://github.com/rix1337
|
|
4
|
+
|
|
5
|
+
import html
|
|
6
|
+
import time
|
|
7
|
+
from base64 import urlsafe_b64encode
|
|
8
|
+
from datetime import datetime, timezone
|
|
9
|
+
|
|
10
|
+
from quasarr.providers.imdb_metadata import get_localized_title
|
|
11
|
+
from quasarr.providers.log import info, debug
|
|
12
|
+
from quasarr.providers.sessions.dd import create_and_persist_session, retrieve_and_validate_session
|
|
13
|
+
|
|
14
|
+
# Config key / site identifier for this indexer
hostname = "dd"
# One-click hosters DD links to; requests for any other mirror are rejected up front
supported_mirrors = ["ironfiles", "rapidgator", "filefactory"]
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def convert_to_rss_date(unix_timestamp):
    """Format a unix timestamp as an RFC-822 style date string (RSS pubDate)."""
    as_utc = datetime.fromtimestamp(unix_timestamp, tz=timezone.utc)
    return as_utc.strftime('%a, %d %b %Y %H:%M:%S %z')
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def extract_size(size_in_bytes):
    """Wrap a raw byte count in the size dict shape expected by convert_to_mb()."""
    return {"sizeunit": "B", "size": size_in_bytes}
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def dd_feed(*args, **kwargs):
    """Feed requests are served by the same endpoint as searches on DD."""
    return dd_search(*args, **kwargs)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def dd_search(shared_state, start_time, request_from, search_string="", mirror=None, season=None, episode=None):
    """Query the DD JSON API and return matching releases.

    Serves both feed and explicit searches (an empty search_string lists the
    latest releases).  Each result is a Quasarr release dict whose link is a
    base64 payload pointing at the internal download endpoint.
    """
    releases = []
    dd = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = dd

    # Only *arr-style clients are supported for this host
    if "arr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    dd_session = retrieve_and_validate_session(shared_state)
    if not dd_session:
        info(f"Could not retrieve valid session for {dd}")
        return releases

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported mirrors: {supported_mirrors}.'
              ' Skipping search!')
        return releases

    imdb_id = shared_state.is_imdb_id(search_string)
    if imdb_id:
        search_string = get_localized_title(shared_state, imdb_id, 'en')
        if not search_string:
            info(f"Could not extract title from IMDb-ID {imdb_id}")
            return releases
        search_string = html.unescape(search_string)

    qualities = [
        "disk-480p",
        "web-480p",
        "movie-480p-x265",
        "disk-1080p-x265",
        "web-1080p",
        "web-1080p-x265",
        "web-2160p-x265-hdr",
        "movie-1080p-x265",
        "movie-2160p-webdl-x265-hdr"
    ]
    # Hoisted out of the URL f-string: reusing the enclosing quote character
    # inside an f-string expression ({','.join(...)}) is a SyntaxError on
    # Python < 3.12 (PEP 701 only landed in 3.12).
    quality_path = ",".join(qualities)

    headers = {
        'User-Agent': shared_state.values["user_agent"],
    }

    try:
        release_list = []
        # The API pages results in steps of 20; fetch the first five pages
        for page in range(0, 100, 20):
            url = f'https://{dd}/index/search/keyword/{search_string}/qualities/{quality_path}/from/{page}/search'

            releases_on_page = dd_session.get(url, headers=headers, timeout=10).json()
            if releases_on_page:
                release_list.extend(releases_on_page)

        for release in release_list:
            try:
                if release.get("fake"):
                    # A "fake" entry signals a stale/invalid session: rebuild it and bail out
                    debug(
                        f"Release {release.get('release')} marked as fake. Invalidating {hostname.upper()} session...")
                    create_and_persist_session(shared_state)
                    return []

                title = release.get("release")

                if not shared_state.is_valid_release(title,
                                                     request_from,
                                                     search_string,
                                                     season,
                                                     episode):
                    continue

                imdb_id = release.get("imdbid", None)

                source = f"https://{dd}/"
                size_item = extract_size(release.get("size"))
                # NOTE(review): despite the name this holds a BYTE count
                # (convert_to_mb() scaled back up) — kept as-is because the
                # payload format and the "size" field below already rely on it.
                mb = shared_state.convert_to_mb(size_item) * 1024 * 1024
                published = convert_to_rss_date(release.get("when"))
                payload = urlsafe_b64encode(
                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")).decode("utf-8")
                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                releases.append({
                    "details": {
                        "title": title,
                        "hostname": hostname.lower(),
                        "imdb_id": imdb_id,
                        "link": link,
                        "mirror": mirror,
                        "size": mb,
                        "date": published,
                        "source": source
                    },
                    "type": "protected"
                })
            except Exception as e:
                info(f"Error parsing {hostname.upper()} feed: {e}")
                continue

    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")

    elapsed_time = time.time() - start_time
    debug(f"Time taken: {elapsed_time:.2f}s ({hostname})")

    return releases
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Quasarr
|
|
3
|
+
# Project by https://github.com/rix1337
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import re
|
|
7
|
+
import time
|
|
8
|
+
from base64 import urlsafe_b64encode
|
|
9
|
+
from datetime import datetime, timedelta
|
|
10
|
+
|
|
11
|
+
import requests
|
|
12
|
+
from bs4 import BeautifulSoup
|
|
13
|
+
|
|
14
|
+
from quasarr.providers.imdb_metadata import get_localized_title
|
|
15
|
+
from quasarr.providers.log import info, debug
|
|
16
|
+
|
|
17
|
+
# Config key / site identifier for this indexer
hostname = "dj"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def convert_to_rss_date(date_str):
    """Convert an ISO-8601 timestamp (possibly 'Z'-suffixed) to an RSS pubDate.

    Returns an empty string for anything that cannot be parsed.
    NOTE(review): the offset is hard-coded to "+0000"; timestamps are
    presumably always UTC — confirm against the API.
    """
    try:
        parsed = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
        return parsed.strftime("%a, %d %b %Y %H:%M:%S +0000")
    except Exception:
        return ""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def dj_feed(shared_state, start_time, request_from, mirror=None):
    """Fetch the latest-releases feed from DJ and return Quasarr release dicts.

    Only Sonarr requests are served (the site hosts series content only).
    Sizes and IMDb ids are unknown at feed time and reported as 0 / None.
    """
    releases = []

    if "sonarr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    sj_host = shared_state.values["config"]("Hostnames").get(hostname)
    password = sj_host

    url = f"https://{sj_host}/api/releases/latest/0"
    headers = {"User-Agent": shared_state.values["user_agent"]}

    try:
        r = requests.get(url, headers=headers, timeout=10)
        data = json.loads(r.content)
    except Exception as e:
        info(f"{hostname.upper()}: feed load error: {e}")
        return releases

    for release in data:
        try:
            # Guard against a missing "name": None would crash rstrip() and be
            # mis-logged as a parse error; the falsy check below skips it cleanly.
            title = (release.get("name") or "").rstrip(".")
            if not title:
                continue

            published = convert_to_rss_date(release.get("createdAt"))
            if not published:
                continue

            media = release.get("_media", {})
            slug = media.get("slug")
            if not slug:
                continue

            series_url = f"https://{sj_host}/serie/{slug}"

            # Feed entries carry no size or IMDb information
            mb = 0
            size = 0
            imdb_id = None

            payload = urlsafe_b64encode(
                f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
            ).decode("utf-8")

            link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname,
                    "imdb_id": imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
                    "date": published,
                    "source": series_url
                },
                "type": "protected"
            })

        except Exception as e:
            debug(f"{hostname.upper()}: feed parse error: {e}")
            continue

    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def dj_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    """Search DJ by IMDb id: resolve the German title, find the matching
    series page, then list its releases via the site's media API.

    Only Sonarr requests that carry an IMDb id are served.  Sizes are not
    exposed by the API and reported as 0.
    """
    releases = []

    if "sonarr" not in request_from.lower():
        debug(f'Skipping {request_from} search on "{hostname.upper()}" (unsupported media type)!')
        return releases

    sj_host = shared_state.values["config"]("Hostnames").get(hostname)
    password = sj_host

    # The site is only searchable by localized title, so an IMDb id is required
    imdb_id = shared_state.is_imdb_id(search_string)
    if not imdb_id:
        return releases

    localized_title = get_localized_title(shared_state, imdb_id, "de")
    if not localized_title:
        info(f"{hostname.upper()}: no localized title for IMDb {imdb_id}")
        return releases

    headers = {"User-Agent": shared_state.values["user_agent"]}
    search_url = f"https://{sj_host}/serie/search"
    params = {"q": localized_title}

    try:
        r = requests.get(search_url, headers=headers, params=params, timeout=10)
        soup = BeautifulSoup(r.content, "html.parser")
        results = soup.find_all("a", href=re.compile(r"^/serie/"))
    except Exception as e:
        info(f"{hostname.upper()}: search load error: {e}")
        return releases

    # Fallback pubDate for items without a usable createdAt.  Formatted in the
    # same RFC-822 style as convert_to_rss_date() — the previous
    # '%Y-%m-%d %H:%M:%S' format was inconsistent with every other date this
    # module emits and could confuse the consuming *arr client.
    one_hour_ago = (datetime.utcnow() - timedelta(hours=1)).strftime('%a, %d %b %Y %H:%M:%S +0000')
    sanitized_search_string = shared_state.sanitize_string(localized_title)

    for result in results:
        try:
            result_title = result.get_text(strip=True)

            sanitized_title = shared_state.sanitize_string(result_title)

            # Require the full localized title as a word-bounded match
            if not re.search(
                    rf"\b{re.escape(sanitized_search_string)}\b",
                    sanitized_title
            ):
                debug(
                    f"Search string '{localized_title}' doesn't match '{result_title}'"
                )
                continue

            debug(
                f"Matched search string '{localized_title}' with result '{result_title}'"
            )

            series_url = f"https://{sj_host}{result['href']}"

            r = requests.get(series_url, headers=headers, timeout=10)
            media_id_match = re.search(r'data-mediaid="([^"]+)"', r.text)
            if not media_id_match:
                debug(f"{hostname.upper()}: no media id for {result_title}")
                continue

            media_id = media_id_match.group(1)
            api_url = f"https://{sj_host}/api/media/{media_id}/releases"

            r = requests.get(api_url, headers=headers, timeout=10)
            data = json.loads(r.content)

            for season_block in data.values():
                for item in season_block.get("items", []):
                    # Guard against a missing "name" (None would crash rstrip)
                    title = (item.get("name") or "").rstrip(".")
                    if not title:
                        continue

                    if not shared_state.is_valid_release(
                            title,
                            request_from,
                            search_string,
                            season,
                            episode
                    ):
                        continue

                    published = convert_to_rss_date(item.get("createdAt"))
                    if not published:
                        debug(f"{hostname.upper()}: no published date for {title}")
                        published = one_hour_ago

                    # Sizes are not exposed by the API
                    mb = 0
                    size = 0

                    payload = urlsafe_b64encode(
                        f"{title}|{series_url}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
                    ).decode("utf-8")

                    link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

                    releases.append({
                        "details": {
                            "title": title,
                            "hostname": hostname,
                            "imdb_id": imdb_id,
                            "link": link,
                            "mirror": mirror,
                            "size": size,
                            "date": published,
                            "source": series_url
                        },
                        "type": "protected"
                    })

        except Exception as e:
            debug(f"{hostname.upper()}: search parse error: {e}")
            continue

    debug(f"Time taken: {time.time() - start_time:.2f}s ({hostname})")
    return releases
|
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Quasarr
|
|
3
|
+
# Project by https://github.com/rix1337
|
|
4
|
+
|
|
5
|
+
import datetime
|
|
6
|
+
import html
|
|
7
|
+
import re
|
|
8
|
+
import time
|
|
9
|
+
from base64 import urlsafe_b64encode
|
|
10
|
+
from datetime import timezone, timedelta
|
|
11
|
+
from urllib.parse import quote_plus
|
|
12
|
+
|
|
13
|
+
import requests
|
|
14
|
+
from bs4 import BeautifulSoup
|
|
15
|
+
|
|
16
|
+
from quasarr.providers.imdb_metadata import get_localized_title
|
|
17
|
+
from quasarr.providers.log import info, debug
|
|
18
|
+
|
|
19
|
+
# Config key / site identifier for this indexer
hostname = "dt"
# One-click hosters DT links to; requests for any other mirror are rejected up front
supported_mirrors = ["rapidgator", "nitroflare", "ddownload"]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def extract_size(text):
    """Split a human-readable size like '1.4 GB' into its value and unit.

    Returns {"size": <str>, "sizeunit": <upper-case unit>}.
    Raises ValueError when the text does not start with <number> <unit>.
    """
    parsed = re.match(r"([\d\.]+)\s*([KMGT]B)", text, re.IGNORECASE)
    if not parsed:
        raise ValueError(f"Invalid size format: {text}")
    return {"size": parsed.group(1), "sizeunit": parsed.group(2).upper()}
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def parse_published_datetime(article):
    """Build an ISO-8601 timestamp from an article's calendar widget.

    The page shows month abbreviation, day and year in a small date box, plus
    an optional "HH:MM" next to a clock icon; a missing time defaults to
    midnight.  NOTE(review): the offset is hard-coded to CET (+01:00), which
    ignores DST and may not match the site's real timezone.
    """
    box = article.find('div', class_='mr-2 shadow-sm1 text-center')
    month = datetime.datetime.strptime(box.find('small').text.strip(), '%b').month
    day = int(box.find('h4').text.strip())
    year = int(box.find('h6').text.strip())

    hour = minute = 0
    clock = article.select_one('i.fa-clock-o')
    if clock:
        # The icon's parent <span> holds text like "19:12"
        found = re.search(r'(\d{1,2}:\d{2})', clock.parent.get_text(strip=True))
        if found:
            hour, minute = (int(part) for part in found.group(1).split(':'))

    offset = timezone(timedelta(hours=1))  # fixed CET, see note above
    return datetime.datetime(year, month, day, hour, minute, tzinfo=offset).isoformat()
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def dt_feed(shared_state, start_time, request_from, mirror=None):
    """Scrape the DT landing feed for the requesting client's media category.

    The category depends on the client: LazyLibrarian gets the "learning"
    feed, Radarr the video feed, everything else the TV feed.  Returns
    Quasarr release dicts with payload links to the internal downloader.
    """
    releases = []
    dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = dt

    if "lazylibrarian" in request_from.lower():
        feed_type = "learning/"
    elif "radarr" in request_from.lower():
        feed_type = "media/videos/"
    else:
        feed_type = "media/tv-show/"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Supported: {supported_mirrors}. Skipping!')
        return releases

    url = f'https://{dt}/{feed_type}'
    headers = {'User-Agent': shared_state.values["user_agent"]}

    try:
        resp = requests.get(url, headers=headers, timeout=10).content
        feed = BeautifulSoup(resp, "html.parser")

        for article in feed.find_all('article'):
            try:
                link_tag = article.select_one('h4.font-weight-bold a')
                if not link_tag:
                    debug(f"Link tag not found in article: {article} at {hostname.upper()}")
                    continue

                source = link_tag['href']
                title_raw = link_tag.text.strip()
                title = title_raw.replace(' - ', '-').replace(' ', '.').replace('(', '').replace(')', '')

                if 'lazylibrarian' in request_from.lower():
                    # lazylibrarian can only detect specific date formats / issue numbering for magazines
                    title = shared_state.normalize_magazine_title(title)

                # Replaced a bare except around .group(): check the match
                # explicitly instead of swallowing unrelated errors.
                imdb_match = re.search(r'tt\d+', str(article))
                imdb_id = imdb_match.group() if imdb_match else None

                body_text = article.find('div', class_='card-body').get_text(" ")
                size_match = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
                if not size_match:
                    debug(f"Size not found in article: {article} at {hostname.upper()}")
                    continue
                size_info = size_match.group(1).strip()
                size_item = extract_size(size_info)
                mb = shared_state.convert_to_mb(size_item)
                size = mb * 1024 * 1024  # bytes

                published = parse_published_datetime(article)

                payload = urlsafe_b64encode(
                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}".encode("utf-8")
                ).decode("utf-8")
                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            except Exception as e:
                info(f"Error parsing {hostname.upper()} feed: {e}")
                continue

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname.lower(),
                    "imdb_id": imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })

    except Exception as e:
        info(f"Error loading {hostname.upper()} feed: {e}")

    elapsed = time.time() - start_time
    debug(f"Time taken: {elapsed:.2f}s ({hostname})")
    return releases
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def dt_search(shared_state, start_time, request_from, search_string, mirror=None, season=None, episode=None):
    """Run a full-text search on DT and return matching releases.

    The category id depends on the requesting client (magazines for
    LazyLibrarian, movies for Radarr, TV otherwise).  IMDb-id searches are
    resolved to the English title first.
    """
    releases = []
    dt = shared_state.values["config"]("Hostnames").get(hostname.lower())
    password = dt

    if "lazylibrarian" in request_from.lower():
        cat_id = "100"
    elif "radarr" in request_from.lower():
        cat_id = "9"
    else:
        cat_id = "64"

    if mirror and mirror not in supported_mirrors:
        debug(f'Mirror "{mirror}" not supported by "{hostname.upper()}". Skipping search!')
        return releases

    try:
        imdb_id = shared_state.is_imdb_id(search_string)
        if imdb_id:
            search_string = get_localized_title(shared_state, imdb_id, 'en')
            if not search_string:
                info(f"Could not extract title from IMDb-ID {imdb_id}")
                return releases
            search_string = html.unescape(search_string)

        q = quote_plus(search_string)

        url = (
            f"https://{dt}/index.php?"
            f"do=search&"
            f"subaction=search&"
            f"search_start=0&"
            f"full_search=1&"
            f"story={q}&"
            f"catlist%5B%5D={cat_id}&"
            f"sortby=date&"
            f"resorder=desc&"
            f"titleonly=3&"
            f"searchuser=&"
            f"beforeafter=after&"
            f"searchdate=0&"
            f"replyless=0&"
            f"replylimit=0&"
            f"showposts=0"
        )
        headers = {"User-Agent": shared_state.values["user_agent"]}

        resp = requests.get(url, headers=headers, timeout=10).content
        page = BeautifulSoup(resp, "html.parser")

        for article in page.find_all("article"):
            try:
                link_tag = article.select_one("h4.font-weight-bold a")
                if not link_tag:
                    debug(f"No title link in search-article: {article}")
                    continue
                source = link_tag["href"]
                title_raw = link_tag.text.strip()
                title = (title_raw.
                         replace(' - ', '-').
                         replace(' ', '.').
                         replace('(', '').
                         replace(')', '')
                         )

                if not shared_state.is_valid_release(title,
                                                     request_from,
                                                     search_string,
                                                     season,
                                                     episode):
                    continue

                if 'lazylibrarian' in request_from.lower():
                    # lazylibrarian can only detect specific date formats / issue numbering for magazines
                    title = shared_state.normalize_magazine_title(title)

                # Replaced a bare except around .group(): check the match
                # explicitly instead of swallowing unrelated errors.
                imdb_match = re.search(r"tt\d+", str(article))
                imdb_id = imdb_match.group() if imdb_match else None

                body_text = article.find("div", class_="card-body").get_text(" ")
                m = re.search(r"(\d+(?:\.\d+)?\s*(?:GB|MB|KB|TB))", body_text, re.IGNORECASE)
                if not m:
                    debug(f"Size not found in search-article: {title_raw}")
                    continue
                size_item = extract_size(m.group(1).strip())
                mb = shared_state.convert_to_mb(size_item)
                size = mb * 1024 * 1024  # bytes

                published = parse_published_datetime(article)

                payload = urlsafe_b64encode(
                    f"{title}|{source}|{mirror}|{mb}|{password}|{imdb_id}"
                    .encode("utf-8")
                ).decode("utf-8")
                link = f"{shared_state.values['internal_address']}/download/?payload={payload}"

            except Exception as e:
                info(f"Error parsing {hostname.upper()} search item: {e}")
                continue

            releases.append({
                "details": {
                    "title": title,
                    "hostname": hostname.lower(),
                    "imdb_id": imdb_id,
                    "link": link,
                    "mirror": mirror,
                    "size": size,
                    "date": published,
                    "source": source
                },
                "type": "protected"
            })

    except Exception as e:
        info(f"Error loading {hostname.upper()} search page: {e}")

    elapsed = time.time() - start_time
    debug(f"Search time: {elapsed:.2f}s ({hostname})")
    return releases
|