quasarr 0.1.6__py3-none-any.whl → 1.23.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of quasarr might be problematic. See the advisory details for more information.
- quasarr/__init__.py +316 -42
- quasarr/api/__init__.py +187 -0
- quasarr/api/arr/__init__.py +387 -0
- quasarr/api/captcha/__init__.py +1189 -0
- quasarr/api/config/__init__.py +23 -0
- quasarr/api/sponsors_helper/__init__.py +166 -0
- quasarr/api/statistics/__init__.py +196 -0
- quasarr/downloads/__init__.py +319 -256
- quasarr/downloads/linkcrypters/__init__.py +0 -0
- quasarr/downloads/linkcrypters/al.py +237 -0
- quasarr/downloads/linkcrypters/filecrypt.py +444 -0
- quasarr/downloads/linkcrypters/hide.py +123 -0
- quasarr/downloads/packages/__init__.py +476 -0
- quasarr/downloads/sources/al.py +697 -0
- quasarr/downloads/sources/by.py +106 -0
- quasarr/downloads/sources/dd.py +76 -0
- quasarr/downloads/sources/dj.py +7 -0
- quasarr/downloads/sources/dl.py +199 -0
- quasarr/downloads/sources/dt.py +66 -0
- quasarr/downloads/sources/dw.py +14 -7
- quasarr/downloads/sources/he.py +112 -0
- quasarr/downloads/sources/mb.py +47 -0
- quasarr/downloads/sources/nk.py +54 -0
- quasarr/downloads/sources/nx.py +42 -83
- quasarr/downloads/sources/sf.py +159 -0
- quasarr/downloads/sources/sj.py +7 -0
- quasarr/downloads/sources/sl.py +90 -0
- quasarr/downloads/sources/wd.py +110 -0
- quasarr/downloads/sources/wx.py +127 -0
- quasarr/providers/cloudflare.py +204 -0
- quasarr/providers/html_images.py +22 -0
- quasarr/providers/html_templates.py +211 -104
- quasarr/providers/imdb_metadata.py +108 -3
- quasarr/providers/log.py +19 -0
- quasarr/providers/myjd_api.py +201 -40
- quasarr/providers/notifications.py +99 -11
- quasarr/providers/obfuscated.py +65 -0
- quasarr/providers/sessions/__init__.py +0 -0
- quasarr/providers/sessions/al.py +286 -0
- quasarr/providers/sessions/dd.py +78 -0
- quasarr/providers/sessions/dl.py +175 -0
- quasarr/providers/sessions/nx.py +76 -0
- quasarr/providers/shared_state.py +656 -79
- quasarr/providers/statistics.py +154 -0
- quasarr/providers/version.py +60 -1
- quasarr/providers/web_server.py +1 -1
- quasarr/search/__init__.py +144 -15
- quasarr/search/sources/al.py +448 -0
- quasarr/search/sources/by.py +204 -0
- quasarr/search/sources/dd.py +135 -0
- quasarr/search/sources/dj.py +213 -0
- quasarr/search/sources/dl.py +354 -0
- quasarr/search/sources/dt.py +265 -0
- quasarr/search/sources/dw.py +94 -67
- quasarr/search/sources/fx.py +89 -33
- quasarr/search/sources/he.py +196 -0
- quasarr/search/sources/mb.py +195 -0
- quasarr/search/sources/nk.py +188 -0
- quasarr/search/sources/nx.py +75 -21
- quasarr/search/sources/sf.py +374 -0
- quasarr/search/sources/sj.py +213 -0
- quasarr/search/sources/sl.py +246 -0
- quasarr/search/sources/wd.py +208 -0
- quasarr/search/sources/wx.py +337 -0
- quasarr/storage/config.py +39 -10
- quasarr/storage/setup.py +269 -97
- quasarr/storage/sqlite_database.py +6 -1
- quasarr-1.23.0.dist-info/METADATA +306 -0
- quasarr-1.23.0.dist-info/RECORD +77 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/WHEEL +1 -1
- quasarr/arr/__init__.py +0 -423
- quasarr/captcha_solver/__init__.py +0 -284
- quasarr-0.1.6.dist-info/METADATA +0 -81
- quasarr-0.1.6.dist-info/RECORD +0 -31
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/entry_points.txt +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info/licenses}/LICENSE +0 -0
- {quasarr-0.1.6.dist-info → quasarr-1.23.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Quasarr
|
|
3
|
+
# Project by https://github.com/rix1337
|
|
4
|
+
|
|
5
|
+
from typing import Dict, Any
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class StatsHelper:
    """
    Statistics helper that keeps every counter in its own database row.

    Intended for use across multiple processes: all reads and writes go
    through the database handle exposed by shared_state, never through
    in-memory state.
    """

    # Every counter this helper tracks; each one lives in a separate row.
    _COUNTER_KEYS = (
        "packages_downloaded",
        "links_processed",
        "captcha_decryptions_automatic",
        "captcha_decryptions_manual",
        "failed_downloads",
        "failed_decryptions_automatic",
        "failed_decryptions_manual",
    )

    def __init__(self, shared_state):
        self.shared_state = shared_state
        self._ensure_stats_exist()

    def _get_db(self):
        """Return the 'statistics' database interface via shared_state."""
        return self.shared_state.values["database"]("statistics")

    def _ensure_stats_exist(self):
        """Seed any missing counter row with zero."""
        db = self._get_db()
        for key in self._COUNTER_KEYS:
            if db.retrieve(key) is None:
                db.store(key, "0")

    def _get_stat(self, key: str, default: int = 0) -> int:
        """Read one counter, falling back to *default* on missing or bad rows."""
        try:
            raw = self._get_db().retrieve(key)
            return default if raw is None else int(raw)
        except (ValueError, TypeError):
            return default

    def _increment_stat(self, key: str, count: int = 1):
        """Add *count* to one counter (read-modify-write through the DB)."""
        db = self._get_db()
        db.update_store(key, str(self._get_stat(key, 0) + count))

    def increment_package_with_links(self, links):
        """
        Record one package download and its link count, or a failed download.

        Args:
            links: Can be:
                - list/array: counts the length
                - int: uses the value directly
                - None/False/empty: treats as failed download
        """
        if isinstance(links, (list, tuple)):
            link_count = len(links)
        elif links is None or links is False:
            link_count = 0
        elif isinstance(links, int):
            link_count = links
        else:
            # Unexpected type: coerce if truthy, otherwise count as failure.
            try:
                link_count = int(links) if links else 0
            except (ValueError, TypeError):
                link_count = 0

        if link_count == 0:
            self._increment_stat("failed_downloads", 1)
        else:
            self._increment_stat("packages_downloaded", 1)
            self._increment_stat("links_processed", link_count)

    def increment_captcha_decryptions_automatic(self):
        """Count one successful automatic captcha decryption."""
        self._increment_stat("captcha_decryptions_automatic", 1)

    def increment_captcha_decryptions_manual(self):
        """Count one successful manual captcha decryption."""
        self._increment_stat("captcha_decryptions_manual", 1)

    def increment_failed_downloads(self):
        """Count one failed download."""
        self._increment_stat("failed_downloads", 1)

    def increment_failed_decryptions_automatic(self):
        """Count one failed automatic decryption."""
        self._increment_stat("failed_decryptions_automatic", 1)

    def increment_failed_decryptions_manual(self):
        """Count one failed manual decryption."""
        self._increment_stat("failed_decryptions_manual", 1)

    def get_stats(self) -> Dict[str, Any]:
        """Return all raw counters plus derived totals and success rates."""
        stats = {key: self._get_stat(key, 0) for key in self._COUNTER_KEYS}

        def percentage(successes, attempts):
            # Success rate in percent; zero when nothing was attempted.
            return (successes / attempts * 100) if attempts > 0 else 0

        auto_ok = stats["captcha_decryptions_automatic"]
        manual_ok = stats["captcha_decryptions_manual"]
        auto_failed = stats["failed_decryptions_automatic"]
        manual_failed = stats["failed_decryptions_manual"]

        total_ok = auto_ok + manual_ok
        total_failed = auto_failed + manual_failed
        download_attempts = stats["packages_downloaded"] + stats["failed_downloads"]
        decryption_attempts = total_ok + total_failed
        auto_attempts = auto_ok + auto_failed
        manual_attempts = manual_ok + manual_failed

        stats.update({
            "total_captcha_decryptions": total_ok,
            "total_failed_decryptions": total_failed,
            "total_download_attempts": download_attempts,
            "total_decryption_attempts": decryption_attempts,
            "total_automatic_attempts": auto_attempts,
            "total_manual_attempts": manual_attempts,
            "download_success_rate": percentage(stats["packages_downloaded"], download_attempts),
            "decryption_success_rate": percentage(total_ok, decryption_attempts),
            "automatic_decryption_success_rate": percentage(auto_ok, auto_attempts),
            "manual_decryption_success_rate": percentage(manual_ok, manual_attempts),
            "average_links_per_package": (
                stats["links_processed"] / stats["packages_downloaded"]
                if stats["packages_downloaded"] > 0 else 0
            ),
        })

        return stats
|
quasarr/providers/version.py
CHANGED
|
@@ -4,9 +4,68 @@
|
|
|
4
4
|
|
|
5
5
|
import re
|
|
6
6
|
|
|
7
|
+
import requests
|
|
8
|
+
|
|
7
9
|
|
|
8
10
|
def get_version():
    """Return the version string of this Quasarr build."""
    return "1.23.0"
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def get_latest_version():
    """
    Query GitHub API for the latest release of the Quasarr repository.

    Returns the tag name string (e.g. "1.5.0" or "1.4.2a1").
    Raises RuntimeError on HTTP errors.
    """
    api_url = "https://api.github.com/repos/rix1337/Quasarr/releases/latest"
    # Explicit timeout: requests has no default timeout, so without it the
    # update check could hang forever when GitHub is unreachable.
    resp = requests.get(api_url,
                        headers={"Accept": "application/vnd.github.v3+json"},
                        timeout=30)
    if resp.status_code != 200:
        raise RuntimeError(f"GitHub API error: {resp.status_code} {resp.text}")
    data = resp.json()
    # Releases normally carry "tag_name"; "name" is a fallback for odd releases.
    tag = data.get("tag_name") or data.get("name")
    if not tag:
        raise RuntimeError("Could not find tag_name in GitHub response")
    return tag
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _version_key(v):
|
|
32
|
+
"""
|
|
33
|
+
Normalize a version string into a tuple for comparisons.
|
|
34
|
+
E.g. "1.4.2a3" -> (1, 4, 2, 'a', 3), "1.4.2" -> (1, 4, 2, '', 0)
|
|
35
|
+
"""
|
|
36
|
+
m = re.match(r"^([0-9]+(?:\.[0-9]+)*)([a-z]?)([0-9]*)$", v)
|
|
37
|
+
if not m:
|
|
38
|
+
clean = re.sub(r"[^\d.]", "", v)
|
|
39
|
+
parts = clean.split(".")
|
|
40
|
+
nums = tuple(int(x) for x in parts if x.isdigit())
|
|
41
|
+
return nums + ("", 0)
|
|
42
|
+
base, alpha, num = m.groups()
|
|
43
|
+
nums = tuple(int(x) for x in base.split("."))
|
|
44
|
+
suffix_num = int(num) if num.isdigit() else 0
|
|
45
|
+
return nums + (alpha or "", suffix_num)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def is_newer(latest, current):
    """Return True if *latest* is strictly greater than *current* (semantic + alpha-suffix comparison)."""
    latest_key = _version_key(latest)
    current_key = _version_key(current)
    return latest_key > current_key
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def newer_version_available():
    """
    Check local vs. GitHub latest version.

    Returns the latest version string if a newer release is available,
    otherwise returns None.

    Any error from get_latest_version() (network, API) propagates to the
    caller — the previous try/except only re-raised and was removed.
    """
    current = get_version()
    latest = get_latest_version()
    if is_newer(latest, current):
        return latest
    return None
|
|
10
69
|
|
|
11
70
|
|
|
12
71
|
def create_version_file():
|
quasarr/providers/web_server.py
CHANGED
|
@@ -24,7 +24,7 @@ class Server:
|
|
|
24
24
|
self.listen = listen
|
|
25
25
|
self.port = port
|
|
26
26
|
self.server = make_server(self.listen, self.port, self.wsgi_app,
|
|
27
|
-
ThreadingWSGIServer)
|
|
27
|
+
ThreadingWSGIServer, handler_class=NoLoggingWSGIRequestHandler)
|
|
28
28
|
|
|
29
29
|
def serve_temporarily(self):
|
|
30
30
|
global temp_server_success
|
quasarr/search/__init__.py
CHANGED
|
@@ -2,32 +2,161 @@
|
|
|
2
2
|
# Quasarr
|
|
3
3
|
# Project by https://github.com/rix1337
|
|
4
4
|
|
|
5
|
+
import time
|
|
6
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
7
|
+
|
|
8
|
+
from quasarr.providers.log import info, debug
|
|
9
|
+
from quasarr.search.sources.al import al_feed, al_search
|
|
10
|
+
from quasarr.search.sources.by import by_feed, by_search
|
|
11
|
+
from quasarr.search.sources.dd import dd_search, dd_feed
|
|
12
|
+
from quasarr.search.sources.dj import dj_search, dj_feed
|
|
13
|
+
from quasarr.search.sources.dl import dl_search, dl_feed
|
|
14
|
+
from quasarr.search.sources.dt import dt_feed, dt_search
|
|
5
15
|
from quasarr.search.sources.dw import dw_feed, dw_search
|
|
6
16
|
from quasarr.search.sources.fx import fx_feed, fx_search
|
|
17
|
+
from quasarr.search.sources.he import he_feed, he_search
|
|
18
|
+
from quasarr.search.sources.mb import mb_feed, mb_search
|
|
19
|
+
from quasarr.search.sources.nk import nk_feed, nk_search
|
|
7
20
|
from quasarr.search.sources.nx import nx_feed, nx_search
|
|
21
|
+
from quasarr.search.sources.sf import sf_feed, sf_search
|
|
22
|
+
from quasarr.search.sources.sj import sj_search, sj_feed
|
|
23
|
+
from quasarr.search.sources.sl import sl_feed, sl_search
|
|
24
|
+
from quasarr.search.sources.wd import wd_feed, wd_search
|
|
25
|
+
from quasarr.search.sources.wx import wx_feed, wx_search
|
|
8
26
|
|
|
9
27
|
|
|
10
|
-
def get_search_results(shared_state, request_from, imdb_id="", search_phrase="", mirror=None, season="", episode=""):
    """
    Run every configured search provider concurrently and collect releases.

    Args:
        shared_state: process-wide state object exposing config access.
        request_from: user agent of the calling *arr application; decides
            which search mode is permitted (only LazyLibrarian may use
            free-text search phrases).
        imdb_id: IMDb id for Radarr/Sonarr searches ("tt" prefix is added
            when missing).
        search_phrase: free-text phrase, only honoured for LazyLibrarian.
        mirror: optional preferred mirror, passed through to every source.
        season: optional season filter, passed through to id/phrase searches.
        episode: optional episode filter, passed through to id/phrase searches.

    Returns:
        list: release entries aggregated from every enabled source.
    """
    results = []

    if imdb_id and not imdb_id.startswith('tt'):
        imdb_id = f'tt{imdb_id}'

    docs_search = "lazylibrarian" in request_from.lower()

    # Hoisted: one config lookup for all hostnames instead of one per source.
    hostnames = shared_state.values["config"]("Hostnames")
    al = hostnames.get("al")
    by = hostnames.get("by")
    dd = hostnames.get("dd")
    dl = hostnames.get("dl")
    dt = hostnames.get("dt")
    dj = hostnames.get("dj")
    dw = hostnames.get("dw")
    fx = hostnames.get("fx")
    he = hostnames.get("he")
    mb = hostnames.get("mb")
    nk = hostnames.get("nk")
    nx = hostnames.get("nx")
    sf = hostnames.get("sf")
    sj = hostnames.get("sj")
    sl = hostnames.get("sl")
    wd = hostnames.get("wd")
    wx = hostnames.get("wx")

    start_time = time.time()

    functions = []

    # Radarr/Sonarr use imdb_id for searches
    imdb_map = [
        (al, al_search),
        (by, by_search),
        (dd, dd_search),
        (dl, dl_search),
        (dt, dt_search),
        (dj, dj_search),
        (dw, dw_search),
        (fx, fx_search),
        (he, he_search),
        (mb, mb_search),
        (nk, nk_search),
        (nx, nx_search),
        (sf, sf_search),
        (sj, sj_search),
        (sl, sl_search),
        (wd, wd_search),
        (wx, wx_search),
    ]

    # LazyLibrarian uses search_phrase for searches
    phrase_map = [
        (by, by_search),
        (dl, dl_search),
        (dt, dt_search),
        (nx, nx_search),
        (sl, sl_search),
        (wd, wd_search),
    ]

    # Feed searches omit imdb_id and search_phrase
    feed_map = [
        (al, al_feed),
        (by, by_feed),
        (dd, dd_feed),
        (dj, dj_feed),
        (dl, dl_feed),
        (dt, dt_feed),
        (dw, dw_feed),
        (fx, fx_feed),
        (he, he_feed),
        (mb, mb_feed),
        (nk, nk_feed),
        (nx, nx_feed),
        (sf, sf_feed),
        (sj, sj_feed),
        (sl, sl_feed),
        (wd, wd_feed),
        (wx, wx_feed),
    ]

    # Default-argument lambdas (f=func, a=args, kw=kwargs) bind eagerly so
    # every closure keeps its own source function despite the shared loop.
    if imdb_id:  # only Radarr/Sonarr are using imdb_id
        args, kwargs = (
            (shared_state, start_time, request_from, imdb_id),
            {'mirror': mirror, 'season': season, 'episode': episode}
        )
        for flag, func in imdb_map:
            if flag:
                functions.append(lambda f=func, a=args, kw=kwargs: f(*a, **kw))

    elif search_phrase and docs_search:  # only LazyLibrarian is allowed to use search_phrase
        args, kwargs = (
            (shared_state, start_time, request_from, search_phrase),
            {'mirror': mirror, 'season': season, 'episode': episode}
        )
        for flag, func in phrase_map:
            if flag:
                functions.append(lambda f=func, a=args, kw=kwargs: f(*a, **kw))

    elif search_phrase:
        debug(
            f"Search phrase '{search_phrase}' is not supported for {request_from}. Only LazyLibrarian can use search phrases.")

    else:
        args, kwargs = (
            (shared_state, start_time, request_from),
            {'mirror': mirror}
        )
        for flag, func in feed_map:
            if flag:
                functions.append(lambda f=func, a=args, kw=kwargs: f(*a, **kw))

    if imdb_id:
        stype = f'IMDb-ID "{imdb_id}"'
    elif search_phrase:
        stype = f'Search-Phrase "{search_phrase}"'
    else:
        stype = "feed search"

    info(f'Starting {len(functions)} search functions for {stype}... This may take some time.')

    # Fan out to all sources; a failing source is logged but never aborts
    # the aggregate search.
    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(func) for func in functions]
        for future in as_completed(futures):
            try:
                result = future.result()
                results.extend(result)
            except Exception as e:
                info(f"An error occurred: {e}")

    elapsed_time = time.time() - start_time
    info(f"Providing {len(results)} releases to {request_from} for {stype}. Time taken: {elapsed_time:.2f} seconds")

    return results
|