qBitrr2-4.10.9-py3-none-any.whl → qBitrr2-5.4.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qBitrr/arss.py +2165 -889
- qBitrr/auto_update.py +382 -0
- qBitrr/bundled_data.py +3 -2
- qBitrr/config.py +20 -3
- qBitrr/db_lock.py +79 -0
- qBitrr/env_config.py +19 -7
- qBitrr/gen_config.py +287 -26
- qBitrr/logger.py +87 -3
- qBitrr/main.py +453 -101
- qBitrr/search_activity_store.py +88 -0
- qBitrr/static/assets/ArrView.js +2 -0
- qBitrr/static/assets/ArrView.js.map +1 -0
- qBitrr/static/assets/ConfigView.js +4 -0
- qBitrr/static/assets/ConfigView.js.map +1 -0
- qBitrr/static/assets/LogsView.js +230 -0
- qBitrr/static/assets/LogsView.js.map +1 -0
- qBitrr/static/assets/ProcessesView.js +2 -0
- qBitrr/static/assets/ProcessesView.js.map +1 -0
- qBitrr/static/assets/app.css +1 -0
- qBitrr/static/assets/app.js +11 -0
- qBitrr/static/assets/app.js.map +1 -0
- qBitrr/static/assets/build.svg +3 -0
- qBitrr/static/assets/check-mark.svg +5 -0
- qBitrr/static/assets/close.svg +4 -0
- qBitrr/static/assets/download.svg +5 -0
- qBitrr/static/assets/gear.svg +5 -0
- qBitrr/static/assets/lidarr.svg +1 -0
- qBitrr/static/assets/live-streaming.svg +8 -0
- qBitrr/static/assets/log.svg +3 -0
- qBitrr/static/assets/plus.svg +4 -0
- qBitrr/static/assets/process.svg +15 -0
- qBitrr/static/assets/react-select.esm.js +14 -0
- qBitrr/static/assets/react-select.esm.js.map +1 -0
- qBitrr/static/assets/refresh-arrow.svg +3 -0
- qBitrr/static/assets/table.js +23 -0
- qBitrr/static/assets/table.js.map +1 -0
- qBitrr/static/assets/trash.svg +8 -0
- qBitrr/static/assets/up-arrow.svg +3 -0
- qBitrr/static/assets/useInterval.js +2 -0
- qBitrr/static/assets/useInterval.js.map +1 -0
- qBitrr/static/assets/vendor.js +33 -0
- qBitrr/static/assets/vendor.js.map +1 -0
- qBitrr/static/assets/visibility.svg +9 -0
- qBitrr/static/index.html +47 -0
- qBitrr/static/manifest.json +23 -0
- qBitrr/static/sw.js +105 -0
- qBitrr/static/vite.svg +1 -0
- qBitrr/tables.py +44 -0
- qBitrr/utils.py +82 -15
- qBitrr/versioning.py +136 -0
- qBitrr/webui.py +2612 -0
- qbitrr2-5.4.5.dist-info/METADATA +1116 -0
- qbitrr2-5.4.5.dist-info/RECORD +61 -0
- {qBitrr2-4.10.9.dist-info → qbitrr2-5.4.5.dist-info}/WHEEL +1 -1
- qBitrr2-4.10.9.dist-info/METADATA +0 -233
- qBitrr2-4.10.9.dist-info/RECORD +0 -19
- {qBitrr2-4.10.9.dist-info → qbitrr2-5.4.5.dist-info}/entry_points.txt +0 -0
- {qBitrr2-4.10.9.dist-info → qbitrr2-5.4.5.dist-info/licenses}/LICENSE +0 -0
- {qBitrr2-4.10.9.dist-info → qbitrr2-5.4.5.dist-info}/top_level.txt +0 -0
qBitrr/webui.py
ADDED
@@ -0,0 +1,2612 @@
from __future__ import annotations

import io
import logging
import os
import re
import secrets
import threading
import time
from collections.abc import Mapping
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any

from flask import Flask, jsonify, redirect, request, send_file
from peewee import fn

from qBitrr.arss import FreeSpaceManager, PlaceHolderArr
from qBitrr.bundled_data import patched_version, tagged_version
from qBitrr.config import CONFIG, HOME_PATH
from qBitrr.logger import run_logs
from qBitrr.search_activity_store import (
    clear_search_activity,
    fetch_search_activities,
)
from qBitrr.versioning import fetch_latest_release, fetch_release_by_tag


def _toml_set(doc, dotted_key: str, value: Any):
    keys = dotted_key.split(".")
    cur = doc
    for k in keys[:-1]:
        if k not in cur or not isinstance(cur[k], dict):
            cur[k] = {}
        cur = cur[k]
    cur[keys[-1]] = value


def _toml_delete(doc, dotted_key: str) -> None:
    keys = dotted_key.split(".")
    cur = doc
    parents = []
    for k in keys[:-1]:
        next_cur = cur.get(k)
        if not isinstance(next_cur, dict):
            return
        parents.append((cur, k))
        cur = next_cur
    cur.pop(keys[-1], None)
    for parent, key in reversed(parents):
        node = parent.get(key)
        if isinstance(node, dict) and not node:
            parent.pop(key, None)
        else:
            break


def _toml_to_jsonable(obj: Any) -> Any:
    try:
        if hasattr(obj, "unwrap"):
            return _toml_to_jsonable(obj.unwrap())
        if isinstance(obj, dict):
            return {k: _toml_to_jsonable(v) for k, v in obj.items()}
        if isinstance(obj, list):
            return [_toml_to_jsonable(v) for v in obj]
        return obj
    except Exception:
        return obj

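# Example (illustrative): _toml_set(doc, "WebUI.Token", "abc") turns an empty
# doc into {"WebUI": {"Token": "abc"}}; _toml_delete(doc, "WebUI.Token") then
# removes the key and prunes any parent tables it left empty.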
class WebUI:
    def __init__(self, manager, host: str = "0.0.0.0", port: int = 6969):
        self.manager = manager
        self.host = host
        self.port = port
        self.app = Flask(__name__)
        self.logger = logging.getLogger("qBitrr.WebUI")
        run_logs(self.logger, "WebUI")
        self.logger.info("Initialising WebUI on %s:%s", self.host, self.port)
        if self.host in {"0.0.0.0", "::"}:
            self.logger.warning(
                "WebUI configured to listen on %s. Expose this only behind a trusted reverse proxy.",
                self.host,
            )
        self.app.logger.handlers.clear()
        self.app.logger.propagate = True
        self.app.logger.setLevel(self.logger.level)
        werkzeug_logger = logging.getLogger("werkzeug")
        werkzeug_logger.handlers.clear()
        werkzeug_logger.propagate = True
        werkzeug_logger.setLevel(self.logger.level)

        # Add cache control for static files to support config reload
        @self.app.after_request
        def add_cache_headers(response):
            # Prevent caching of index.html and service worker to ensure fresh config loads
            if request.path in ("/static/index.html", "/ui", "/static/sw.js", "/sw.js"):
                response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
                response.headers["Pragma"] = "no-cache"
                response.headers["Expires"] = "0"
            return response

        # Security token (optional) - auto-generate and persist if empty
        self.token = CONFIG.get("WebUI.Token", fallback=None)
        if not self.token:
            self.token = secrets.token_hex(32)
            try:
                _toml_set(CONFIG.config, "WebUI.Token", self.token)
                CONFIG.save()
            except Exception:
                pass
            else:
                self.logger.notice("Generated new WebUI token")
        self._github_repo = "Feramance/qBitrr"
        self._version_lock = threading.Lock()
        self._version_cache = {
            "current_version": patched_version,
            "latest_version": None,
            "changelog": "",  # Latest version changelog
            "current_version_changelog": "",  # Current version changelog
            "changelog_url": f"https://github.com/{self._github_repo}/releases",
            "repository_url": f"https://github.com/{self._github_repo}",
            "homepage_url": f"https://github.com/{self._github_repo}",
            "update_available": False,
            "last_checked": None,
            "error": None,
            "installation_type": "unknown",
            "binary_download_url": None,
            "binary_download_name": None,
            "binary_download_size": None,
            "binary_download_error": None,
        }
        self._version_cache_expiry = datetime.utcnow() - timedelta(seconds=1)
        self._update_state = {
            "in_progress": False,
            "last_result": None,
            "last_error": None,
            "completed_at": None,
        }
        self._update_thread: threading.Thread | None = None
        self._rebuilding_arrs = False
        self._register_routes()
        static_root = Path(__file__).with_name("static")
        if not (static_root / "index.html").exists():
            self.logger.warning(
                "WebUI static bundle is missing. Install npm and run "
                "'npm ci && npm run build' inside the 'webui' folder before packaging."
            )
        self._thread: threading.Thread | None = None
        self._use_dev_server: bool | None = None

        # Shutdown control for graceful restart
        self._shutdown_event = threading.Event()
        self._restart_requested = False
        self._server = None  # Will hold Waitress server reference

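    # Version metadata is fetched from GitHub releases and cached; the cache is
    # primed as already expired above, so the first status request triggers a fetch.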
    def _fetch_version_info(self) -> dict[str, Any]:
        info = fetch_latest_release(self._github_repo)
        if info.get("error"):
            self.logger.debug("Failed to fetch latest release information: %s", info["error"])
            return {"error": info["error"]}
        latest_display = info.get("raw_tag") or info.get("normalized")
        return {
            "latest_version": latest_display,
            "update_available": bool(info.get("update_available")),
            "changelog": info.get("changelog") or "",
            "changelog_url": info.get("changelog_url"),
            "error": None,
        }

    def _fetch_current_version_changelog(self) -> dict[str, Any]:
        """Fetch changelog for the current running version."""
        current_ver = tagged_version
        if not current_ver:
            return {
                "changelog": "",
                "changelog_url": f"https://github.com/{self._github_repo}/releases",
                "error": "No current version",
            }

        info = fetch_release_by_tag(current_ver, self._github_repo)
        if info.get("error"):
            self.logger.debug("Failed to fetch current version changelog: %s", info["error"])
            # Fallback to generic releases page
            return {
                "changelog": "",
                "changelog_url": f"https://github.com/{self._github_repo}/releases",
                "error": info["error"],
            }

        return {
            "changelog": info.get("changelog") or "",
            "changelog_url": info.get("changelog_url")
            or f"https://github.com/{self._github_repo}/releases/tag/v{current_ver}",
            "error": None,
        }

    def _ensure_version_info(self, force: bool = False) -> dict[str, Any]:
        now = datetime.utcnow()
        with self._version_lock:
            if not force and now < self._version_cache_expiry:
                snapshot = dict(self._version_cache)
                snapshot["update_state"] = dict(self._update_state)
                return snapshot
            # optimistic expiry to avoid concurrent fetches
            self._version_cache_expiry = now + timedelta(minutes=5)

        latest_info = self._fetch_version_info()
        current_ver_info = self._fetch_current_version_changelog()

        with self._version_lock:
            if latest_info:
                if latest_info.get("latest_version") is not None:
                    self._version_cache["latest_version"] = latest_info["latest_version"]
                if latest_info.get("changelog") is not None:
                    self._version_cache["changelog"] = latest_info.get("changelog") or ""
                if latest_info.get("changelog_url"):
                    self._version_cache["changelog_url"] = latest_info["changelog_url"]
                if "update_available" in latest_info:
                    self._version_cache["update_available"] = bool(latest_info["update_available"])
                if "error" in latest_info:
                    self._version_cache["error"] = latest_info["error"]
            # Store current version changelog
            if current_ver_info and not current_ver_info.get("error"):
                self._version_cache["current_version_changelog"] = (
                    current_ver_info.get("changelog") or ""
                )

            self._version_cache["current_version"] = patched_version
            self._version_cache["last_checked"] = now.isoformat()

            # Add installation type and binary download info
            from qBitrr.auto_update import get_binary_download_url, get_installation_type

            install_type = get_installation_type()
            self._version_cache["installation_type"] = install_type

            # If binary and update available, get download URL
            if install_type == "binary" and self._version_cache.get("update_available"):
                latest_version = self._version_cache.get("latest_version")
                if latest_version:
                    binary_info = get_binary_download_url(latest_version, self.logger)
                    self._version_cache["binary_download_url"] = binary_info.get("url")
                    self._version_cache["binary_download_name"] = binary_info.get("name")
                    self._version_cache["binary_download_size"] = binary_info.get("size")
                    if binary_info.get("error"):
                        self._version_cache["binary_download_error"] = binary_info["error"]

            # Extend cache validity if fetch succeeded; otherwise allow quick retry.
            if not latest_info or latest_info.get("error"):
                self._version_cache_expiry = now + timedelta(minutes=5)
            else:
                self._version_cache_expiry = now + timedelta(hours=1)
            snapshot = dict(self._version_cache)
            snapshot["update_state"] = dict(self._update_state)
            return snapshot

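    # Manual updates run on a daemon thread; _update_state is mutated only while
    # holding _version_lock, so snapshot readers never observe a torn result.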
    def _trigger_manual_update(self) -> tuple[bool, str]:
        with self._version_lock:
            if self._update_state["in_progress"]:
                return False, "An update is already in progress."
            update_thread = threading.Thread(
                target=self._run_manual_update, name="ManualUpdater", daemon=True
            )
            self._update_state["in_progress"] = True
            self._update_state["last_error"] = None
            self._update_state["last_result"] = None
            self._update_thread = update_thread
            update_thread.start()
            return True, "started"

    def _run_manual_update(self) -> None:
        result = "success"
        error_message: str | None = None
        try:
            self.logger.notice("Manual update triggered from WebUI")
            try:
                self.manager._perform_auto_update()
            except AttributeError:
                from qBitrr.auto_update import perform_self_update

                if not perform_self_update(self.manager.logger):
                    raise RuntimeError("pip upgrade did not complete successfully")
            try:
                self.manager.request_restart()
            except Exception:
                self.logger.warning(
                    "Update applied but restart request failed; exiting manually."
                )
        except Exception as exc:
            result = "error"
            error_message = str(exc)
            self.logger.exception("Manual update failed")
        finally:
            completed_at = datetime.utcnow().isoformat()
            with self._version_lock:
                self._update_state.update(
                    {
                        "in_progress": False,
                        "last_result": result,
                        "last_error": error_message,
                        "completed_at": completed_at,
                    }
                )
                self._update_thread = None
                self._version_cache_expiry = datetime.utcnow() - timedelta(seconds=1)
            try:
                self.manager.configure_auto_update()
            except Exception:
                self.logger.exception("Failed to reconfigure auto update after manual update")
            try:
                self._ensure_version_info(force=True)
            except Exception:
                self.logger.debug("Version metadata refresh after update failed", exc_info=True)

    @staticmethod
    def _safe_str(value: Any) -> str:
        if value is None:
            return ""
        return str(value)

    def _ensure_arr_db(self, arr) -> bool:
        if not getattr(arr, "search_setup_completed", False):
            try:
                arr.register_search_mode()
            except Exception:
                return False
        if not getattr(arr, "search_setup_completed", False):
            return False
        if not getattr(arr, "_webui_db_loaded", False):
            try:
                arr.db_update()
                arr._webui_db_loaded = True
            except Exception:
                arr._webui_db_loaded = False
        return True

    @staticmethod
    def _safe_bool(value: Any) -> bool:
        return bool(value) and str(value).lower() not in {"0", "false", "none"}

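    # Note: _safe_bool treats the strings "0", "false" and "none" as False,
    # e.g. _safe_bool("False") is False while _safe_bool("yes") is True.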
    def _radarr_movies_from_db(
        self,
        arr,
        search: str | None,
        page: int,
        page_size: int,
        year_min: int | None = None,
        year_max: int | None = None,
        monitored: bool | None = None,
        has_file: bool | None = None,
        quality_met: bool | None = None,
        is_request: bool | None = None,
    ) -> dict[str, Any]:
        if not self._ensure_arr_db(arr):
            return {
                "counts": {
                    "available": 0,
                    "monitored": 0,
                    "missing": 0,
                    "quality_met": 0,
                    "requests": 0,
                },
                "total": 0,
                "page": max(page, 0),
                "page_size": max(page_size, 1),
                "movies": [],
            }
        model = getattr(arr, "model_file", None)
        db = getattr(arr, "db", None)
        if model is None or db is None:
            return {
                "counts": {
                    "available": 0,
                    "monitored": 0,
                    "missing": 0,
                    "quality_met": 0,
                    "requests": 0,
                },
                "total": 0,
                "page": max(page, 0),
                "page_size": max(page_size, 1),
                "movies": [],
            }
        page = max(page, 0)
        page_size = max(page_size, 1)
        with db.connection_context():
            base_query = model.select()

            # Calculate counts
            monitored_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(model.Monitored == True)  # noqa: E712
                .scalar()
                or 0
            )
            available_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(
                    (model.Monitored == True)  # noqa: E712
                    & (model.MovieFileId.is_null(False))
                    & (model.MovieFileId != 0)
                )
                .scalar()
                or 0
            )
            missing_count = max(monitored_count - available_count, 0)
            quality_met_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(model.QualityMet == True)  # noqa: E712
                .scalar()
                or 0
            )
            request_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(model.IsRequest == True)  # noqa: E712
                .scalar()
                or 0
            )

            # Build filtered query
            query = base_query
            if search:
                query = query.where(model.Title.contains(search))
            if year_min is not None:
                query = query.where(model.Year >= year_min)
            if year_max is not None:
                query = query.where(model.Year <= year_max)
            if monitored is not None:
                query = query.where(model.Monitored == monitored)
            if has_file is not None:
                if has_file:
                    query = query.where(
                        (model.MovieFileId.is_null(False)) & (model.MovieFileId != 0)
                    )
                else:
                    query = query.where(
                        (model.MovieFileId.is_null(True)) | (model.MovieFileId == 0)
                    )
            if quality_met is not None:
                query = query.where(model.QualityMet == quality_met)
            if is_request is not None:
                query = query.where(model.IsRequest == is_request)

            total = query.count()
            page_items = query.order_by(model.Title.asc()).paginate(page + 1, page_size).iterator()
            movies = []
            for movie in page_items:
                movies.append(
                    {
                        "id": movie.EntryId,
                        "title": movie.Title or "",
                        "year": movie.Year,
                        "monitored": self._safe_bool(movie.Monitored),
                        "hasFile": self._safe_bool(movie.MovieFileId),
                        "qualityMet": self._safe_bool(movie.QualityMet),
                        "isRequest": self._safe_bool(movie.IsRequest),
                        "upgrade": self._safe_bool(movie.Upgrade),
                        "customFormatScore": movie.CustomFormatScore,
                        "minCustomFormatScore": movie.MinCustomFormatScore,
                        "customFormatMet": self._safe_bool(movie.CustomFormatMet),
                        "reason": movie.Reason,
                    }
                )
            return {
                "counts": {
                    "available": available_count,
                    "monitored": monitored_count,
                    "missing": missing_count,
                    "quality_met": quality_met_count,
                    "requests": request_count,
                },
                "total": total,
                "page": page,
                "page_size": page_size,
                "movies": movies,
            }

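    # Pages are zero-based at this API surface while peewee's paginate() is
    # one-based, hence the paginate(page + 1, page_size) calls throughout.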
    def _lidarr_albums_from_db(
        self,
        arr,
        search: str | None,
        page: int,
        page_size: int,
        monitored: bool | None = None,
        has_file: bool | None = None,
        quality_met: bool | None = None,
        is_request: bool | None = None,
    ) -> dict[str, Any]:
        if not self._ensure_arr_db(arr):
            return {
                "counts": {
                    "available": 0,
                    "monitored": 0,
                    "missing": 0,
                    "quality_met": 0,
                    "requests": 0,
                },
                "total": 0,
                "page": max(page, 0),
                "page_size": max(page_size, 1),
                "albums": [],
            }
        model = getattr(arr, "model_file", None)
        db = getattr(arr, "db", None)
        if model is None or db is None:
            return {
                "counts": {
                    "available": 0,
                    "monitored": 0,
                    "missing": 0,
                    "quality_met": 0,
                    "requests": 0,
                },
                "total": 0,
                "page": max(page, 0),
                "page_size": max(page_size, 1),
                "albums": [],
            }
        page = max(page, 0)
        page_size = max(page_size, 1)
        with db.connection_context():
            base_query = model.select()

            # Calculate counts
            monitored_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(model.Monitored == True)  # noqa: E712
                .scalar()
                or 0
            )
            available_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(
                    (model.Monitored == True)  # noqa: E712
                    & (model.AlbumFileId.is_null(False))
                    & (model.AlbumFileId != 0)
                )
                .scalar()
                or 0
            )
            missing_count = max(monitored_count - available_count, 0)
            quality_met_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(model.QualityMet == True)  # noqa: E712
                .scalar()
                or 0
            )
            request_count = (
                model.select(fn.COUNT(model.EntryId))
                .where(model.IsRequest == True)  # noqa: E712
                .scalar()
                or 0
            )

            # Build filtered query
            query = base_query
            if search:
                query = query.where(model.Title.contains(search))
            if monitored is not None:
                query = query.where(model.Monitored == monitored)
            if has_file is not None:
                if has_file:
                    query = query.where(
                        (model.AlbumFileId.is_null(False)) & (model.AlbumFileId != 0)
                    )
                else:
                    query = query.where(
                        (model.AlbumFileId.is_null(True)) | (model.AlbumFileId == 0)
                    )
            if quality_met is not None:
                query = query.where(model.QualityMet == quality_met)
            if is_request is not None:
                query = query.where(model.IsRequest == is_request)

            total = query.count()
            query = query.order_by(model.Title).paginate(page + 1, page_size)
            albums = []
            for album in query:
                # Always fetch tracks from database (Lidarr only)
                track_model = getattr(arr, "track_file_model", None)
                tracks_list = []
                track_monitored_count = 0
                track_available_count = 0

                if track_model:
                    try:
                        # Query tracks from database for this album
                        track_query = (
                            track_model.select()
                            .where(track_model.AlbumId == album.EntryId)
                            .order_by(track_model.TrackNumber)
                        )
                        track_count = track_query.count()
                        self.logger.debug(
                            f"Album {album.EntryId} ({album.Title}): Found {track_count} tracks in database"
                        )

                        for track in track_query:
                            is_monitored = self._safe_bool(track.Monitored)
                            has_file = self._safe_bool(track.HasFile)

                            if is_monitored:
                                track_monitored_count += 1
                            if has_file:
                                track_available_count += 1

                            tracks_list.append(
                                {
                                    "id": track.EntryId,
                                    "trackNumber": track.TrackNumber,
                                    "title": track.Title,
                                    "duration": track.Duration,
                                    "hasFile": has_file,
                                    "trackFileId": track.TrackFileId,
                                    "monitored": is_monitored,
                                }
                            )
                    except Exception as e:
                        self.logger.warning(
                            f"Failed to fetch tracks for album {album.EntryId} ({album.Title}): {e}"
                        )

                track_missing_count = max(track_monitored_count - track_available_count, 0)

                # Build album data in Sonarr-like structure
                album_item = {
                    "album": {
                        "id": album.EntryId,
                        "title": album.Title,
                        "artistId": album.ArtistId,
                        "artistName": album.ArtistTitle,
                        "monitored": self._safe_bool(album.Monitored),
                        "hasFile": bool(album.AlbumFileId and album.AlbumFileId != 0),
                        "foreignAlbumId": album.ForeignAlbumId,
                        "releaseDate": (
                            album.ReleaseDate.isoformat()
                            if album.ReleaseDate and hasattr(album.ReleaseDate, "isoformat")
                            else album.ReleaseDate if isinstance(album.ReleaseDate, str) else None
                        ),
                        "qualityMet": self._safe_bool(album.QualityMet),
                        "isRequest": self._safe_bool(album.IsRequest),
                        "upgrade": self._safe_bool(album.Upgrade),
                        "customFormatScore": album.CustomFormatScore,
                        "minCustomFormatScore": album.MinCustomFormatScore,
                        "customFormatMet": self._safe_bool(album.CustomFormatMet),
                        "reason": album.Reason,
                    },
                    "totals": {
                        "available": track_available_count,
                        "monitored": track_monitored_count,
                        "missing": track_missing_count,
                    },
                    "tracks": tracks_list,
                }

                albums.append(album_item)
            return {
                "counts": {
                    "available": available_count,
                    "monitored": monitored_count,
                    "missing": missing_count,
                    "quality_met": quality_met_count,
                    "requests": request_count,
                },
                "total": total,
                "page": page,
                "page_size": page_size,
                "albums": albums,
            }

    def _lidarr_tracks_from_db(
        self,
        arr,
        search: str | None,
        page: int,
        page_size: int,
        monitored: bool | None = None,
        has_file: bool | None = None,
    ) -> dict[str, Any]:
        if not self._ensure_arr_db(arr):
            return {
                "counts": {
                    "available": 0,
                    "monitored": 0,
                    "missing": 0,
                },
                "total": 0,
                "page": page,
                "page_size": page_size,
                "tracks": [],
            }

        track_model = getattr(arr, "track_file_model", None)
        album_model = getattr(arr, "model_file", None)

        if not track_model or not album_model:
            return {
                "counts": {
                    "available": 0,
                    "monitored": 0,
                    "missing": 0,
                },
                "total": 0,
                "page": page,
                "page_size": page_size,
                "tracks": [],
            }

        try:
            # Join tracks with albums to get artist/album info
            query = (
                track_model.select(
                    track_model,
                    album_model.Title.alias("AlbumTitle"),
                    album_model.ArtistTitle,
                    album_model.ArtistId,
                )
                .join(album_model, on=(track_model.AlbumId == album_model.EntryId))
                .where(True)
            )

            # Apply filters
            if monitored is not None:
                query = query.where(track_model.Monitored == monitored)
            if has_file is not None:
                query = query.where(track_model.HasFile == has_file)
            if search:
                query = query.where(
                    (track_model.Title.contains(search))
                    | (album_model.Title.contains(search))
                    | (album_model.ArtistTitle.contains(search))
                )

            # Get counts
            available_count = (
                track_model.select()
                .join(album_model, on=(track_model.AlbumId == album_model.EntryId))
                .where(track_model.HasFile == True)
                .count()
            )
            monitored_count = (
                track_model.select()
                .join(album_model, on=(track_model.AlbumId == album_model.EntryId))
                .where(track_model.Monitored == True)
                .count()
            )
            missing_count = (
                track_model.select()
                .join(album_model, on=(track_model.AlbumId == album_model.EntryId))
                .where(track_model.HasFile == False)
                .count()
            )

            total = query.count()

            # Apply pagination
            query = query.order_by(
                album_model.ArtistTitle, album_model.Title, track_model.TrackNumber
            ).paginate(page + 1, page_size)

            tracks = []
            for track in query:
                tracks.append(
                    {
                        "id": track.EntryId,
                        "trackNumber": track.TrackNumber,
                        "title": track.Title,
                        "duration": track.Duration,
                        "hasFile": track.HasFile,
                        "trackFileId": track.TrackFileId,
                        "monitored": track.Monitored,
                        "albumId": track.AlbumId,
                        "albumTitle": track.AlbumTitle,
                        "artistTitle": track.ArtistTitle,
                        "artistId": track.ArtistId,
                    }
                )

            return {
                "counts": {
                    "available": available_count,
                    "monitored": monitored_count,
                    "missing": missing_count,
                },
                "total": total,
                "page": page,
                "page_size": page_size,
                "tracks": tracks,
            }
        except Exception as e:
            self.logger.error(f"Error fetching Lidarr tracks: {e}")
            return {
                "counts": {"available": 0, "monitored": 0, "missing": 0},
                "total": 0,
                "page": page,
                "page_size": page_size,
                "tracks": [],
            }

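    # Series payloads below group episodes into per-season buckets; when the
    # series table is unavailable the method falls back to grouping episode rows.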
    def _sonarr_series_from_db(
        self,
        arr,
        search: str | None,
        page: int,
        page_size: int,
        *,
        missing_only: bool = False,
    ) -> dict[str, Any]:
        if not self._ensure_arr_db(arr):
            return {
                "counts": {"available": 0, "monitored": 0, "missing": 0},
                "total": 0,
                "page": max(page, 0),
                "page_size": max(page_size, 1),
                "series": [],
            }
        episodes_model = getattr(arr, "model_file", None)
        series_model = getattr(arr, "series_file_model", None)
        db = getattr(arr, "db", None)
        if episodes_model is None or db is None:
            return {
                "counts": {"available": 0, "monitored": 0, "missing": 0},
                "total": 0,
                "page": max(page, 0),
                "page_size": max(page_size, 1),
                "series": [],
            }
        page = max(page, 0)
        page_size = max(page_size, 1)
        resolved_page = page
        missing_condition = episodes_model.EpisodeFileId.is_null(True) | (
            episodes_model.EpisodeFileId == 0
        )
        with db.connection_context():
            monitored_count = (
                episodes_model.select(fn.COUNT(episodes_model.EntryId))
                .where(episodes_model.Monitored == True)  # noqa: E712
                .scalar()
                or 0
            )
            available_count = (
                episodes_model.select(fn.COUNT(episodes_model.EntryId))
                .where(
                    (episodes_model.Monitored == True)  # noqa: E712
                    & (episodes_model.EpisodeFileId.is_null(False))
                    & (episodes_model.EpisodeFileId != 0)
                )
                .scalar()
                or 0
            )
            missing_count = max(monitored_count - available_count, 0)
            missing_series_ids: list[int] = []
            if missing_only:
                missing_series_ids = [
                    row.SeriesId
                    for row in episodes_model.select(episodes_model.SeriesId)
                    .where((episodes_model.Monitored == True) & missing_condition)  # noqa: E712
                    .distinct()
                    if getattr(row, "SeriesId", None) is not None
                ]
                if not missing_series_ids:
                    return {
                        "counts": {
                            "available": available_count,
                            "monitored": monitored_count,
                            "missing": missing_count,
                        },
                        "total": 0,
                        "page": resolved_page,
                        "page_size": page_size,
                        "series": [],
                    }
            payload: list[dict[str, Any]] = []
            total_series = 0

            if series_model is not None:
                series_query = series_model.select()
                if search:
                    series_query = series_query.where(series_model.Title.contains(search))
                if missing_only and missing_series_ids:
                    series_query = series_query.where(series_model.EntryId.in_(missing_series_ids))
                total_series = series_query.count()
                if total_series:
                    max_pages = (total_series + page_size - 1) // page_size
                    if max_pages:
                        resolved_page = min(resolved_page, max_pages - 1)
                    resolved_page = max(resolved_page, 0)
                series_rows = (
                    series_query.order_by(series_model.Title.asc())
                    .paginate(resolved_page + 1, page_size)
                    .iterator()
                )
                for series in series_rows:
                    episodes_query = episodes_model.select().where(
                        episodes_model.SeriesId == series.EntryId
                    )
                    if missing_only:
                        episodes_query = episodes_query.where(missing_condition)
                    episodes_query = episodes_query.order_by(
                        episodes_model.SeasonNumber.asc(),
                        episodes_model.EpisodeNumber.asc(),
                    )
                    episodes = episodes_query.iterator()
                    episodes_list = list(episodes)
                    self.logger.debug(
                        f"[Sonarr Series] Series {getattr(series, 'Title', 'unknown')} (ID {getattr(series, 'EntryId', '?')}) has {len(episodes_list)} episodes (missing_only={missing_only})"
                    )
                    seasons: dict[str, dict[str, Any]] = {}
                    series_monitored = 0
                    series_available = 0
                    for ep in episodes_list:
                        season_value = getattr(ep, "SeasonNumber", None)
                        season_key = (
                            str(season_value) if season_value is not None else "unknown"
                        )
                        season_bucket = seasons.setdefault(
                            season_key,
                            {"monitored": 0, "available": 0, "episodes": []},
                        )
                        is_monitored = self._safe_bool(getattr(ep, "Monitored", None))
                        has_file = self._safe_bool(getattr(ep, "EpisodeFileId", None))
                        if is_monitored:
                            season_bucket["monitored"] += 1
                            series_monitored += 1
                        if has_file:
                            season_bucket["available"] += 1
                            if is_monitored:
                                series_available += 1
                        air_date = getattr(ep, "AirDateUtc", None)
                        if hasattr(air_date, "isoformat"):
                            try:
                                air_value = air_date.isoformat()
                            except Exception:
                                air_value = str(air_date)
                        elif isinstance(air_date, str):
                            air_value = air_date
                        else:
                            air_value = ""
                        if (not missing_only) or (not has_file):
                            season_bucket["episodes"].append(
                                {
                                    "episodeNumber": getattr(ep, "EpisodeNumber", None),
                                    "title": getattr(ep, "Title", "") or "",
                                    "monitored": is_monitored,
                                    "hasFile": has_file,
                                    "airDateUtc": air_value,
                                    "reason": getattr(ep, "Reason", None),
                                }
                            )
                    for bucket in seasons.values():
                        monitored_eps = int(bucket.get("monitored", 0) or 0)
                        available_eps = int(bucket.get("available", 0) or 0)
                        bucket["missing"] = max(
                            monitored_eps - min(available_eps, monitored_eps), 0
                        )
                    series_missing = max(series_monitored - series_available, 0)
                    if missing_only:
                        seasons = {
                            key: data for key, data in seasons.items() if data["episodes"]
                        }
                        if not seasons:
                            continue
                    payload.append(
                        {
                            "series": {
                                "id": getattr(series, "EntryId", None),
                                "title": getattr(series, "Title", "") or "",
                            },
                            "totals": {
                                "available": series_available,
                                "monitored": series_monitored,
                                "missing": series_missing,
                            },
                            "seasons": seasons,
                        }
                    )

            if not payload:
                # Fallback: construct series payload from episode data (episode mode)
                base_episode_query = episodes_model.select()
                if search:
                    search_filters = []
                    if hasattr(episodes_model, "SeriesTitle"):
                        search_filters.append(episodes_model.SeriesTitle.contains(search))
                    search_filters.append(episodes_model.Title.contains(search))
                    expr = search_filters[0]
                    for extra in search_filters[1:]:
                        expr |= extra
                    base_episode_query = base_episode_query.where(expr)
                if missing_only:
                    base_episode_query = base_episode_query.where(missing_condition)

                series_id_field = (
                    getattr(episodes_model, "SeriesId", None)
                    if hasattr(episodes_model, "SeriesId")
                    else None
                )
                series_title_field = (
                    getattr(episodes_model, "SeriesTitle", None)
                    if hasattr(episodes_model, "SeriesTitle")
                    else None
                )

                distinct_fields = []
                field_names: list[str] = []
                if series_id_field is not None:
                    distinct_fields.append(series_id_field)
                    field_names.append("SeriesId")
                if series_title_field is not None:
                    distinct_fields.append(series_title_field)
                    field_names.append("SeriesTitle")
                if not distinct_fields:
                    # Fall back to title only to avoid empty select
                    distinct_fields.append(episodes_model.Title.alias("SeriesTitle"))
                    field_names.append("SeriesTitle")

                distinct_query = (
                    base_episode_query.select(*distinct_fields)
                    .distinct()
                    .order_by(
                        series_title_field.asc()
                        if series_title_field is not None
                        else episodes_model.Title.asc()
                    )
                )
                series_key_rows = list(distinct_query.tuples())
                total_series = len(series_key_rows)
                if total_series:
                    max_pages = (total_series + page_size - 1) // page_size
                    resolved_page = min(resolved_page, max_pages - 1)
                    resolved_page = max(resolved_page, 0)
                    start = resolved_page * page_size
                    end = start + page_size
                    page_keys = series_key_rows[start:end]
                else:
                    resolved_page = 0
                    page_keys = []

                payload = []
                for key in page_keys:
                    key_data = dict(zip(field_names, key))
                    series_id = key_data.get("SeriesId")
                    series_title = key_data.get("SeriesTitle")
                    episode_conditions = []
                    if series_id is not None:
                        episode_conditions.append(episodes_model.SeriesId == series_id)
                    if series_title is not None:
                        episode_conditions.append(episodes_model.SeriesTitle == series_title)
                    episodes_query = episodes_model.select()
                    if episode_conditions:
                        condition = episode_conditions[0]
                        for extra in episode_conditions[1:]:
                            condition &= extra
                        episodes_query = episodes_query.where(condition)
                    if missing_only:
                        episodes_query = episodes_query.where(missing_condition)
                    episodes_query = episodes_query.order_by(
                        episodes_model.SeasonNumber.asc(),
                        episodes_model.EpisodeNumber.asc(),
                    )
                    seasons: dict[str, dict[str, Any]] = {}
                    series_monitored = 0
                    series_available = 0
                    for ep in episodes_query.iterator():
                        season_value = getattr(ep, "SeasonNumber", None)
                        season_key = str(season_value) if season_value is not None else "unknown"
                        season_bucket = seasons.setdefault(
                            season_key,
                            {"monitored": 0, "available": 0, "episodes": []},
                        )
                        is_monitored = self._safe_bool(getattr(ep, "Monitored", None))
                        has_file = self._safe_bool(getattr(ep, "EpisodeFileId", None))
                        if is_monitored:
                            season_bucket["monitored"] += 1
                            series_monitored += 1
                        if has_file:
                            season_bucket["available"] += 1
                            if is_monitored:
                                series_available += 1
                        air_date = getattr(ep, "AirDateUtc", None)
                        if hasattr(air_date, "isoformat"):
                            try:
                                air_value = air_date.isoformat()
                            except Exception:
                                air_value = str(air_date)
                        elif isinstance(air_date, str):
                            air_value = air_date
                        else:
                            air_value = ""
                        season_bucket["episodes"].append(
                            {
                                "episodeNumber": getattr(ep, "EpisodeNumber", None),
                                "title": getattr(ep, "Title", "") or "",
                                "monitored": is_monitored,
                                "hasFile": has_file,
                                "airDateUtc": air_value,
                                "reason": getattr(ep, "Reason", None),
                            }
                        )
                    for bucket in seasons.values():
                        monitored_eps = int(bucket.get("monitored", 0) or 0)
                        available_eps = int(bucket.get("available", 0) or 0)
                        bucket["missing"] = max(
                            monitored_eps - min(available_eps, monitored_eps), 0
                        )
                    series_missing = max(series_monitored - series_available, 0)
                    if missing_only:
                        seasons = {key: data for key, data in seasons.items() if data["episodes"]}
                        if not seasons:
                            continue
                    payload.append(
                        {
                            "series": {
                                "id": series_id,
                                "title": (
                                    series_title
                                    or (
                                        f"Series {len(payload) + 1}"
                                        if series_id is None
                                        else str(series_id)
                                    )
                                ),
                            },
                            "totals": {
                                "available": series_available,
                                "monitored": series_monitored,
                                "missing": series_missing,
                            },
                            "seasons": seasons,
                        }
                    )

            result = {
                "counts": {
                    "available": available_count,
                    "monitored": monitored_count,
                    "missing": missing_count,
                },
                "total": total_series,
                "page": resolved_page,
                "page_size": page_size,
                "series": payload,
            }
            if payload:
                first_series = payload[0]
                first_seasons = first_series.get("seasons", {})
                total_episodes_in_response = sum(
                    len(season.get("episodes", [])) for season in first_seasons.values()
                )
                self.logger.info(
                    f"[Sonarr API] Returning {len(payload)} series, "
                    f"first series '{first_series.get('series', {}).get('title', '?')}' has "
                    f"{len(first_seasons)} seasons, {total_episodes_in_response} episodes "
                    f"(missing_only={missing_only})"
                )
            return result

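    # Route handlers below close over `self`; require_token() gates the JSON
    # API while the static UI entry points stay reachable without a token.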
    # Routes
    def _register_routes(self):
        app = self.app
        logs_root = (HOME_PATH / "logs").resolve()

        def _resolve_log_file(name: str) -> Path | None:
            try:
                candidate = (logs_root / name).resolve(strict=False)
            except Exception:
                return None
            try:
                candidate.relative_to(logs_root)
            except ValueError:
                return None
            return candidate

        def _managed_objects() -> dict[str, Any]:
            arr_manager = getattr(self.manager, "arr_manager", None)
            return getattr(arr_manager, "managed_objects", {}) if arr_manager else {}

        def _ensure_arr_manager_ready() -> bool:
            return getattr(self.manager, "arr_manager", None) is not None

        @app.get("/health")
        def health():
            return jsonify({"status": "ok"})

        @app.get("/")
        def index():
            return redirect("/ui")

        def _authorized():
            if not self.token:
                return True
            supplied = request.headers.get("Authorization", "").removeprefix(
                "Bearer "
            ) or request.args.get("token")
            return supplied == self.token

        def require_token():
            if not _authorized():
                return jsonify({"error": "unauthorized"}), 401
            return None

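        # Example client call (illustrative): send the persisted WebUI.Token as
        #   Authorization: Bearer <token>
        # or append ?token=<token> to the request URL instead of the header.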
        @app.get("/ui")
        def ui_index():
            # Serve UI without requiring a token; API remains protected
            # Add cache-busting parameter based on config reload timestamp
            from flask import make_response

            response = make_response(redirect("/static/index.html"))
            # Prevent caching of the UI entry point
            response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
            response.headers["Pragma"] = "no-cache"
            response.headers["Expires"] = "0"
            return response

        @app.get("/sw.js")
        def service_worker():
            # Service worker must be served directly (not redirected) for PWA support
            # This allows the endpoint to be whitelisted in auth proxies (e.g., Authentik)
            import os

            from flask import send_from_directory

            static_dir = os.path.join(os.path.dirname(__file__), "static")
            response = send_from_directory(static_dir, "sw.js")
            # Prevent caching of the service worker to ensure updates are picked up
            response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
            response.headers["Pragma"] = "no-cache"
            response.headers["Expires"] = "0"
            return response

        def _processes_payload() -> dict[str, Any]:
            procs = []
            search_activity_map = fetch_search_activities()

            def _parse_timestamp(raw_value):
                if not raw_value:
                    return None
                try:
                    if isinstance(raw_value, (int, float)):
                        return datetime.fromtimestamp(raw_value, timezone.utc).isoformat()
                    if isinstance(raw_value, str):
                        trimmed = raw_value.rstrip("Z")
                        dt = datetime.fromisoformat(trimmed)
                        if raw_value.endswith("Z"):
                            dt = dt.replace(tzinfo=timezone.utc)
                        elif dt.tzinfo is None:
                            dt = dt.replace(tzinfo=timezone.utc)
                        return dt.astimezone(timezone.utc).isoformat()
                except Exception:
                    return None
                return None

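            # _parse_timestamp normalises epoch numbers and ISO-8601 strings
            # (with or without a trailing "Z") to UTC ISO strings, returning
            # None for anything it cannot parse.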
1254
|
+
def _format_queue_summary(arr_obj, record):
|
|
1255
|
+
if not isinstance(record, dict):
|
|
1256
|
+
return None
|
|
1257
|
+
pieces = []
|
|
1258
|
+
arr_type = (getattr(arr_obj, "type", "") or "").lower()
|
|
1259
|
+
if arr_type == "radarr":
|
|
1260
|
+
movie_info = record.get("movie") or {}
|
|
1261
|
+
title = movie_info.get("title")
|
|
1262
|
+
year = movie_info.get("year")
|
|
1263
|
+
release_title = record.get("title") or ""
|
|
1264
|
+
release_name = ""
|
|
1265
|
+
release_year = None
|
|
1266
|
+
if release_title:
|
|
1267
|
+
cleaned = release_title.split("/")[-1]
|
|
1268
|
+
cleaned = re.sub(r"\.[^.]+$", "", cleaned)
|
|
1269
|
+
cleaned = re.sub(r"[-_.]+", " ", cleaned).strip()
|
|
1270
|
+
release_name = cleaned
|
|
1271
|
+
match = re.match(
|
|
1272
|
+
r"(?P<name>.+?)\s+(?P<year>(?:19|20)\d{2})(?:\s|$)",
|
|
1273
|
+
cleaned,
|
|
1274
|
+
)
|
|
1275
|
+
if match:
|
|
1276
|
+
extracted_name = (match.group("name") or "").strip(" .-_")
|
|
1277
|
+
if extracted_name:
|
|
1278
|
+
release_name = re.sub(r"[-_.]+", " ", extracted_name).strip()
|
|
1279
|
+
release_year = match.group("year")
|
|
1280
|
+
if not title and release_name:
|
|
1281
|
+
title = release_name
|
|
1282
|
+
elif title and release_title and title == release_title and release_name:
|
|
1283
|
+
title = release_name
|
|
1284
|
+
if not year:
|
|
1285
|
+
year = release_year or record.get("year")
|
|
1286
|
+
if title:
|
|
1287
|
+
pieces.append(title)
|
|
1288
|
+
if year:
|
|
1289
|
+
pieces.append(str(year))
|
|
1290
|
+
elif arr_type == "sonarr":
|
|
1291
|
+
series = (record.get("series") or {}).get("title")
|
|
1292
|
+
episode = record.get("episode")
|
|
1293
|
+
if series:
|
|
1294
|
+
pieces.append(series)
|
|
1295
|
+
season = None
|
|
1296
|
+
episode_number = None
|
|
1297
|
+
if isinstance(episode, dict):
|
|
1298
|
+
season = episode.get("seasonNumber")
|
|
1299
|
+
episode_number = episode.get("episodeNumber")
|
|
1300
|
+
if season is not None and episode_number is not None:
|
|
1301
|
+
pieces.append(f"S{int(season):02d}E{int(episode_number):02d}")
|
|
1302
|
+
# Intentionally omit individual episode titles/status values
|
|
1303
|
+
else:
|
|
1304
|
+
title = record.get("title")
|
|
1305
|
+
if title:
|
|
1306
|
+
pieces.append(title)
|
|
1307
|
+
cleaned = [str(part) for part in pieces if part]
|
|
1308
|
+
return " | ".join(cleaned) if cleaned else None

            def _collect_metrics(arr_obj):
                metrics = {
                    "queue": None,
                    "category": None,
                    "summary": None,
                    "timestamp": None,
                    "metric_type": None,
                }
                manager_ref = getattr(arr_obj, "manager", None)
                if manager_ref and hasattr(manager_ref, "qbit_manager"):
                    qbit_manager = manager_ref.qbit_manager
                else:
                    qbit_manager = getattr(self.manager, "qbit_manager", self.manager)
                qbit_client = getattr(qbit_manager, "client", None)
                category = getattr(arr_obj, "category", None)

                if isinstance(arr_obj, FreeSpaceManager):
                    metrics["metric_type"] = "free-space"
                    if qbit_client:
                        try:
                            torrents = qbit_client.torrents_info(status_filter="all")
                            count = 0
                            for torrent in torrents:
                                tags = getattr(torrent, "tags", "") or ""
                                if "qBitrr-free_space_paused" in str(tags):
                                    count += 1
                            metrics["category"] = count
                            metrics["queue"] = count
                        except Exception:
                            pass
                    return metrics

                if isinstance(arr_obj, PlaceHolderArr):
                    metrics["metric_type"] = "category"
                    if qbit_client and category:
                        try:
                            torrents = qbit_client.torrents_info(
                                status_filter="all", category=category
                            )
                            count = sum(
                                1
                                for torrent in torrents
                                if getattr(torrent, "category", None) == category
                            )
                            metrics["queue"] = count
                            metrics["category"] = count
                        except Exception:
                            pass
                    return metrics

                # Standard Arr (Radarr/Sonarr)
                records = []
                client = getattr(arr_obj, "client", None)
                if client is not None:
                    try:
                        raw_queue = arr_obj.get_queue(
                            page=1, page_size=50, sort_direction="descending"
                        )
                        if isinstance(raw_queue, dict):
                            records = raw_queue.get("records", []) or []
                        else:
                            records = list(raw_queue or [])
                    except Exception:
                        records = []
                queue_count = len(records)
                if queue_count:
                    metrics["queue"] = queue_count
                if qbit_client and category:
                    try:
                        torrents = qbit_client.torrents_info(
                            status_filter="all", category=category
                        )
                        metrics["category"] = sum(
                            1
                            for torrent in torrents
                            if getattr(torrent, "category", None) == category
                        )
                    except Exception:
                        pass
                category_key = getattr(arr_obj, "category", None)
                if category_key:
                    entry = search_activity_map.get(str(category_key))
                    if isinstance(entry, Mapping):
                        summary = entry.get("summary")
                        timestamp = entry.get("timestamp")
                        if summary:
                            metrics["summary"] = summary
                        if timestamp:
                            metrics["timestamp"] = timestamp
                if metrics["summary"] is None and not getattr(arr_obj, "_webui_db_loaded", True):
                    metrics["summary"] = "Updating database"
                return metrics

            # Cache metrics per Arr object (keyed by id) so the search and
            # torrent rows for the same instance share a single lookup.
            metrics_cache: dict[int, dict[str, object]] = {}

            def _populate_process_metadata(arr_obj, proc_kind, payload_dict):
                metrics = metrics_cache.get(id(arr_obj))
                if metrics is None:
                    metrics = _collect_metrics(arr_obj)
                    metrics_cache[id(arr_obj)] = metrics
                if proc_kind == "search":
                    category_key = getattr(arr_obj, "category", None)
                    entry = None
                    if category_key:
                        entry = search_activity_map.get(str(category_key))
                    summary = None
                    timestamp = None
                    if isinstance(entry, Mapping):
                        summary = entry.get("summary")
                        timestamp = entry.get("timestamp")
                    if summary is None:
                        summary = getattr(arr_obj, "last_search_description", None)
                        timestamp = getattr(arr_obj, "last_search_timestamp", None)
                    if summary is None:
                        metrics_summary = metrics.get("summary")
                        if metrics_summary:
                            summary = metrics_summary
                        metrics_timestamp = metrics.get("timestamp")
                        if metrics_timestamp:
                            timestamp = metrics_timestamp
                    if summary:
                        payload_dict["searchSummary"] = summary
                        if timestamp:
                            if isinstance(timestamp, datetime):
                                payload_dict["searchTimestamp"] = timestamp.astimezone(
                                    timezone.utc
                                ).isoformat()
                            else:
                                payload_dict["searchTimestamp"] = str(timestamp)
                    elif category_key:
                        key = str(category_key)
                        clear_search_activity(key)
                        search_activity_map.pop(key, None)
                elif proc_kind == "torrent":
                    queue_count = metrics.get("queue")
                    if queue_count is None:
                        queue_count = getattr(arr_obj, "queue_active_count", None)
                    category_count = metrics.get("category")
                    if category_count is None:
                        category_count = getattr(arr_obj, "category_torrent_count", None)
                    metric_type = metrics.get("metric_type")
                    if queue_count is not None:
                        payload_dict["queueCount"] = queue_count
                    if category_count is not None:
                        payload_dict["categoryCount"] = category_count
                    if metric_type:
                        payload_dict["metricType"] = metric_type

            for arr in _managed_objects().values():
                name = getattr(arr, "_name", "unknown")
                cat = getattr(arr, "category", name)
                for kind in ("search", "torrent"):
                    p = getattr(arr, f"process_{kind}_loop", None)
                    if p is None:
                        continue
                    try:
                        payload = {
                            "category": cat,
                            "name": name,
                            "kind": kind,
                            "pid": getattr(p, "pid", None),
                            "alive": bool(p.is_alive()),
                            "rebuilding": self._rebuilding_arrs,
                        }
                        _populate_process_metadata(arr, kind, payload)
                        procs.append(payload)
                    except Exception:
                        payload = {
                            "category": cat,
                            "name": name,
                            "kind": kind,
                            "pid": getattr(p, "pid", None),
                            "alive": False,
                            "rebuilding": self._rebuilding_arrs,
                        }
                        _populate_process_metadata(arr, kind, payload)
                        procs.append(payload)
            return {"processes": procs}

        @app.get("/api/processes")
        def api_processes():
            if (resp := require_token()) is not None:
                return resp
            return jsonify(_processes_payload())

        # UI endpoints (mirror of /api/* for first-party WebUI clients)
        @app.get("/web/processes")
        def web_processes():
            return jsonify(_processes_payload())
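
        # Editor's sketch (illustrative, not part of webui.py): a minimal
        # client for the processes feed above. The base URL uses the default
        # port 6969; the `requests` dependency and the bearer-token header
        # are assumptions -- require_token() (defined elsewhere) holds the
        # real scheme.
        def _example_poll_processes(base="http://localhost:6969", token="<token>"):
            import requests

            resp = requests.get(
                f"{base}/api/processes",
                headers={"Authorization": f"Bearer {token}"},
                timeout=10,
            )
            resp.raise_for_status()
            # Each entry carries the metadata attached by
            # _populate_process_metadata(): searchSummary, queueCount, etc.
            return resp.json()["processes"]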

        def _restart_process(category: str, kind: str):
            kind_normalized = kind.lower()
            if kind_normalized not in ("search", "torrent", "all"):
                return jsonify({"error": "kind must be search, torrent or all"}), 400
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            arr = managed.get(category)
            if arr is None:
                return jsonify({"error": f"Unknown category {category}"}), 404
            restarted: list[str] = []
            for loop_kind in ("search", "torrent"):
                if kind_normalized != "all" and loop_kind != kind_normalized:
                    continue
                proc_attr = f"process_{loop_kind}_loop"
                process = getattr(arr, proc_attr, None)
                if process is not None:
                    try:
                        process.kill()
                    except Exception:
                        pass
                    try:
                        process.terminate()
                    except Exception:
                        pass
                    try:
                        self.manager.child_processes.remove(process)
                    except Exception:
                        pass
                target = getattr(arr, f"run_{loop_kind}_loop", None)
                if target is None:
                    continue
                import pathos

                new_process = pathos.helpers.mp.Process(target=target, daemon=False)
                setattr(arr, proc_attr, new_process)
                self.manager.child_processes.append(new_process)
                new_process.start()
                restarted.append(loop_kind)
            return jsonify({"status": "ok", "restarted": restarted})

        @app.post("/api/processes/<category>/<kind>/restart")
        def api_restart_process(category: str, kind: str):
            if (resp := require_token()) is not None:
                return resp
            return _restart_process(category, kind)

        @app.post("/web/processes/<category>/<kind>/restart")
        def web_restart_process(category: str, kind: str):
            return _restart_process(category, kind)

        @app.post("/api/processes/restart_all")
        def api_restart_all():
            if (resp := require_token()) is not None:
                return resp
            self._reload_all()
            return jsonify({"status": "ok"})

        @app.post("/web/processes/restart_all")
        def web_restart_all():
            self._reload_all()
            return jsonify({"status": "ok"})

        @app.post("/api/loglevel")
        def api_loglevel():
            if (resp := require_token()) is not None:
                return resp
            body = request.get_json(silent=True) or {}
            level = str(body.get("level", "INFO")).upper()
            valid = {"CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG", "TRACE"}
            if level not in valid:
                return jsonify({"error": f"invalid level {level}"}), 400
            target_level = getattr(logging, level, logging.INFO)
            logging.getLogger().setLevel(target_level)
            for name, lg in logging.root.manager.loggerDict.items():
                if isinstance(lg, logging.Logger) and str(name).startswith("qBitrr"):
                    lg.setLevel(target_level)
            try:
                _toml_set(CONFIG.config, "Settings.ConsoleLevel", level)
                CONFIG.save()
            except Exception:
                pass
            return jsonify({"status": "ok", "level": level})

        @app.post("/web/loglevel")
        def web_loglevel():
            body = request.get_json(silent=True) or {}
            level = str(body.get("level", "INFO")).upper()
            valid = {"CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG", "TRACE"}
            if level not in valid:
                return jsonify({"error": f"invalid level {level}"}), 400
            target_level = getattr(logging, level, logging.INFO)
            logging.getLogger().setLevel(target_level)
            for name, lg in logging.root.manager.loggerDict.items():
                if isinstance(lg, logging.Logger) and str(name).startswith("qBitrr"):
                    lg.setLevel(target_level)
            try:
                _toml_set(CONFIG.config, "Settings.ConsoleLevel", level)
                CONFIG.save()
            except Exception:
                pass
            return jsonify({"status": "ok", "level": level})

        @app.post("/api/arr/rebuild")
        def api_arr_rebuild():
            if (resp := require_token()) is not None:
                return resp
            self._reload_all()
            return jsonify({"status": "ok"})

        @app.post("/web/arr/rebuild")
        def web_arr_rebuild():
            self._reload_all()
            return jsonify({"status": "ok"})

        def _list_logs() -> list[str]:
            if not logs_root.exists():
                return []
            # Add "All Logs" as first option
            log_files = sorted(f.name for f in logs_root.glob("*.log*"))
            return ["All Logs"] + log_files if log_files else []

        @app.get("/api/logs")
        def api_logs():
            if (resp := require_token()) is not None:
                return resp
            return jsonify({"files": _list_logs()})

        @app.get("/web/logs")
        def web_logs():
            return jsonify({"files": _list_logs()})

        @app.get("/api/logs/<name>")
        def api_log(name: str):
            if (resp := require_token()) is not None:
                return resp
            file = _resolve_log_file(name)
            if file is None or not file.exists():
                return jsonify({"error": "not found"}), 404
            # Return last 2000 lines
            try:
                content = file.read_text(encoding="utf-8", errors="ignore").splitlines()
                tail = "\n".join(content[-2000:])
            except Exception:
                tail = ""
            return send_file(io.BytesIO(tail.encode("utf-8")), mimetype="text/plain")

        @app.get("/web/logs/<name>")
        def web_log(name: str):
            # Handle "All Logs" special case - serve the unified All.log file
            if name == "All Logs":
                name = "All.log"

            # Regular single log file
            file = _resolve_log_file(name)
            if file is None or not file.exists():
                return jsonify({"error": "not found"}), 404
            try:
                content = file.read_text(encoding="utf-8", errors="ignore").splitlines()
                tail = "\n".join(content[-2000:])
            except Exception:
                tail = ""
            return send_file(io.BytesIO(tail.encode("utf-8")), mimetype="text/plain")

        @app.get("/api/logs/<name>/download")
        def api_log_download(name: str):
            if (resp := require_token()) is not None:
                return resp
            file = _resolve_log_file(name)
            if file is None or not file.exists():
                return jsonify({"error": "not found"}), 404
            return send_file(file, as_attachment=True)

        @app.get("/web/logs/<name>/download")
        def web_log_download(name: str):
            file = _resolve_log_file(name)
            if file is None or not file.exists():
                return jsonify({"error": "not found"}), 404
            return send_file(file, as_attachment=True)
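
        # Editor's sketch (illustrative, not part of webui.py): listing log
        # files and fetching the 2000-line tail served by the endpoints above.
        # "All Logs" maps to All.log on the /web route only. Port and token
        # scheme are assumptions.
        def _example_tail_log(base="http://localhost:6969", token="<token>"):
            import requests

            headers = {"Authorization": f"Bearer {token}"}
            files = requests.get(f"{base}/api/logs", headers=headers, timeout=10).json()["files"]
            if not files:
                return ""
            resp = requests.get(f"{base}/api/logs/{files[-1]}", headers=headers, timeout=10)
            resp.raise_for_status()
            return resp.text  # plain-text tail of the last listed file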

        @app.get("/api/radarr/<category>/movies")
        def api_radarr_movies(category: str):
            if (resp := require_token()) is not None:
                return resp
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            arr = managed.get(category)
            if arr is None or getattr(arr, "type", None) != "radarr":
                return jsonify({"error": f"Unknown radarr category {category}"}), 404
            q = request.args.get("q", default=None, type=str)
            page = request.args.get("page", default=0, type=int)
            page_size = request.args.get("page_size", default=50, type=int)
            year_min = request.args.get("year_min", default=None, type=int)
            year_max = request.args.get("year_max", default=None, type=int)
            monitored = (
                self._safe_bool(request.args.get("monitored"))
                if "monitored" in request.args
                else None
            )
            has_file = (
                self._safe_bool(request.args.get("has_file"))
                if "has_file" in request.args
                else None
            )
            quality_met = (
                self._safe_bool(request.args.get("quality_met"))
                if "quality_met" in request.args
                else None
            )
            is_request = (
                self._safe_bool(request.args.get("is_request"))
                if "is_request" in request.args
                else None
            )
            payload = self._radarr_movies_from_db(
                arr,
                q,
                page,
                page_size,
                year_min=year_min,
                year_max=year_max,
                monitored=monitored,
                has_file=has_file,
                quality_met=quality_met,
                is_request=is_request,
            )
            payload["category"] = category
            return jsonify(payload)

        @app.get("/web/radarr/<category>/movies")
        def web_radarr_movies(category: str):
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            arr = managed.get(category)
            if arr is None or getattr(arr, "type", None) != "radarr":
                return jsonify({"error": f"Unknown radarr category {category}"}), 404
            q = request.args.get("q", default=None, type=str)
            page = request.args.get("page", default=0, type=int)
            page_size = request.args.get("page_size", default=50, type=int)
            year_min = request.args.get("year_min", default=None, type=int)
            year_max = request.args.get("year_max", default=None, type=int)
            monitored = (
                self._safe_bool(request.args.get("monitored"))
                if "monitored" in request.args
                else None
            )
            has_file = (
                self._safe_bool(request.args.get("has_file"))
                if "has_file" in request.args
                else None
            )
            quality_met = (
                self._safe_bool(request.args.get("quality_met"))
                if "quality_met" in request.args
                else None
            )
            is_request = (
                self._safe_bool(request.args.get("is_request"))
                if "is_request" in request.args
                else None
            )
            payload = self._radarr_movies_from_db(
                arr,
                q,
                page,
                page_size,
                year_min=year_min,
                year_max=year_max,
                monitored=monitored,
                has_file=has_file,
                quality_met=quality_met,
                is_request=is_request,
            )
            payload["category"] = category
            return jsonify(payload)
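
        # Editor's sketch (illustrative, not part of webui.py): querying the
        # Radarr movie listing above with a few of its filters. Pagination is
        # zero-based, and boolean filters apply only when the query parameter
        # is present. The "radarr" category name, port, and token scheme are
        # assumptions.
        def _example_radarr_missing(base="http://localhost:6969", token="<token>"):
            import requests

            resp = requests.get(
                f"{base}/api/radarr/radarr/movies",
                params={"page": 0, "page_size": 50, "monitored": "true", "has_file": "false"},
                headers={"Authorization": f"Bearer {token}"},
                timeout=30,
            )
            resp.raise_for_status()
            return resp.json()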

        @app.get("/api/sonarr/<category>/series")
        def api_sonarr_series(category: str):
            if (resp := require_token()) is not None:
                return resp
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            arr = managed.get(category)
            if arr is None or getattr(arr, "type", None) != "sonarr":
                return jsonify({"error": f"Unknown sonarr category {category}"}), 404
            q = request.args.get("q", default=None, type=str)
            page = request.args.get("page", default=0, type=int)
            page_size = request.args.get("page_size", default=25, type=int)
            missing_only = self._safe_bool(
                request.args.get("missing") or request.args.get("only_missing")
            )
            payload = self._sonarr_series_from_db(
                arr, q, page, page_size, missing_only=missing_only
            )
            payload["category"] = category
            return jsonify(payload)

        @app.get("/web/sonarr/<category>/series")
        def web_sonarr_series(category: str):
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            arr = managed.get(category)
            if arr is None or getattr(arr, "type", None) != "sonarr":
                return jsonify({"error": f"Unknown sonarr category {category}"}), 404
            q = request.args.get("q", default=None, type=str)
            page = request.args.get("page", default=0, type=int)
            page_size = request.args.get("page_size", default=25, type=int)
            missing_only = self._safe_bool(
                request.args.get("missing") or request.args.get("only_missing")
            )
            payload = self._sonarr_series_from_db(
                arr, q, page, page_size, missing_only=missing_only
            )
            payload["category"] = category
            return jsonify(payload)
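
        # Editor's sketch (illustrative, not part of webui.py): the series
        # listing above accepts either "missing" or "only_missing" for the
        # same filter. The "sonarr" category, port, and token scheme are
        # assumptions.
        def _example_sonarr_missing(base="http://localhost:6969", token="<token>"):
            import requests

            resp = requests.get(
                f"{base}/api/sonarr/sonarr/series",
                params={"page": 0, "page_size": 25, "missing": "true"},
                headers={"Authorization": f"Bearer {token}"},
                timeout=30,
            )
            resp.raise_for_status()
            return resp.json()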

        @app.get("/web/lidarr/<category>/albums")
        def web_lidarr_albums(category: str):
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            arr = managed.get(category)
            if arr is None or getattr(arr, "type", None) != "lidarr":
                return jsonify({"error": f"Unknown lidarr category {category}"}), 404
            q = request.args.get("q", default=None, type=str)
            page = request.args.get("page", default=0, type=int)
            page_size = request.args.get("page_size", default=50, type=int)
            monitored = (
                self._safe_bool(request.args.get("monitored"))
                if "monitored" in request.args
                else None
            )
            has_file = (
                self._safe_bool(request.args.get("has_file"))
                if "has_file" in request.args
                else None
            )
            quality_met = (
                self._safe_bool(request.args.get("quality_met"))
                if "quality_met" in request.args
                else None
            )
            is_request = (
                self._safe_bool(request.args.get("is_request"))
                if "is_request" in request.args
                else None
            )
            flat_mode = self._safe_bool(request.args.get("flat_mode", False))

            if flat_mode:
                # Flat mode: return tracks directly
                payload = self._lidarr_tracks_from_db(
                    arr,
                    q,
                    page,
                    page_size,
                    monitored=monitored,
                    has_file=has_file,
                )
            else:
                # Grouped mode: return albums with tracks (always)
                payload = self._lidarr_albums_from_db(
                    arr,
                    q,
                    page,
                    page_size,
                    monitored=monitored,
                    has_file=has_file,
                    quality_met=quality_met,
                    is_request=is_request,
                )
            payload["category"] = category
            return jsonify(payload)
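
        # Editor's sketch (illustrative, not part of webui.py): unlike Radarr
        # and Sonarr, only a /web route is registered for Lidarr above, so no
        # token is needed here. flat_mode switches between raw track rows and
        # albums grouped with their tracks. The "lidarr" category and port
        # are assumptions.
        def _example_lidarr_albums(base="http://localhost:6969", flat=False):
            import requests

            resp = requests.get(
                f"{base}/web/lidarr/lidarr/albums",
                params={"page": 0, "page_size": 50, "flat_mode": "true" if flat else "false"},
                timeout=30,
            )
            resp.raise_for_status()
            return resp.json()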

        def _arr_list_payload() -> dict[str, Any]:
            items = []
            for k, arr in _managed_objects().items():
                t = getattr(arr, "type", None)
                if t in ("radarr", "sonarr", "lidarr"):
                    name = getattr(arr, "_name", k)
                    category = getattr(arr, "category", k)
                    items.append({"category": category, "name": name, "type": t})
            return {"arr": items, "ready": _ensure_arr_manager_ready()}

        @app.get("/api/arr")
        def api_arr_list():
            if (resp := require_token()) is not None:
                return resp
            return jsonify(_arr_list_payload())

        @app.get("/web/arr")
        def web_arr_list():
            return jsonify(_arr_list_payload())

        @app.get("/api/meta")
        def api_meta():
            if (resp := require_token()) is not None:
                return resp
            force = self._safe_bool(request.args.get("force"))
            return jsonify(self._ensure_version_info(force=force))

        @app.get("/web/meta")
        def web_meta():
            force = self._safe_bool(request.args.get("force"))
            return jsonify(self._ensure_version_info(force=force))

        @app.post("/api/update")
        def api_update():
            if (resp := require_token()) is not None:
                return resp
            ok, message = self._trigger_manual_update()
            if not ok:
                return jsonify({"error": message}), 409
            return jsonify({"status": "started"})

        @app.post("/web/update")
        def web_update():
            ok, message = self._trigger_manual_update()
            if not ok:
                return jsonify({"error": message}), 409
            return jsonify({"status": "started"})

        @app.get("/api/download-update")
        def api_download_update():
            """Redirect to binary download URL for current platform."""
            if (resp := require_token()) is not None:
                return resp

            from qBitrr.auto_update import get_installation_type

            install_type = get_installation_type()

            if install_type != "binary":
                return jsonify({"error": "Download only available for binary installations"}), 400

            # Get latest version info
            version_info = self._ensure_version_info()

            if not version_info.get("update_available"):
                return jsonify({"error": "No update available"}), 404

            download_url = version_info.get("binary_download_url")
            if not download_url:
                error = version_info.get(
                    "binary_download_error", "No binary available for your platform"
                )
                return jsonify({"error": error}), 404

            # Redirect to GitHub download URL (redirect is imported at module level)
            return redirect(download_url)

        @app.get("/web/download-update")
        def web_download_update():
            """Redirect to binary download URL for current platform."""
            from qBitrr.auto_update import get_installation_type

            install_type = get_installation_type()

            if install_type != "binary":
                return jsonify({"error": "Download only available for binary installations"}), 400

            # Get latest version info
            version_info = self._ensure_version_info()

            if not version_info.get("update_available"):
                return jsonify({"error": "No update available"}), 404

            download_url = version_info.get("binary_download_url")
            if not download_url:
                error = version_info.get(
                    "binary_download_error", "No binary available for your platform"
                )
                return jsonify({"error": error}), 404

            # Redirect to GitHub download URL (redirect is imported at module level)
            return redirect(download_url)
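
        # Editor's sketch (illustrative, not part of webui.py): the typical
        # update flow against the endpoints above -- read /api/meta, and only
        # POST /api/update when an update is reported; a 409 means an update
        # is already in flight. Port and token scheme are assumptions.
        def _example_update_if_available(base="http://localhost:6969", token="<token>"):
            import requests

            headers = {"Authorization": f"Bearer {token}"}
            meta = requests.get(f"{base}/api/meta", headers=headers, timeout=30).json()
            if not meta.get("update_available"):
                return "up to date"
            resp = requests.post(f"{base}/api/update", headers=headers, timeout=30)
            return resp.json()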

        def _status_payload() -> dict[str, Any]:
            qb = {
                "alive": bool(self.manager.is_alive),
                "host": self.manager.qBit_Host,
                "port": self.manager.qBit_Port,
                "version": (
                    str(self.manager.current_qbit_version)
                    if self.manager.current_qbit_version
                    else None
                ),
            }
            arrs = []
            for k, arr in _managed_objects().items():
                t = getattr(arr, "type", None)
                if t in ("radarr", "sonarr", "lidarr"):
                    # Determine liveness based on child search/torrent processes
                    alive = False
                    for loop in ("search", "torrent"):
                        p = getattr(arr, f"process_{loop}_loop", None)
                        if p is not None:
                            try:
                                if p.is_alive():
                                    alive = True
                                    break
                            except Exception:
                                pass
                    name = getattr(arr, "_name", k)
                    category = getattr(arr, "category", k)
                    arrs.append({"category": category, "name": name, "type": t, "alive": alive})
            return {"qbit": qb, "arrs": arrs, "ready": _ensure_arr_manager_ready()}

        @app.get("/api/status")
        def api_status():
            if (resp := require_token()) is not None:
                return resp
            return jsonify(_status_payload())

        @app.get("/web/status")
        def web_status():
            return jsonify(_status_payload())

        @app.get("/api/token")
        def api_token():
            if (resp := require_token()) is not None:
                return resp
            # Expose token for API clients only; UI uses /web endpoints
            return jsonify({"token": self.token})

        @app.post("/api/arr/<section>/restart")
        def api_arr_restart(section: str):
            if (resp := require_token()) is not None:
                return resp
            # Section is the category key in managed_objects
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            if section not in managed:
                return jsonify({"error": f"Unknown section {section}"}), 404
            arr = managed[section]
            # Restart both loops for this arr
            restarted = []
            for k in ("search", "torrent"):
                proc_attr = f"process_{k}_loop"
                p = getattr(arr, proc_attr, None)
                if p is not None:
                    try:
                        p.kill()
                    except Exception:
                        pass
                    try:
                        p.terminate()
                    except Exception:
                        pass
                    try:
                        self.manager.child_processes.remove(p)
                    except Exception:
                        pass
                import pathos

                target = getattr(arr, f"run_{k}_loop", None)
                if target is None:
                    continue
                new_p = pathos.helpers.mp.Process(target=target, daemon=False)
                setattr(arr, proc_attr, new_p)
                self.manager.child_processes.append(new_p)
                new_p.start()
                restarted.append(k)
            return jsonify({"status": "ok", "restarted": restarted})

        @app.post("/web/arr/<section>/restart")
        def web_arr_restart(section: str):
            managed = _managed_objects()
            if not managed:
                if not _ensure_arr_manager_ready():
                    return jsonify({"error": "Arr manager is still initialising"}), 503
            if section not in managed:
                return jsonify({"error": f"Unknown section {section}"}), 404
            arr = managed[section]
            restarted = []
            for k in ("search", "torrent"):
                proc_attr = f"process_{k}_loop"
                p = getattr(arr, proc_attr, None)
                if p is not None:
                    try:
                        p.kill()
                    except Exception:
                        pass
                    try:
                        p.terminate()
                    except Exception:
                        pass
                    try:
                        self.manager.child_processes.remove(p)
                    except Exception:
                        pass
                import pathos

                target = getattr(arr, f"run_{k}_loop", None)
                if target is None:
                    continue
                new_p = pathos.helpers.mp.Process(target=target, daemon=False)
                setattr(arr, proc_attr, new_p)
                self.manager.child_processes.append(new_p)
                new_p.start()
                restarted.append(k)
            return jsonify({"status": "ok", "restarted": restarted})

        @app.get("/api/config")
        def api_get_config():
            if (resp := require_token()) is not None:
                return resp
            try:
                # Reload config from disk to reflect latest file
                try:
                    CONFIG.load()
                except Exception:
                    pass
                # Render current config as a JSON-able dict via tomlkit
                data = _toml_to_jsonable(CONFIG.config)
                return jsonify(data)
            except Exception as e:
                return jsonify({"error": str(e)}), 500

        @app.get("/web/config")
        def web_get_config():
            try:
                try:
                    CONFIG.load()
                except Exception:
                    pass
                data = _toml_to_jsonable(CONFIG.config)
                return jsonify(data)
            except Exception as e:
                return jsonify({"error": str(e)}), 500

        def _handle_config_update():
            """Common handler for config updates with intelligent reload detection."""
            body = request.get_json(silent=True) or {}
            changes: dict[str, Any] = body.get("changes", {})
            if not isinstance(changes, dict):
                return jsonify({"error": "changes must be an object"}), 400

            # Define key categories
            frontend_only_keys = {
                "WebUI.LiveArr",
                "WebUI.GroupSonarr",
                "WebUI.GroupLidarr",
                "WebUI.Theme",
            }
            webui_restart_keys = {
                "WebUI.Host",
                "WebUI.Port",
                "WebUI.Token",
            }

            # Analyze changes to determine reload strategy
            affected_arr_instances = set()
            has_global_changes = False
            has_webui_changes = False
            has_frontend_only_changes = False

            for key in changes.keys():
                if key in frontend_only_keys:
                    has_frontend_only_changes = True
                elif key in webui_restart_keys:
                    has_webui_changes = True
                elif key.startswith("WebUI."):
                    # Unknown WebUI key, treat as webui change for safety
                    has_webui_changes = True
                elif match := re.match(
                    r"^(Radarr|Sonarr|Lidarr|Animarr)[^.]*\.(.+)$", key, re.IGNORECASE
                ):
                    # Arr instance specific change
                    instance_name = key.split(".")[0]
                    affected_arr_instances.add(instance_name)
                else:
                    # Settings.*, qBit.*, or unknown - requires full reload
                    has_global_changes = True

            # Apply all changes to config
            for key, val in changes.items():
                if val is None:
                    _toml_delete(CONFIG.config, key)
                    if key == "WebUI.Token":
                        self.token = ""
                    continue
                _toml_set(CONFIG.config, key, val)
                if key == "WebUI.Token":
                    # Update in-memory token immediately
                    self.token = str(val) if val is not None else ""

            # Persist config
            try:
                CONFIG.save()
            except Exception as e:
                return jsonify({"error": f"Failed to save config: {e}"}), 500

            # Determine reload strategy
            reload_type = "none"
            affected_instances_list = []

            if has_global_changes:
                # Global settings changed - full reload required
                # This affects ALL instances (qBit settings, loop timers, etc.)
                reload_type = "full"
                self.logger.notice("Global settings changed, performing full reload")
                try:
                    self.manager.configure_auto_update()
                except Exception:
                    self.logger.exception("Failed to refresh auto update configuration")
                self._reload_all()

            elif len(affected_arr_instances) >= 1:
                # One or more Arr instances changed - reload each individually
                # NEVER trigger global reload for Arr-only changes
                reload_type = "multi_arr" if len(affected_arr_instances) > 1 else "single_arr"
                affected_instances_list = sorted(affected_arr_instances)

                self.logger.notice(
                    f"Reloading {len(affected_instances_list)} Arr instance(s): {', '.join(affected_instances_list)}"
                )

                # Reload each affected instance in sequence
                for instance_name in affected_instances_list:
                    self._reload_arr_instance(instance_name)

            elif has_webui_changes:
                # Only WebUI settings changed - restart WebUI
                reload_type = "webui"
                self.logger.notice("WebUI settings changed, restarting WebUI server")
                # Run restart in background thread to avoid blocking response
                restart_thread = threading.Thread(
                    target=self._restart_webui, name="WebUIRestart", daemon=True
                )
                restart_thread.start()

            elif has_frontend_only_changes:
                # Only frontend settings changed - no reload
                reload_type = "frontend"
                self.logger.debug("Frontend-only settings changed, no reload required")

            # Build response
            response_data = {
                "status": "ok",
                "configReloaded": reload_type not in ("none", "frontend"),
                "reloadType": reload_type,
                "affectedInstances": affected_instances_list,
            }

            response = jsonify(response_data)

            # Add headers for cache control
            response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
            response.headers["Pragma"] = "no-cache"
            response.headers["Expires"] = "0"

            # Legacy header for compatibility
            if reload_type in ("full", "single_arr", "multi_arr", "webui"):
                response.headers["X-Config-Reloaded"] = "true"

            return response

        @app.post("/api/config")
        def api_update_config():
            if (resp := require_token()) is not None:
                return resp
            return _handle_config_update()

        @app.post("/web/config")
        def web_update_config():
            return _handle_config_update()
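
        # Editor's sketch (illustrative, not part of webui.py): the POST body
        # for the config endpoints above is {"changes": {dotted.key: value}},
        # with null deleting a key. The response's reloadType reports which
        # strategy _handle_config_update() picked. The key names shown are
        # examples; port and token scheme are assumptions.
        def _example_patch_config(base="http://localhost:6969", token="<token>"):
            import requests

            resp = requests.post(
                f"{base}/api/config",
                json={"changes": {"Settings.ConsoleLevel": "INFO", "WebUI.Theme": "dark"}},
                headers={"Authorization": f"Bearer {token}"},
                timeout=30,
            )
            resp.raise_for_status()
            # "full" here: the Settings.* key forces a global reload even
            # though WebUI.Theme alone would be frontend-only.
            return resp.json()["reloadType"]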

    def _reload_all(self):
        # Set rebuilding flag
        self._rebuilding_arrs = True
        try:
            # Stop current processes
            for p in list(self.manager.child_processes):
                try:
                    p.kill()
                except Exception:
                    pass
                try:
                    p.terminate()
                except Exception:
                    pass
            self.manager.child_processes.clear()

            # Delete database files for all arr instances before rebuilding
            if hasattr(self.manager, "arr_manager") and self.manager.arr_manager:
                for arr in self.manager.arr_manager.managed_objects.values():
                    try:
                        if hasattr(arr, "search_db_file") and arr.search_db_file:
                            # Delete main database file
                            if arr.search_db_file.exists():
                                self.logger.info(f"Deleting database file: {arr.search_db_file}")
                                arr.search_db_file.unlink()
                                self.logger.success(f"Deleted database file for {arr._name}")
                            # Delete WAL file (Write-Ahead Log)
                            wal_file = arr.search_db_file.with_suffix(".db-wal")
                            if wal_file.exists():
                                self.logger.info(f"Deleting WAL file: {wal_file}")
                                wal_file.unlink()
                            # Delete SHM file (Shared Memory)
                            shm_file = arr.search_db_file.with_suffix(".db-shm")
                            if shm_file.exists():
                                self.logger.info(f"Deleting SHM file: {shm_file}")
                                shm_file.unlink()
                    except Exception as e:
                        self.logger.warning(
                            f"Failed to delete database files for {arr._name}: {e}"
                        )

            # Rebuild arr manager from config and spawn fresh
            from qBitrr.arss import ArrManager

            self.manager.arr_manager = ArrManager(self.manager).build_arr_instances()
            self.manager.configure_auto_update()
            # Spawn and start new processes
            for arr in self.manager.arr_manager.managed_objects.values():
                _, procs = arr.spawn_child_processes()
                for p in procs:
                    try:
                        p.start()
                    except Exception:
                        pass
        finally:
            # Clear rebuilding flag
            self._rebuilding_arrs = False

    def _restart_webui(self):
        """
        Gracefully restart the WebUI server without affecting Arr processes.
        This is used when WebUI.Host, WebUI.Port, or WebUI.Token changes.
        """
        self.logger.notice("WebUI restart requested (config changed)")

        # Reload config values
        try:
            CONFIG.load()
        except Exception as e:
            self.logger.warning(f"Failed to reload config: {e}")

        # Update in-memory values
        new_host = CONFIG.get("WebUI.Host", fallback="0.0.0.0")
        new_port = CONFIG.get("WebUI.Port", fallback=6969)
        new_token = CONFIG.get("WebUI.Token", fallback=None)

        # Check if restart is actually needed
        needs_restart = new_host != self.host or new_port != self.port

        # Token can be updated without restart
        if new_token != self.token:
            self.token = new_token
            self.logger.info("WebUI token updated")

        if not needs_restart:
            self.logger.info("WebUI Host/Port unchanged, restart not required")
            return

        # Update host/port
        self.host = new_host
        self.port = new_port

        # Signal restart
        self._restart_requested = True
        self._shutdown_event.set()

        self.logger.info(f"WebUI will restart on {self.host}:{self.port}")

    def _stop_arr_instance(self, arr, category: str):
        """Stop and cleanup a single Arr instance."""
        self.logger.info(f"Stopping Arr instance: {category}")

        # Stop processes
        for loop_kind in ("search", "torrent"):
            proc_attr = f"process_{loop_kind}_loop"
            process = getattr(arr, proc_attr, None)
            if process is not None:
                try:
                    process.kill()
                except Exception:
                    pass
                try:
                    process.terminate()
                except Exception:
                    pass
                try:
                    self.manager.child_processes.remove(process)
                except Exception:
                    pass
                self.logger.debug(f"Stopped {loop_kind} process for {category}")

        # Delete database files
        try:
            if hasattr(arr, "search_db_file") and arr.search_db_file:
                if arr.search_db_file.exists():
                    self.logger.info(f"Deleting database file: {arr.search_db_file}")
                    arr.search_db_file.unlink()
                    self.logger.success(
                        f"Deleted database file for {getattr(arr, '_name', category)}"
                    )
                # Delete WAL and SHM files
                for suffix in (".db-wal", ".db-shm"):
                    aux_file = arr.search_db_file.with_suffix(suffix)
                    if aux_file.exists():
                        self.logger.debug(f"Deleting auxiliary file: {aux_file}")
                        aux_file.unlink()
        except Exception as e:
            self.logger.warning(
                f"Failed to delete database files for {getattr(arr, '_name', category)}: {e}"
            )

        # Remove from managed_objects
        self.manager.arr_manager.managed_objects.pop(category, None)
        self.manager.arr_manager.groups.discard(getattr(arr, "_name", ""))
        self.manager.arr_manager.uris.discard(getattr(arr, "uri", ""))
        self.manager.arr_manager.arr_categories.discard(category)

        self.logger.success(f"Stopped and cleaned up Arr instance: {category}")

    def _start_arr_instance(self, instance_name: str):
        """Create and start a single Arr instance."""
        self.logger.info(f"Starting Arr instance: {instance_name}")

        # Check if instance is managed
        if not CONFIG.get(f"{instance_name}.Managed", fallback=False):
            self.logger.info(f"Instance {instance_name} is not managed, skipping")
            return

        # Determine client class based on name
        client_cls = None
        if re.match(r"^(Rad|rad)arr", instance_name):
            from pyarr import RadarrAPI

            client_cls = RadarrAPI
        elif re.match(r"^(Son|son|Anim|anim)arr", instance_name):
            from pyarr import SonarrAPI

            client_cls = SonarrAPI
        elif re.match(r"^(Lid|lid)arr", instance_name):
            from pyarr import LidarrAPI

            client_cls = LidarrAPI
        else:
            self.logger.error(f"Unknown Arr type for instance: {instance_name}")
            return

        try:
            # Create new Arr instance
            from qBitrr.arss import Arr
            from qBitrr.errors import SkipException

            new_arr = Arr(instance_name, self.manager.arr_manager, client_cls=client_cls)

            # Register in manager
            self.manager.arr_manager.groups.add(instance_name)
            self.manager.arr_manager.uris.add(new_arr.uri)
            self.manager.arr_manager.managed_objects[new_arr.category] = new_arr
            self.manager.arr_manager.arr_categories.add(new_arr.category)

            # Spawn and start processes
            _, procs = new_arr.spawn_child_processes()
            for p in procs:
                try:
                    p.start()
                    self.logger.debug(f"Started process (PID: {p.pid}) for {instance_name}")
                except Exception as e:
                    self.logger.error(f"Failed to start process for {instance_name}: {e}")

            self.logger.success(
                f"Started Arr instance: {instance_name} (category: {new_arr.category})"
            )

        except SkipException:
            self.logger.info(f"Instance {instance_name} skipped (not managed or disabled)")
        except Exception as e:
            self.logger.error(f"Failed to start Arr instance {instance_name}: {e}", exc_info=True)

    def _reload_arr_instance(self, instance_name: str):
        """Reload a single Arr instance without affecting others."""
        self.logger.notice(f"Reloading Arr instance: {instance_name}")

        if not hasattr(self.manager, "arr_manager") or not self.manager.arr_manager:
            self.logger.warning("Cannot reload Arr instance: ArrManager not initialized")
            return

        managed_objects = self.manager.arr_manager.managed_objects

        # Find the instance by name (key is category, so search by _name attribute)
        old_arr = None
        old_category = None
        for category, arr in list(managed_objects.items()):
            if getattr(arr, "_name", None) == instance_name:
                old_arr = arr
                old_category = category
                break

        # Check if instance exists in config
        instance_exists_in_config = instance_name in CONFIG.sections()

        # Handle deletion case
        if not instance_exists_in_config:
            if old_arr:
                self.logger.info(f"Instance {instance_name} removed from config, stopping...")
                self._stop_arr_instance(old_arr, old_category)
            else:
                self.logger.debug(f"Instance {instance_name} not found in config or memory")
            return

        # Handle update/addition
        if old_arr:
            # Update existing - stop old processes first
            self.logger.info(f"Updating existing Arr instance: {instance_name}")
            self._stop_arr_instance(old_arr, old_category)
        else:
            self.logger.info(f"Adding new Arr instance: {instance_name}")

        # Small delay to ensure cleanup completes
        time.sleep(0.5)

        # Create new instance
        self._start_arr_instance(instance_name)

        self.logger.success(f"Successfully reloaded Arr instance: {instance_name}")

    def start(self):
        if self._thread and self._thread.is_alive():
            self.logger.debug("WebUI already running on %s:%s", self.host, self.port)
            return
        self.logger.notice("Starting WebUI on %s:%s", self.host, self.port)
        self._thread = threading.Thread(target=self._serve, name="WebUI", daemon=True)
        self._thread.start()
        self.logger.success("WebUI thread started (name=%s)", self._thread.name)

    def _serve(self):
        try:
            # Reset shutdown event at start
            self._shutdown_event.clear()

            if self._should_use_dev_server():
                self.logger.info("Using Flask development server for WebUI")
                # Flask dev server - will exit on KeyboardInterrupt
                try:
                    self.app.run(
                        host=self.host,
                        port=self.port,
                        debug=False,
                        use_reloader=False,
                        threaded=True,
                    )
                except (KeyboardInterrupt, SystemExit):
                    pass
                return

            try:
                from waitress import serve as waitress_serve
            except Exception:
                self.logger.warning(
                    "Waitress is unavailable; falling back to Flask development server. "
                    "Install the 'waitress' extra or set QBITRR_USE_DEV_SERVER=1 to silence this message."
                )
                self.app.run(host=self.host, port=self.port, debug=False, use_reloader=False)
                return

            self.logger.info("Using Waitress WSGI server for WebUI")

            # For graceful restart capability, we need to use waitress_serve with channels
            # However, for now we'll use the simpler approach and just run the server
            # Restart capability will require stopping the entire process
            waitress_serve(
                self.app,
                host=self.host,
                port=self.port,
                ident="qBitrr-WebUI",
            )

        except KeyboardInterrupt:
            self.logger.info("WebUI interrupted")
        except Exception:
            self.logger.exception("WebUI server terminated unexpectedly")
        finally:
            self._server = None

            # If restart was requested, start a new server
            if self._restart_requested:
                self._restart_requested = False
                self.logger.info("Restarting WebUI server...")
                time.sleep(0.5)  # Brief pause
                self.start()  # Restart

    def _should_use_dev_server(self) -> bool:
        if self._use_dev_server is not None:
            return self._use_dev_server
        override = os.environ.get("QBITRR_USE_DEV_SERVER", "")
        if override:
            self._use_dev_server = override.strip().lower() not in {"0", "false", "no", "off"}
            return self._use_dev_server
        self._use_dev_server = False
        return self._use_dev_server