qBitrr2 5.3.3-py3-none-any.whl → 5.4.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qBitrr/arss.py +20 -17
- qBitrr/bundled_data.py +2 -2
- qBitrr/webui.py +347 -65
- {qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/METADATA +1 -1
- {qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/RECORD +9 -9
- {qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/WHEEL +0 -0
- {qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/entry_points.txt +0 -0
- {qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/licenses/LICENSE +0 -0
- {qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/top_level.txt +0 -0
qBitrr/arss.py
CHANGED

```diff
@@ -2547,7 +2547,7 @@ class Arr:
                     JSONDecodeError,
                 ):
                     continue
-                if episode
+                if episode.get("monitored", True) or self.search_unmonitored:
                     while True:
                         try:
                             series_info = episode.get("series") or {}
@@ -2701,14 +2701,12 @@ class Arr:
                     else None
                 )
                 AirDateUtc = episode["airDateUtc"]
-                Monitored = episode
+                Monitored = episode.get("monitored", True)
                 QualityMet = not QualityUnmet if db_entry["hasFile"] else False
                 customFormatMet = customFormat >= minCustomFormat

-                if
-                # Episode is
-                reason = None
-                elif not episode["hasFile"]:
+                if not episode["hasFile"]:
+                    # Episode is missing a file - always mark as Missing
                     reason = "Missing"
                 elif self.quality_unmet_search and QualityUnmet:
                     reason = "Quality"
@@ -2716,8 +2714,11 @@ class Arr:
                     reason = "CustomFormat"
                 elif self.do_upgrade_search:
                     reason = "Upgrade"
+                elif searched:
+                    # Episode has file and search is complete
+                    reason = "Not being searched"
                 else:
-                    reason =
+                    reason = "Not being searched"

                 to_update = {
                     self.model_file.Monitored: Monitored,
@@ -3056,10 +3057,8 @@ class Arr:
                 qualityMet = not QualityUnmet if db_entry["hasFile"] else False
                 customFormatMet = customFormat >= minCustomFormat

-                if
-                # Movie is
-                reason = None
-                elif not db_entry["hasFile"]:
+                if not db_entry["hasFile"]:
+                    # Movie is missing a file - always mark as Missing
                     reason = "Missing"
                 elif self.quality_unmet_search and QualityUnmet:
                     reason = "Quality"
@@ -3067,8 +3066,11 @@ class Arr:
                     reason = "CustomFormat"
                 elif self.do_upgrade_search:
                     reason = "Upgrade"
+                elif searched:
+                    # Movie has file and search is complete
+                    reason = "Not being searched"
                 else:
-                    reason =
+                    reason = "Not being searched"

                 to_update = {
                     self.model_file.MovieFileId: movieFileId,
@@ -3365,10 +3367,8 @@ class Arr:
                 qualityMet = not QualityUnmet if hasAllTracks else False
                 customFormatMet = customFormat >= minCustomFormat

-                if
-                # Album is
-                reason = None
-                elif not hasAllTracks:
+                if not hasAllTracks:
+                    # Album is missing tracks - always mark as Missing
                     reason = "Missing"
                 elif self.quality_unmet_search and QualityUnmet:
                     reason = "Quality"
@@ -3376,8 +3376,11 @@ class Arr:
                     reason = "CustomFormat"
                 elif self.do_upgrade_search:
                     reason = "Upgrade"
+                elif searched:
+                    # Album is complete and not being searched
+                    reason = "Not being searched"
                 else:
-                    reason =
+                    reason = "Not being searched"

                 to_update = {
                     self.model_file.AlbumFileId: albumFileId,
```
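Taken together, these hunks give every tracked episode, movie, and album a non-null search reason: the old `reason = None` fallback becomes an explicit `"Not being searched"`, and a new `elif searched:` branch applies the same label to items whose search has already completed. A minimal standalone sketch of the resulting decision ladder (the function and its boolean parameters are illustrative, not qBitrr's actual method signature; the CustomFormat condition is outside this diff's context lines, so the one used here is an assumption):

```python
# Illustrative sketch of the 5.4.1 reason ladder; parameter names are
# hypothetical. In qBitrr these values come from the Arr instance's
# config flags and the per-item database row.
def classify_reason(
    has_file: bool,
    quality_unmet_search: bool,
    quality_unmet: bool,
    custom_format_unmet: bool,
    do_upgrade_search: bool,
    searched: bool,
) -> str:
    if not has_file:
        # Missing media always wins, regardless of the other flags
        return "Missing"
    elif quality_unmet_search and quality_unmet:
        return "Quality"
    elif custom_format_unmet:  # assumed condition for the context line shown
        return "CustomFormat"
    elif do_upgrade_search:
        return "Upgrade"
    elif searched:
        # Item has its file and the search already completed
        return "Not being searched"
    else:
        # Previously None; now an explicit, displayable label
        return "Not being searched"
```

Because the new `elif searched:` branch and the `else:` fallback produce the same string, every reachable path now yields a concrete label, which is what the `webui.py` changes below surface through a new `"reason"` payload field.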
qBitrr/bundled_data.py
CHANGED
qBitrr/webui.py
CHANGED

```diff
@@ -6,6 +6,7 @@ import os
 import re
 import secrets
 import threading
+import time
 from collections.abc import Mapping
 from datetime import datetime, timedelta, timezone
 from pathlib import Path
@@ -143,6 +144,11 @@ class WebUI:
         self._thread: threading.Thread | None = None
         self._use_dev_server: bool | None = None

+        # Shutdown control for graceful restart
+        self._shutdown_event = threading.Event()
+        self._restart_requested = False
+        self._server = None  # Will hold Waitress server reference
+
     def _fetch_version_info(self) -> dict[str, Any]:
         info = fetch_latest_release(self._github_repo)
         if info.get("error"):
```
```diff
@@ -916,6 +922,7 @@ class WebUI:
                         "monitored": is_monitored,
                         "hasFile": has_file,
                         "airDateUtc": air_value,
+                        "reason": getattr(ep, "Reason", None),
                     }
                 )
             for bucket in seasons.values():
@@ -1065,6 +1072,7 @@ class WebUI:
                         "monitored": is_monitored,
                         "hasFile": has_file,
                         "airDateUtc": air_value,
+                        "reason": getattr(ep, "Reason", None),
                     }
                 )
             for bucket in seasons.values():
```
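Both season-grouping code paths now attach the persisted reason to each episode entry via `getattr(ep, "Reason", None)`, so database rows written before this version simply report `null`. An illustrative entry (values are made up; the keys come from the diff and the reason strings from the `arss.py` hunks above):

```python
# Illustrative episode entry from the WebUI seasons payload.
episode_entry = {
    "monitored": True,
    "hasFile": False,
    "airDateUtc": "2025-01-12T02:00:00Z",
    "reason": "Missing",  # or Quality / CustomFormat / Upgrade / Not being searched / None
}
```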
```diff
@@ -1182,6 +1190,18 @@ class WebUI:
             response.headers["Expires"] = "0"
             return response

+        @app.get("/sw.js")
+        def service_worker():
+            # Service worker must be served from root path for PWA support
+            from flask import make_response
+
+            response = make_response(redirect("/static/sw.js"))
+            # Prevent caching of the service worker to ensure updates are picked up
+            response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
+            response.headers["Pragma"] = "no-cache"
+            response.headers["Expires"] = "0"
+            return response
+
         def _processes_payload() -> dict[str, Any]:
             procs = []
             search_activity_map = fetch_search_activities()
```
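The new `/sw.js` route exists because a service worker's maximum scope is the path it is served from: a worker registered at `/static/sw.js` could only control pages under `/static/`. The diff implements the root path as a no-cache redirect to the static file. A hedged alternative sketch, assuming the worker file lives in the Flask static folder, would serve the bytes directly so the script URL itself stays at the origin root:

```python
# Alternative sketch, not the shipped implementation: serve the worker
# bytes at the root URL instead of redirecting. Assumes sw.js sits in
# the app's static folder.
from flask import Flask, send_from_directory

app = Flask(__name__)

@app.get("/sw.js")
def service_worker():
    # The script URL stays "/sw.js", so the worker's maximum scope is "/"
    response = send_from_directory(app.static_folder, "sw.js")
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    return response
```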
```diff
@@ -2039,29 +2059,51 @@ class WebUI:
             except Exception as e:
                 return jsonify({"error": str(e)}), 500

-
-
+        def _handle_config_update():
+            """Common handler for config updates with intelligent reload detection."""
             body = request.get_json(silent=True) or {}
             changes: dict[str, Any] = body.get("changes", {})
             if not isinstance(changes, dict):
-                return jsonify({"error": "
+                return jsonify({"error": "changes must be an object"}), 400

-            #
+            # Define key categories
             frontend_only_keys = {
                 "WebUI.LiveArr",
                 "WebUI.GroupSonarr",
                 "WebUI.GroupLidarr",
                 "WebUI.Theme",
             }
+            webui_restart_keys = {
+                "WebUI.Host",
+                "WebUI.Port",
+                "WebUI.Token",
+            }
+
+            # Analyze changes to determine reload strategy
+            affected_arr_instances = set()
+            has_global_changes = False
+            has_webui_changes = False
+            has_frontend_only_changes = False

-            # Check if any changes require backend reload
-            requires_reload = False
             for key in changes.keys():
-                if key
-
-
+                if key in frontend_only_keys:
+                    has_frontend_only_changes = True
+                elif key in webui_restart_keys:
+                    has_webui_changes = True
+                elif key.startswith("WebUI."):
+                    # Unknown WebUI key, treat as webui change for safety
+                    has_webui_changes = True
+                elif match := re.match(
+                    r"^(Radarr|Sonarr|Lidarr|Animarr)[^.]*\.(.+)$", key, re.IGNORECASE
+                ):
+                    # Arr instance specific change
+                    instance_name = key.split(".")[0]
+                    affected_arr_instances.add(instance_name)
+                else:
+                    # Settings.*, qBit.*, or unknown - requires full reload
+                    has_global_changes = True

-            # Apply changes
+            # Apply all changes to config
             for key, val in changes.items():
                 if val is None:
                     _toml_delete(CONFIG.config, key)
@@ -2072,75 +2114,88 @@ class WebUI:
                 if key == "WebUI.Token":
                     # Update in-memory token immediately
                     self.token = str(val) if val is not None else ""
-            # Persist
-            CONFIG.save()

-            #
-
+            # Persist config
+            try:
+                CONFIG.save()
+            except Exception as e:
+                return jsonify({"error": f"Failed to save config: {e}"}), 500
+
+            # Determine reload strategy
+            reload_type = "none"
+            affected_instances_list = []
+
+            if has_global_changes:
+                # Global settings changed - full reload required
+                # This affects ALL instances (qBit settings, loop timers, etc.)
+                reload_type = "full"
+                self.logger.notice("Global settings changed, performing full reload")
                 try:
                     self.manager.configure_auto_update()
                 except Exception:
                     self.logger.exception("Failed to refresh auto update configuration")
-                # Live-reload: rebuild Arr instances and restart processes
                 self._reload_all()
-                response = jsonify({"status": "ok"})
-                # Clear cache headers to force browser to reload
-                response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
-                response.headers["Pragma"] = "no-cache"
-                response.headers["Expires"] = "0"
-                # Add a custom header to signal the client to reload
-                response.headers["X-Config-Reloaded"] = "true"
-                return response

-
-
-
-
-
-                return jsonify({"error": "changes must be an object"}), 400
+            elif len(affected_arr_instances) >= 1:
+                # One or more Arr instances changed - reload each individually
+                # NEVER trigger global reload for Arr-only changes
+                reload_type = "multi_arr" if len(affected_arr_instances) > 1 else "single_arr"
+                affected_instances_list = sorted(affected_arr_instances)

-
-
-
-
-
-
+                self.logger.notice(
+                    f"Reloading {len(affected_instances_list)} Arr instance(s): {', '.join(affected_instances_list)}"
+                )
+
+                # Reload each affected instance in sequence
+                for instance_name in affected_instances_list:
+                    self._reload_arr_instance(instance_name)
+
+            elif has_webui_changes:
+                # Only WebUI settings changed - restart WebUI
+                reload_type = "webui"
+                self.logger.notice("WebUI settings changed, restarting WebUI server")
+                # Run restart in background thread to avoid blocking response
+                restart_thread = threading.Thread(
+                    target=self._restart_webui, name="WebUIRestart", daemon=True
+                )
+                restart_thread.start()
+
+            elif has_frontend_only_changes:
+                # Only frontend settings changed - no reload
+                reload_type = "frontend"
+                self.logger.debug("Frontend-only settings changed, no reload required")
+
+            # Build response
+            response_data = {
+                "status": "ok",
+                "configReloaded": reload_type not in ("none", "frontend"),
+                "reloadType": reload_type,
+                "affectedInstances": affected_instances_list,
             }

-
-            requires_reload = False
-            for key in changes.keys():
-                if key not in frontend_only_keys:
-                    requires_reload = True
-                    break
+            response = jsonify(response_data)

-
-            if val is None:
-                _toml_delete(CONFIG.config, key)
-                if key == "WebUI.Token":
-                    self.token = ""
-                continue
-            _toml_set(CONFIG.config, key, val)
-            if key == "WebUI.Token":
-                self.token = str(val) if val is not None else ""
-            CONFIG.save()
-
-            # Only reload if changes affect backend behavior
-            if requires_reload:
-                try:
-                    self.manager.configure_auto_update()
-                except Exception:
-                    self.logger.exception("Failed to refresh auto update configuration")
-                self._reload_all()
-            response = jsonify({"status": "ok"})
-            # Clear cache headers to force browser to reload
+            # Add headers for cache control
             response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
             response.headers["Pragma"] = "no-cache"
             response.headers["Expires"] = "0"
-
-
+
+            # Legacy header for compatibility
+            if reload_type in ("full", "single_arr", "multi_arr", "webui"):
+                response.headers["X-Config-Reloaded"] = "true"
+
             return response

+        @app.post("/api/config")
+        def api_update_config():
+            if (resp := require_token()) is not None:
+                return resp
+            return _handle_config_update()
+
+        @app.post("/web/config")
+        def web_update_config():
+            return _handle_config_update()
+
     def _reload_all(self):
         # Set rebuilding flag
         self._rebuilding_arrs = True
```
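The reload decision is driven entirely by which keys appear in `changes`. A standalone sketch of the same categorization (the `classify` helper and the example keys are illustrative, not part of qBitrr's API; the sets and regex mirror the diff):

```python
import re

# Mirrors the branch order in _handle_config_update.
FRONTEND_ONLY = {"WebUI.LiveArr", "WebUI.GroupSonarr", "WebUI.GroupLidarr", "WebUI.Theme"}
WEBUI_RESTART = {"WebUI.Host", "WebUI.Port", "WebUI.Token"}
ARR_KEY = re.compile(r"^(Radarr|Sonarr|Lidarr|Animarr)[^.]*\.(.+)$", re.IGNORECASE)

def classify(key: str) -> str:
    if key in FRONTEND_ONLY:
        return "frontend"          # no reload at all
    if key in WEBUI_RESTART or key.startswith("WebUI."):
        return "webui"             # unknown WebUI.* keys are treated the same, for safety
    if ARR_KEY.match(key):
        return f"arr:{key.split('.')[0]}"  # per-instance reload only
    return "global"                # Settings.*, qBit.*, or unknown -> full reload

assert classify("WebUI.Theme") == "frontend"
assert classify("WebUI.Port") == "webui"
assert classify("Radarr-1080.URI") == "arr:Radarr-1080"  # hypothetical instance key
assert classify("Settings.AnyKey") == "global"           # key name illustrative
```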
```diff
@@ -2199,6 +2254,202 @@ class WebUI:
         # Clear rebuilding flag
         self._rebuilding_arrs = False

+    def _restart_webui(self):
+        """
+        Gracefully restart the WebUI server without affecting Arr processes.
+        This is used when WebUI.Host, WebUI.Port, or WebUI.Token changes.
+        """
+        self.logger.notice("WebUI restart requested (config changed)")
+
+        # Reload config values
+        try:
+            CONFIG.load()
+        except Exception as e:
+            self.logger.warning(f"Failed to reload config: {e}")
+
+        # Update in-memory values
+        new_host = CONFIG.get("WebUI.Host", fallback="0.0.0.0")
+        new_port = CONFIG.get("WebUI.Port", fallback=6969)
+        new_token = CONFIG.get("WebUI.Token", fallback=None)
+
+        # Check if restart is actually needed
+        needs_restart = new_host != self.host or new_port != self.port
+
+        # Token can be updated without restart
+        if new_token != self.token:
+            self.token = new_token
+            self.logger.info("WebUI token updated")
+
+        if not needs_restart:
+            self.logger.info("WebUI Host/Port unchanged, restart not required")
+            return
+
+        # Update host/port
+        self.host = new_host
+        self.port = new_port
+
+        # Signal restart
+        self._restart_requested = True
+        self._shutdown_event.set()
+
+        self.logger.info(f"WebUI will restart on {self.host}:{self.port}")
+
+    def _stop_arr_instance(self, arr, category: str):
+        """Stop and cleanup a single Arr instance."""
+        self.logger.info(f"Stopping Arr instance: {category}")
+
+        # Stop processes
+        for loop_kind in ("search", "torrent"):
+            proc_attr = f"process_{loop_kind}_loop"
+            process = getattr(arr, proc_attr, None)
+            if process is not None:
+                try:
+                    process.kill()
+                except Exception:
+                    pass
+                try:
+                    process.terminate()
+                except Exception:
+                    pass
+                try:
+                    self.manager.child_processes.remove(process)
+                except Exception:
+                    pass
+                self.logger.debug(f"Stopped {loop_kind} process for {category}")
+
+        # Delete database files
+        try:
+            if hasattr(arr, "search_db_file") and arr.search_db_file:
+                if arr.search_db_file.exists():
+                    self.logger.info(f"Deleting database file: {arr.search_db_file}")
+                    arr.search_db_file.unlink()
+                    self.logger.success(
+                        f"Deleted database file for {getattr(arr, '_name', category)}"
+                    )
+                # Delete WAL and SHM files
+                for suffix in (".db-wal", ".db-shm"):
+                    aux_file = arr.search_db_file.with_suffix(suffix)
+                    if aux_file.exists():
+                        self.logger.debug(f"Deleting auxiliary file: {aux_file}")
+                        aux_file.unlink()
+        except Exception as e:
+            self.logger.warning(
+                f"Failed to delete database files for {getattr(arr, '_name', category)}: {e}"
+            )
+
+        # Remove from managed_objects
+        self.manager.arr_manager.managed_objects.pop(category, None)
+        self.manager.arr_manager.groups.discard(getattr(arr, "_name", ""))
+        self.manager.arr_manager.uris.discard(getattr(arr, "uri", ""))
+        self.manager.arr_manager.arr_categories.discard(category)
+
+        self.logger.success(f"Stopped and cleaned up Arr instance: {category}")
+
+    def _start_arr_instance(self, instance_name: str):
+        """Create and start a single Arr instance."""
+        self.logger.info(f"Starting Arr instance: {instance_name}")
+
+        # Check if instance is managed
+        if not CONFIG.get(f"{instance_name}.Managed", fallback=False):
+            self.logger.info(f"Instance {instance_name} is not managed, skipping")
+            return
+
+        # Determine client class based on name
+        client_cls = None
+        if re.match(r"^(Rad|rad)arr", instance_name):
+            from pyarr import RadarrAPI
+
+            client_cls = RadarrAPI
+        elif re.match(r"^(Son|son|Anim|anim)arr", instance_name):
+            from pyarr import SonarrAPI
+
+            client_cls = SonarrAPI
+        elif re.match(r"^(Lid|lid)arr", instance_name):
+            from pyarr import LidarrAPI
+
+            client_cls = LidarrAPI
+        else:
+            self.logger.error(f"Unknown Arr type for instance: {instance_name}")
+            return
+
+        try:
+            # Create new Arr instance
+            from qBitrr.arss import Arr
+            from qBitrr.errors import SkipException
+
+            new_arr = Arr(instance_name, self.manager.arr_manager, client_cls=client_cls)
+
+            # Register in manager
+            self.manager.arr_manager.groups.add(instance_name)
+            self.manager.arr_manager.uris.add(new_arr.uri)
+            self.manager.arr_manager.managed_objects[new_arr.category] = new_arr
+            self.manager.arr_manager.arr_categories.add(new_arr.category)
+
+            # Spawn and start processes
+            _, procs = new_arr.spawn_child_processes()
+            for p in procs:
+                try:
+                    p.start()
+                    self.logger.debug(f"Started process (PID: {p.pid}) for {instance_name}")
+                except Exception as e:
+                    self.logger.error(f"Failed to start process for {instance_name}: {e}")
+
+            self.logger.success(
+                f"Started Arr instance: {instance_name} (category: {new_arr.category})"
+            )
+
+        except SkipException:
+            self.logger.info(f"Instance {instance_name} skipped (not managed or disabled)")
+        except Exception as e:
+            self.logger.error(f"Failed to start Arr instance {instance_name}: {e}", exc_info=True)
+
+    def _reload_arr_instance(self, instance_name: str):
+        """Reload a single Arr instance without affecting others."""
+        self.logger.notice(f"Reloading Arr instance: {instance_name}")
+
+        if not hasattr(self.manager, "arr_manager") or not self.manager.arr_manager:
+            self.logger.warning("Cannot reload Arr instance: ArrManager not initialized")
+            return
+
+        managed_objects = self.manager.arr_manager.managed_objects
+
+        # Find the instance by name (key is category, so search by _name attribute)
+        old_arr = None
+        old_category = None
+        for category, arr in list(managed_objects.items()):
+            if getattr(arr, "_name", None) == instance_name:
+                old_arr = arr
+                old_category = category
+                break
+
+        # Check if instance exists in config
+        instance_exists_in_config = instance_name in CONFIG.sections()
+
+        # Handle deletion case
+        if not instance_exists_in_config:
+            if old_arr:
+                self.logger.info(f"Instance {instance_name} removed from config, stopping...")
+                self._stop_arr_instance(old_arr, old_category)
+            else:
+                self.logger.debug(f"Instance {instance_name} not found in config or memory")
+            return
+
+        # Handle update/addition
+        if old_arr:
+            # Update existing - stop old processes first
+            self.logger.info(f"Updating existing Arr instance: {instance_name}")
+            self._stop_arr_instance(old_arr, old_category)
+        else:
+            self.logger.info(f"Adding new Arr instance: {instance_name}")
+
+        # Small delay to ensure cleanup completes
+        time.sleep(0.5)
+
+        # Create new instance
+        self._start_arr_instance(instance_name)
+
+        self.logger.success(f"Successfully reloaded Arr instance: {instance_name}")
+
     def start(self):
         if self._thread and self._thread.is_alive():
             self.logger.debug("WebUI already running on %s:%s", self.host, self.port)
```
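With these methods in place, the `POST /api/config` route added above returns a structured description of what was reloaded. A hypothetical client-side call (the URL, token transport, and config key are placeholders; `require_token()`'s exact auth mechanism is not shown in this diff, and the response field names come from `response_data` above):

```python
# Hypothetical usage of the new POST /api/config endpoint.
import requests

resp = requests.post(
    "http://localhost:6969/api/config",
    headers={"Authorization": "Bearer <token>"},  # auth transport assumed
    json={"changes": {"Sonarr-TV.SearchMissing": True}},  # illustrative key
)
data = resp.json()
print(data["reloadType"])         # -> "single_arr" (only one Arr instance touched)
print(data["affectedInstances"])  # -> ["Sonarr-TV"]
print(data["configReloaded"])     # -> True
```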
```diff
@@ -2210,10 +2461,24 @@ class WebUI:

     def _serve(self):
         try:
+            # Reset shutdown event at start
+            self._shutdown_event.clear()
+
             if self._should_use_dev_server():
                 self.logger.info("Using Flask development server for WebUI")
-
+                # Flask dev server - will exit on KeyboardInterrupt
+                try:
+                    self.app.run(
+                        host=self.host,
+                        port=self.port,
+                        debug=False,
+                        use_reloader=False,
+                        threaded=True,
+                    )
+                except (KeyboardInterrupt, SystemExit):
+                    pass
                 return
+
             try:
                 from waitress import serve as waitress_serve
             except Exception:
@@ -2223,15 +2488,32 @@ class WebUI:
                 )
                 self.app.run(host=self.host, port=self.port, debug=False, use_reloader=False)
                 return
+
             self.logger.info("Using Waitress WSGI server for WebUI")
+
+            # For graceful restart capability, we need to use waitress_serve with channels
+            # However, for now we'll use the simpler approach and just run the server
+            # Restart capability will require stopping the entire process
             waitress_serve(
                 self.app,
                 host=self.host,
                 port=self.port,
                 ident="qBitrr-WebUI",
             )
-
+
+        except KeyboardInterrupt:
+            self.logger.info("WebUI interrupted")
+        except Exception:
             self.logger.exception("WebUI server terminated unexpectedly")
+        finally:
+            self._server = None
+
+            # If restart was requested, start a new server
+            if self._restart_requested:
+                self._restart_requested = False
+                self.logger.info("Restarting WebUI server...")
+                time.sleep(0.5)  # Brief pause
+                self.start()  # Restart

     def _should_use_dev_server(self) -> bool:
         if self._use_dev_server is not None:
```
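The restart path is cooperative: `_restart_webui` only flips `_restart_requested` and sets `_shutdown_event`, and the actual re-bind happens in `_serve`'s `finally:` block once the old server exits. A minimal sketch of that pattern in isolation (class and method names are illustrative):

```python
import threading
import time

class RestartableServer:
    """Minimal sketch of the WebUI's signal-then-restart pattern."""

    def __init__(self) -> None:
        self._restart_requested = False
        self._shutdown_event = threading.Event()

    def request_restart(self) -> None:
        # Called from a request-handler thread; returns immediately so the
        # HTTP response that triggered the restart can still be delivered.
        self._restart_requested = True
        self._shutdown_event.set()

    def serve(self) -> None:
        try:
            self._run_server()  # blocks until the server exits
        finally:
            if self._restart_requested:
                self._restart_requested = False
                time.sleep(0.5)  # brief pause before re-binding
                self.serve()     # start a fresh server on the new host/port

    def _run_server(self) -> None:
        # Stand-in for app.run(...) or waitress.serve(...)
        self._shutdown_event.clear()
        self._shutdown_event.wait()
```

Note the caveat in the diff's own comments: plain `waitress_serve` does not watch the shutdown event, so under Waitress a Host/Port change still effectively requires the server to stop before the new binding takes effect; only the token update is applied fully live.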
{qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: qBitrr2
-Version: 5.3.3
+Version: 5.4.1
 Summary: Intelligent automation for qBittorrent and *Arr apps (Radarr/Sonarr/Lidarr) - health monitoring, instant imports, quality upgrades, request integration
 Home-page: https://github.com/Feramance/qBitrr
 Author: Feramance
```
{qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/RECORD
CHANGED

```diff
@@ -1,7 +1,7 @@
 qBitrr/__init__.py,sha256=smiPIV7d2lMJ_KTtFdAVlxLEBobFTheILdgry1iqpjQ,405
-qBitrr/arss.py,sha256=
+qBitrr/arss.py,sha256=350czDlTAjNjOjmD3MwyGVyTTdtIkYWq9Op--pf_qyg,306059
 qBitrr/auto_update.py,sha256=hVAvAlKEdOHm6AJLlKvtkklbQhjotVcFOCH-MTigHQM,4419
-qBitrr/bundled_data.py,sha256=
+qBitrr/bundled_data.py,sha256=x7l2nQxu9ajAcAjVnwaC4Gbh_o5DoFw6FuJtq1RjHSk,221
 qBitrr/config.py,sha256=e_UL8Jjz2hWAhT53Du8XZpvyY4ULC5mpyus_7i2An18,6306
 qBitrr/db_lock.py,sha256=SRCDIoqg-AFLU-VDChAmGdfx8nhgLGETn6XKF3RdJT4,2449
 qBitrr/env_config.py,sha256=299u_uEoyxlM_ceTD0Z_i41JdYjSHmqO6FKe7qGFgTM,2866
@@ -15,10 +15,10 @@ qBitrr/search_activity_store.py,sha256=_7MD7fFna4uTSo_pRT7DqoytSVz7tPoU9D2AV2mn-
 qBitrr/tables.py,sha256=cumrb5aqJ0Uufu2biDPCIgu1_TP0hlHVi7dgAQKK_bM,3969
 qBitrr/utils.py,sha256=T10win016yHwMMJlJ4yuPTRUI9m-AS_a_MouiAJAtC8,8190
 qBitrr/versioning.py,sha256=00um_zKic8mMrNZ7IHEUPx4ju5Yi_TWCgZxl81IfMaM,3362
-qBitrr/webui.py,sha256=
-qbitrr2-5.
-qbitrr2-5.
-qbitrr2-5.
-qbitrr2-5.
-qbitrr2-5.
-qbitrr2-5.
+qBitrr/webui.py,sha256=YDtfQOfISvu1E6WgPwFV1U9GaNzzx2djaUFTQF-IAFs,106774
+qbitrr2-5.4.1.dist-info/licenses/LICENSE,sha256=P978aVGi7dPbKz8lfvdiryOS5IjTAU7AA47XhBhVBlI,1066
+qbitrr2-5.4.1.dist-info/METADATA,sha256=JGpsWhDLIHIVxf46yGFyv7ehr-vhnU85bXGcG4wk2Dk,33177
+qbitrr2-5.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+qbitrr2-5.4.1.dist-info/entry_points.txt,sha256=MIR-l5s31VBs9qlv3HiAaMdpOOyy0MNGfM7Ib1-fKeQ,43
+qbitrr2-5.4.1.dist-info/top_level.txt,sha256=jIINodarzsPcQeTf-vvK8-_g7cQ8CvxEg41ms14K97g,7
+qbitrr2-5.4.1.dist-info/RECORD,,
```
{qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/WHEEL
File without changes
{qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/entry_points.txt
File without changes
{qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/licenses/LICENSE
File without changes
{qbitrr2-5.3.3.dist-info → qbitrr2-5.4.1.dist-info}/top_level.txt
File without changes