qBitrr2 5.3.2-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registries; it is provided for informational purposes only.
qBitrr/arss.py CHANGED
@@ -2705,10 +2705,8 @@ class Arr:
  QualityMet = not QualityUnmet if db_entry["hasFile"] else False
  customFormatMet = customFormat >= minCustomFormat

- if searched:
-     # Episode is complete and not being searched
-     reason = None
- elif not episode["hasFile"]:
+ if not episode["hasFile"]:
+     # Episode is missing a file - always mark as Missing
      reason = "Missing"
  elif self.quality_unmet_search and QualityUnmet:
      reason = "Quality"
@@ -2716,8 +2714,11 @@ class Arr:
      reason = "CustomFormat"
  elif self.do_upgrade_search:
      reason = "Upgrade"
+ elif searched:
+     # Episode has file and search is complete
+     reason = "Not being searched"
  else:
-     reason = None
+     reason = "Not being searched"

  to_update = {
      self.model_file.Monitored: Monitored,
@@ -3056,10 +3057,8 @@ class Arr:
  qualityMet = not QualityUnmet if db_entry["hasFile"] else False
  customFormatMet = customFormat >= minCustomFormat

- if searched:
-     # Movie is complete and not being searched
-     reason = None
- elif not db_entry["hasFile"]:
+ if not db_entry["hasFile"]:
+     # Movie is missing a file - always mark as Missing
      reason = "Missing"
  elif self.quality_unmet_search and QualityUnmet:
      reason = "Quality"
@@ -3067,8 +3066,11 @@ class Arr:
      reason = "CustomFormat"
  elif self.do_upgrade_search:
      reason = "Upgrade"
+ elif searched:
+     # Movie has file and search is complete
+     reason = "Not being searched"
  else:
-     reason = None
+     reason = "Not being searched"

  to_update = {
      self.model_file.MovieFileId: movieFileId,
@@ -3365,10 +3367,8 @@ class Arr:
  qualityMet = not QualityUnmet if hasAllTracks else False
  customFormatMet = customFormat >= minCustomFormat

- if searched:
-     # Album is complete and not being searched
-     reason = None
- elif not hasAllTracks:
+ if not hasAllTracks:
+     # Album is missing tracks - always mark as Missing
      reason = "Missing"
  elif self.quality_unmet_search and QualityUnmet:
      reason = "Quality"
@@ -3376,8 +3376,11 @@ class Arr:
      reason = "CustomFormat"
  elif self.do_upgrade_search:
      reason = "Upgrade"
+ elif searched:
+     # Album is complete and not being searched
+     reason = "Not being searched"
  else:
-     reason = None
+     reason = "Not being searched"

  to_update = {
      self.model_file.AlbumFileId: albumFileId,
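
Across the three library types (episodes, movies, albums) the same reordering applies: a missing file or missing tracks now always produces "Missing", even for entries that were already searched, and the old `reason = None` fallback becomes an explicit "Not being searched". A minimal standalone sketch of the resulting precedence, assuming simplified flag names; the condition guarding the CustomFormat branch is outside the hunk, so a stand-in is used:

```python
def classify(has_media, quality_unmet, custom_format_met,
             quality_unmet_search=False, do_upgrade_search=False):
    # Missing media is reported first, whether or not the entry was already searched
    if not has_media:
        return "Missing"
    if quality_unmet_search and quality_unmet:
        return "Quality"
    if not custom_format_met:  # stand-in for the CustomFormat condition not shown in the hunk
        return "CustomFormat"
    if do_upgrade_search:
        return "Upgrade"
    # Both the old `searched` short-circuit and the final else now resolve to the same label
    return "Not being searched"
```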
qBitrr/bundled_data.py CHANGED
@@ -1,5 +1,5 @@
- version = "5.3.2"
- git_hash = "4ee2e67"
+ version = "5.4.0"
+ git_hash = "5cfa06d7"
  license_text = (
      "Licence can be found on:\n\nhttps://github.com/Feramance/qBitrr/blob/master/LICENSE"
  )
qBitrr/main.py CHANGED
@@ -212,20 +212,67 @@ class qBitManager:
  def _restart():
      if delay > 0:
          time.sleep(delay)
-     self.logger.notice("Exiting to complete restart.")
+     self.logger.notice("Restarting qBitrr...")
+
+     # Set shutdown event to signal all loops to stop
      try:
          self.shutdown_event.set()
      except Exception:
          pass
+
+     # Wait for child processes to exit gracefully
      for proc in list(self.child_processes):
          with contextlib.suppress(Exception):
              proc.join(timeout=5)
+
+     # Force kill any remaining child processes
      for proc in list(self.child_processes):
          with contextlib.suppress(Exception):
              proc.kill()
          with contextlib.suppress(Exception):
              proc.terminate()
-     os._exit(0)
+
+     # Close database connections explicitly
+     try:
+         if hasattr(self, "arr_manager") and self.arr_manager:
+             for arr in self.arr_manager.managed_objects.values():
+                 if hasattr(arr, "db") and arr.db:
+                     with contextlib.suppress(Exception):
+                         arr.db.close()
+     except Exception:
+         pass
+
+     # Flush all log handlers
+     try:
+         for handler in logging.root.handlers[:]:
+             with contextlib.suppress(Exception):
+                 handler.flush()
+                 handler.close()
+     except Exception:
+         pass
+
+     # Prepare restart arguments
+     python = sys.executable
+     args = [python] + sys.argv
+
+     self.logger.notice("Executing restart: %s", " ".join(args))
+
+     # Flush logs one final time before exec
+     try:
+         for handler in self.logger.handlers[:]:
+             with contextlib.suppress(Exception):
+                 handler.flush()
+     except Exception:
+         pass
+
+     # Replace current process with new instance
+     # This works in Docker, native installs, and systemd
+     try:
+         os.execv(python, args)
+     except Exception as e:
+         # If execv fails, fall back to exit and hope external supervisor restarts us
+         self.logger.critical("Failed to restart via execv: %s. Exiting instead.", e)
+         os._exit(1)

  self._restart_thread = Thread(target=_restart, name="qBitrr-Restart", daemon=True)
  self._restart_thread.start()
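
Rather than calling `os._exit(0)` and relying on an external supervisor to bring qBitrr back, the restart thread now re-executes the current interpreter in place after cleanup. A minimal standalone illustration of that pattern (a sketch, not the project's code):

```python
import os
import sys


def restart_in_place() -> None:
    """Re-exec the running interpreter with the same command-line arguments."""
    python = sys.executable
    args = [python] + sys.argv
    # execv replaces the current process image, so the PID is preserved and a
    # Docker entrypoint or systemd unit keeps supervising the same process.
    os.execv(python, args)  # does not return on success
```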
qBitrr/webui.py CHANGED
@@ -6,6 +6,7 @@ import os
  import re
  import secrets
  import threading
+ import time
  from collections.abc import Mapping
  from datetime import datetime, timedelta, timezone
  from pathlib import Path
@@ -143,6 +144,11 @@ class WebUI:
      self._thread: threading.Thread | None = None
      self._use_dev_server: bool | None = None

+     # Shutdown control for graceful restart
+     self._shutdown_event = threading.Event()
+     self._restart_requested = False
+     self._server = None  # Will hold Waitress server reference
+
  def _fetch_version_info(self) -> dict[str, Any]:
      info = fetch_latest_release(self._github_repo)
      if info.get("error"):
@@ -916,6 +922,7 @@ class WebUI:
          "monitored": is_monitored,
          "hasFile": has_file,
          "airDateUtc": air_value,
+         "reason": getattr(ep, "Reason", None),
      }
  )
  for bucket in seasons.values():
@@ -1065,6 +1072,7 @@ class WebUI:
          "monitored": is_monitored,
          "hasFile": has_file,
          "airDateUtc": air_value,
+         "reason": getattr(ep, "Reason", None),
      }
  )
  for bucket in seasons.values():
@@ -2039,29 +2047,51 @@ class WebUI:
      except Exception as e:
          return jsonify({"error": str(e)}), 500

- @app.post("/api/config")
- def api_update_config():
+ def _handle_config_update():
+     """Common handler for config updates with intelligent reload detection."""
      body = request.get_json(silent=True) or {}
      changes: dict[str, Any] = body.get("changes", {})
      if not isinstance(changes, dict):
-         return jsonify({"error": "Invalid request"}), 400
+         return jsonify({"error": "changes must be an object"}), 400

-     # Frontend-only WebUI settings that don't require backend reload
+     # Define key categories
      frontend_only_keys = {
          "WebUI.LiveArr",
          "WebUI.GroupSonarr",
          "WebUI.GroupLidarr",
          "WebUI.Theme",
      }
+     webui_restart_keys = {
+         "WebUI.Host",
+         "WebUI.Port",
+         "WebUI.Token",
+     }
+
+     # Analyze changes to determine reload strategy
+     affected_arr_instances = set()
+     has_global_changes = False
+     has_webui_changes = False
+     has_frontend_only_changes = False

-     # Check if any changes require backend reload
-     requires_reload = False
      for key in changes.keys():
-         if key not in frontend_only_keys:
-             requires_reload = True
-             break
+         if key in frontend_only_keys:
+             has_frontend_only_changes = True
+         elif key in webui_restart_keys:
+             has_webui_changes = True
+         elif key.startswith("WebUI."):
+             # Unknown WebUI key, treat as webui change for safety
+             has_webui_changes = True
+         elif match := re.match(
+             r"^(Radarr|Sonarr|Lidarr|Animarr)[^.]*\.(.+)$", key, re.IGNORECASE
+         ):
+             # Arr instance specific change
+             instance_name = key.split(".")[0]
+             affected_arr_instances.add(instance_name)
+         else:
+             # Settings.*, qBit.*, or unknown - requires full reload
+             has_global_changes = True

-     # Apply changes
+     # Apply all changes to config
      for key, val in changes.items():
          if val is None:
              _toml_delete(CONFIG.config, key)
@@ -2072,75 +2102,88 @@ class WebUI:
          if key == "WebUI.Token":
              # Update in-memory token immediately
              self.token = str(val) if val is not None else ""
-     # Persist
-     CONFIG.save()

-     # Only reload if changes affect backend behavior
-     if requires_reload:
+     # Persist config
+     try:
+         CONFIG.save()
+     except Exception as e:
+         return jsonify({"error": f"Failed to save config: {e}"}), 500
+
+     # Determine reload strategy
+     reload_type = "none"
+     affected_instances_list = []
+
+     if has_global_changes:
+         # Global settings changed - full reload required
+         # This affects ALL instances (qBit settings, loop timers, etc.)
+         reload_type = "full"
+         self.logger.notice("Global settings changed, performing full reload")
          try:
              self.manager.configure_auto_update()
          except Exception:
              self.logger.exception("Failed to refresh auto update configuration")
-         # Live-reload: rebuild Arr instances and restart processes
          self._reload_all()
-         response = jsonify({"status": "ok"})
-         # Clear cache headers to force browser to reload
-         response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
-         response.headers["Pragma"] = "no-cache"
-         response.headers["Expires"] = "0"
-         # Add a custom header to signal the client to reload
-         response.headers["X-Config-Reloaded"] = "true"
-         return response

- @app.post("/web/config")
- def web_update_config():
-     body = request.get_json(silent=True) or {}
-     changes: dict[str, Any] = body.get("changes", {})
-     if not isinstance(changes, dict):
-         return jsonify({"error": "changes must be an object"}), 400
+     elif len(affected_arr_instances) >= 1:
+         # One or more Arr instances changed - reload each individually
+         # NEVER trigger global reload for Arr-only changes
+         reload_type = "multi_arr" if len(affected_arr_instances) > 1 else "single_arr"
+         affected_instances_list = sorted(affected_arr_instances)

-     # Frontend-only WebUI settings that don't require backend reload
-     frontend_only_keys = {
-         "WebUI.LiveArr",
-         "WebUI.GroupSonarr",
-         "WebUI.GroupLidarr",
-         "WebUI.Theme",
-     }
+         self.logger.notice(
+             f"Reloading {len(affected_instances_list)} Arr instance(s): {', '.join(affected_instances_list)}"
+         )

-     # Check if any changes require backend reload
-     requires_reload = False
-     for key in changes.keys():
-         if key not in frontend_only_keys:
-             requires_reload = True
-             break
+         # Reload each affected instance in sequence
+         for instance_name in affected_instances_list:
+             self._reload_arr_instance(instance_name)
+
+     elif has_webui_changes:
+         # Only WebUI settings changed - restart WebUI
+         reload_type = "webui"
+         self.logger.notice("WebUI settings changed, restarting WebUI server")
+         # Run restart in background thread to avoid blocking response
+         restart_thread = threading.Thread(
+             target=self._restart_webui, name="WebUIRestart", daemon=True
+         )
+         restart_thread.start()
+
+     elif has_frontend_only_changes:
+         # Only frontend settings changed - no reload
+         reload_type = "frontend"
+         self.logger.debug("Frontend-only settings changed, no reload required")
+
+     # Build response
+     response_data = {
+         "status": "ok",
+         "configReloaded": reload_type not in ("none", "frontend"),
+         "reloadType": reload_type,
+         "affectedInstances": affected_instances_list,
+     }

-     for key, val in changes.items():
-         if val is None:
-             _toml_delete(CONFIG.config, key)
-             if key == "WebUI.Token":
-                 self.token = ""
-             continue
-         _toml_set(CONFIG.config, key, val)
-         if key == "WebUI.Token":
-             self.token = str(val) if val is not None else ""
-     CONFIG.save()
+     response = jsonify(response_data)

-     # Only reload if changes affect backend behavior
-     if requires_reload:
-         try:
-             self.manager.configure_auto_update()
-         except Exception:
-             self.logger.exception("Failed to refresh auto update configuration")
-         self._reload_all()
-         response = jsonify({"status": "ok"})
-         # Clear cache headers to force browser to reload
+     # Add headers for cache control
      response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
      response.headers["Pragma"] = "no-cache"
      response.headers["Expires"] = "0"
-     # Add a custom header to signal the client to reload
-     response.headers["X-Config-Reloaded"] = "true"
+
+     # Legacy header for compatibility
+     if reload_type in ("full", "single_arr", "multi_arr", "webui"):
+         response.headers["X-Config-Reloaded"] = "true"
+
      return response

+ @app.post("/api/config")
+ def api_update_config():
+     if (resp := require_token()) is not None:
+         return resp
+     return _handle_config_update()
+
+ @app.post("/web/config")
+ def web_update_config():
+     return _handle_config_update()
+
  def _reload_all(self):
      # Set rebuilding flag
      self._rebuilding_arrs = True
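
With the handler consolidated, both `/api/config` and `/web/config` report which reload strategy was applied. A hypothetical client call against a local instance; the instance name, key, and value are illustrative, port 6969 is the fallback shown in the diff, and the third-party `requests` library is assumed to be installed (the `/web/config` route is registered without a token check in this diff):

```python
import requests

# Change one setting on a single Arr instance; the key is hypothetical.
resp = requests.post(
    "http://localhost:6969/web/config",
    json={"changes": {"Sonarr-TV.SearchMissing": True}},
    timeout=10,
)
print(resp.json())
# Expected shape per the new handler:
# {"status": "ok", "configReloaded": True, "reloadType": "single_arr",
#  "affectedInstances": ["Sonarr-TV"]}
```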
@@ -2199,6 +2242,202 @@ class WebUI:
      # Clear rebuilding flag
      self._rebuilding_arrs = False

+ def _restart_webui(self):
+     """
+     Gracefully restart the WebUI server without affecting Arr processes.
+     This is used when WebUI.Host, WebUI.Port, or WebUI.Token changes.
+     """
+     self.logger.notice("WebUI restart requested (config changed)")
+
+     # Reload config values
+     try:
+         CONFIG.load()
+     except Exception as e:
+         self.logger.warning(f"Failed to reload config: {e}")
+
+     # Update in-memory values
+     new_host = CONFIG.get("WebUI.Host", fallback="0.0.0.0")
+     new_port = CONFIG.get("WebUI.Port", fallback=6969)
+     new_token = CONFIG.get("WebUI.Token", fallback=None)
+
+     # Check if restart is actually needed
+     needs_restart = new_host != self.host or new_port != self.port
+
+     # Token can be updated without restart
+     if new_token != self.token:
+         self.token = new_token
+         self.logger.info("WebUI token updated")
+
+     if not needs_restart:
+         self.logger.info("WebUI Host/Port unchanged, restart not required")
+         return
+
+     # Update host/port
+     self.host = new_host
+     self.port = new_port
+
+     # Signal restart
+     self._restart_requested = True
+     self._shutdown_event.set()
+
+     self.logger.info(f"WebUI will restart on {self.host}:{self.port}")
+
+ def _stop_arr_instance(self, arr, category: str):
+     """Stop and cleanup a single Arr instance."""
+     self.logger.info(f"Stopping Arr instance: {category}")
+
+     # Stop processes
+     for loop_kind in ("search", "torrent"):
+         proc_attr = f"process_{loop_kind}_loop"
+         process = getattr(arr, proc_attr, None)
+         if process is not None:
+             try:
+                 process.kill()
+             except Exception:
+                 pass
+             try:
+                 process.terminate()
+             except Exception:
+                 pass
+             try:
+                 self.manager.child_processes.remove(process)
+             except Exception:
+                 pass
+             self.logger.debug(f"Stopped {loop_kind} process for {category}")
+
+     # Delete database files
+     try:
+         if hasattr(arr, "search_db_file") and arr.search_db_file:
+             if arr.search_db_file.exists():
+                 self.logger.info(f"Deleting database file: {arr.search_db_file}")
+                 arr.search_db_file.unlink()
+                 self.logger.success(
+                     f"Deleted database file for {getattr(arr, '_name', category)}"
+                 )
+             # Delete WAL and SHM files
+             for suffix in (".db-wal", ".db-shm"):
+                 aux_file = arr.search_db_file.with_suffix(suffix)
+                 if aux_file.exists():
+                     self.logger.debug(f"Deleting auxiliary file: {aux_file}")
+                     aux_file.unlink()
+     except Exception as e:
+         self.logger.warning(
+             f"Failed to delete database files for {getattr(arr, '_name', category)}: {e}"
+         )
+
+     # Remove from managed_objects
+     self.manager.arr_manager.managed_objects.pop(category, None)
+     self.manager.arr_manager.groups.discard(getattr(arr, "_name", ""))
+     self.manager.arr_manager.uris.discard(getattr(arr, "uri", ""))
+     self.manager.arr_manager.arr_categories.discard(category)
+
+     self.logger.success(f"Stopped and cleaned up Arr instance: {category}")
+
+ def _start_arr_instance(self, instance_name: str):
+     """Create and start a single Arr instance."""
+     self.logger.info(f"Starting Arr instance: {instance_name}")
+
+     # Check if instance is managed
+     if not CONFIG.get(f"{instance_name}.Managed", fallback=False):
+         self.logger.info(f"Instance {instance_name} is not managed, skipping")
+         return
+
+     # Determine client class based on name
+     client_cls = None
+     if re.match(r"^(Rad|rad)arr", instance_name):
+         from pyarr import RadarrAPI
+
+         client_cls = RadarrAPI
+     elif re.match(r"^(Son|son|Anim|anim)arr", instance_name):
+         from pyarr import SonarrAPI
+
+         client_cls = SonarrAPI
+     elif re.match(r"^(Lid|lid)arr", instance_name):
+         from pyarr import LidarrAPI
+
+         client_cls = LidarrAPI
+     else:
+         self.logger.error(f"Unknown Arr type for instance: {instance_name}")
+         return
+
+     try:
+         # Create new Arr instance
+         from qBitrr.arss import Arr
+         from qBitrr.errors import SkipException
+
+         new_arr = Arr(instance_name, self.manager.arr_manager, client_cls=client_cls)
+
+         # Register in manager
+         self.manager.arr_manager.groups.add(instance_name)
+         self.manager.arr_manager.uris.add(new_arr.uri)
+         self.manager.arr_manager.managed_objects[new_arr.category] = new_arr
+         self.manager.arr_manager.arr_categories.add(new_arr.category)
+
+         # Spawn and start processes
+         _, procs = new_arr.spawn_child_processes()
+         for p in procs:
+             try:
+                 p.start()
+                 self.logger.debug(f"Started process (PID: {p.pid}) for {instance_name}")
+             except Exception as e:
+                 self.logger.error(f"Failed to start process for {instance_name}: {e}")
+
+         self.logger.success(
+             f"Started Arr instance: {instance_name} (category: {new_arr.category})"
+         )
+
+     except SkipException:
+         self.logger.info(f"Instance {instance_name} skipped (not managed or disabled)")
+     except Exception as e:
+         self.logger.error(f"Failed to start Arr instance {instance_name}: {e}", exc_info=True)
+
+ def _reload_arr_instance(self, instance_name: str):
+     """Reload a single Arr instance without affecting others."""
+     self.logger.notice(f"Reloading Arr instance: {instance_name}")
+
+     if not hasattr(self.manager, "arr_manager") or not self.manager.arr_manager:
+         self.logger.warning("Cannot reload Arr instance: ArrManager not initialized")
+         return
+
+     managed_objects = self.manager.arr_manager.managed_objects
+
+     # Find the instance by name (key is category, so search by _name attribute)
+     old_arr = None
+     old_category = None
+     for category, arr in list(managed_objects.items()):
+         if getattr(arr, "_name", None) == instance_name:
+             old_arr = arr
+             old_category = category
+             break
+
+     # Check if instance exists in config
+     instance_exists_in_config = instance_name in CONFIG.sections()
+
+     # Handle deletion case
+     if not instance_exists_in_config:
+         if old_arr:
+             self.logger.info(f"Instance {instance_name} removed from config, stopping...")
+             self._stop_arr_instance(old_arr, old_category)
+         else:
+             self.logger.debug(f"Instance {instance_name} not found in config or memory")
+         return
+
+     # Handle update/addition
+     if old_arr:
+         # Update existing - stop old processes first
+         self.logger.info(f"Updating existing Arr instance: {instance_name}")
+         self._stop_arr_instance(old_arr, old_category)
+     else:
+         self.logger.info(f"Adding new Arr instance: {instance_name}")
+
+     # Small delay to ensure cleanup completes
+     time.sleep(0.5)
+
+     # Create new instance
+     self._start_arr_instance(instance_name)
+
+     self.logger.success(f"Successfully reloaded Arr instance: {instance_name}")
+
  def start(self):
      if self._thread and self._thread.is_alive():
          self.logger.debug("WebUI already running on %s:%s", self.host, self.port)
@@ -2210,10 +2449,24 @@ class WebUI:

  def _serve(self):
      try:
+         # Reset shutdown event at start
+         self._shutdown_event.clear()
+
          if self._should_use_dev_server():
              self.logger.info("Using Flask development server for WebUI")
-             self.app.run(host=self.host, port=self.port, debug=False, use_reloader=False)
+             # Flask dev server - will exit on KeyboardInterrupt
+             try:
+                 self.app.run(
+                     host=self.host,
+                     port=self.port,
+                     debug=False,
+                     use_reloader=False,
+                     threaded=True,
+                 )
+             except (KeyboardInterrupt, SystemExit):
+                 pass
              return
+
          try:
              from waitress import serve as waitress_serve
          except Exception:
@@ -2223,15 +2476,32 @@ class WebUI:
              )
              self.app.run(host=self.host, port=self.port, debug=False, use_reloader=False)
              return
+
          self.logger.info("Using Waitress WSGI server for WebUI")
+
+         # For graceful restart capability, we need to use waitress_serve with channels
+         # However, for now we'll use the simpler approach and just run the server
+         # Restart capability will require stopping the entire process
          waitress_serve(
              self.app,
              host=self.host,
              port=self.port,
              ident="qBitrr-WebUI",
          )
-     except Exception:  # pragma: no cover - defensive logging
+
+     except KeyboardInterrupt:
+         self.logger.info("WebUI interrupted")
+     except Exception:
          self.logger.exception("WebUI server terminated unexpectedly")
+     finally:
+         self._server = None
+
+         # If restart was requested, start a new server
+         if self._restart_requested:
+             self._restart_requested = False
+             self.logger.info("Restarting WebUI server...")
+             time.sleep(0.5)  # Brief pause
+             self.start()  # Restart

  def _should_use_dev_server(self) -> bool:
      if self._use_dev_server is not None:
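
The `_serve` changes pair with `_restart_webui`: a restart request flips a flag and sets the shutdown event, and once the blocking server call returns, `_serve` spins the server up again on the possibly changed host and port. A loop-based sketch of the same idea (illustrative only; the real code re-invokes `start()` instead of looping, and, as the diff's comments note, Waitress itself is not yet interrupted by the event):

```python
import threading
import time


class RestartableServer:
    """Minimal sketch of a serve loop that can be asked to rebind and restart."""

    def __init__(self, run_server):
        self._run_server = run_server  # blocking callable, e.g. a waitress.serve wrapper
        self._restart_requested = False
        self._shutdown_event = threading.Event()

    def request_restart(self) -> None:
        self._restart_requested = True
        self._shutdown_event.set()  # ask the serving side to wind down

    def serve_forever(self) -> None:
        while True:
            self._shutdown_event.clear()
            self._run_server()  # blocks until the server exits
            if not self._restart_requested:
                break  # normal shutdown
            self._restart_requested = False
            time.sleep(0.5)  # brief pause before rebinding
```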