qBitrr2 5.6.2-py3-none-any.whl → 5.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qBitrr/arss.py +653 -158
- qBitrr/bundled_data.py +2 -2
- qBitrr/config.py +1 -1
- qBitrr/gen_config.py +84 -41
- qBitrr/main.py +448 -16
- qBitrr/static/assets/ArrView.js +1 -1
- qBitrr/static/assets/ArrView.js.map +1 -1
- qBitrr/static/assets/ConfigView.js +5 -4
- qBitrr/static/assets/ConfigView.js.map +1 -1
- qBitrr/static/assets/LogsView.js +1 -1
- qBitrr/static/assets/LogsView.js.map +1 -1
- qBitrr/static/assets/ProcessesView.js +1 -1
- qBitrr/static/assets/ProcessesView.js.map +1 -1
- qBitrr/static/assets/app.js +2 -2
- qBitrr/static/assets/app.js.map +1 -1
- qBitrr/static/assets/vendor.js +1 -1
- qBitrr/static/assets/vendor.js.map +1 -1
- qBitrr/tables.py +7 -0
- qBitrr/webui.py +48 -1
- {qbitrr2-5.6.2.dist-info → qbitrr2-5.7.1.dist-info}/METADATA +24 -2
- {qbitrr2-5.6.2.dist-info → qbitrr2-5.7.1.dist-info}/RECORD +25 -25
- {qbitrr2-5.6.2.dist-info → qbitrr2-5.7.1.dist-info}/WHEEL +0 -0
- {qbitrr2-5.6.2.dist-info → qbitrr2-5.7.1.dist-info}/entry_points.txt +0 -0
- {qbitrr2-5.6.2.dist-info → qbitrr2-5.7.1.dist-info}/licenses/LICENSE +0 -0
- {qbitrr2-5.6.2.dist-info → qbitrr2-5.7.1.dist-info}/top_level.txt +0 -0
qBitrr/main.py
CHANGED
```diff
@@ -86,6 +86,7 @@ class qBitManager:
     def __init__(self):
         self._name = "Manager"
         self.shutdown_event = Event()
+        self.database_restart_event = Event()  # Signal for coordinated database recovery restart
         self.qBit_Host = CONFIG.get("qBit.Host", fallback="localhost")
         self.qBit_Port = CONFIG.get("qBit.Port", fallback=8105)
         self.qBit_UserName = CONFIG.get("qBit.UserName", fallback=None)
@@ -102,6 +103,11 @@ class qBitManager:
         self._validated_version = False
         self.client = None
         self.current_qbit_version = None
+        # Multi-instance support
+        self.clients: dict[str, qbittorrentapi.Client] = {}
+        self.qbit_versions: dict[str, VersionClass] = {}
+        self.instance_metadata: dict[str, dict] = {}
+        self.instance_health: dict[str, bool] = {}
         if not (QBIT_DISABLED or SEARCH_ONLY):
             self.client = qbittorrentapi.Client(
                 host=self.qBit_Host,
@@ -120,6 +126,15 @@ class qBitManager:
                 e,
             )
             self._version_validator()
+            # Register default instance in multi-instance dictionaries
+            self.clients["default"] = self.client
+            self.qbit_versions["default"] = self.current_qbit_version
+            self.instance_metadata["default"] = {
+                "host": self.qBit_Host,
+                "port": self.qBit_Port,
+                "username": self.qBit_UserName,
+            }
+            self.instance_health["default"] = self._validated_version
         self.expiring_bool = ExpiringSet(max_age_seconds=10)
         self.cache = {}
         self.name_cache = {}
@@ -137,6 +152,8 @@ class qBitManager:
         self._process_restart_counts: dict[tuple[str, str], list[float]] = (
             {}
         )  # (category, role) -> [timestamps]
+        self._failed_spawn_attempts: dict[tuple[str, str], int] = {}  # Track failed spawn attempts
+        self._pending_spawns: list[tuple] = []  # (arr_instance, meta) tuples to retry
         self.auto_restart_enabled = CONFIG.get("Settings.AutoRestartProcesses", fallback=True)
         self.max_process_restarts = CONFIG.get("Settings.MaxProcessRestarts", fallback=5)
         self.process_restart_window = CONFIG.get("Settings.ProcessRestartWindow", fallback=300)
@@ -383,6 +400,8 @@ class qBitManager:
     def _complete_startup(self) -> None:
         started_at = monotonic()
         try:
+            # Initialize all qBit instances before Arr managers
+            self._initialize_qbit_instances()
             arr_manager = ArrManager(self)
             self.arr_manager = arr_manager
             arr_manager.build_arr_instances()
@@ -427,10 +446,195 @@ class qBitManager:
             )
             sys.exit(1)

+    def _initialize_qbit_instances(self) -> None:
+        """
+        Initialize all qBittorrent instances from config.
+
+        Scans config for [qBit] and [qBit-XXX] sections, initializes clients,
+        and populates multi-instance dictionaries. The default [qBit] section
+        is registered as "default" instance.
+        """
+        if QBIT_DISABLED or SEARCH_ONLY:
+            self.logger.debug("qBit disabled or search-only mode; skipping instance init")
+            return
+
+        # Default instance already initialized in __init__
+        self.logger.info("Initialized qBit instance: default")
+
+        # Scan for additional instances (qBit-XXX sections)
+        for section in CONFIG.sections():
+            if section.startswith("qBit-") and section != "qBit":
+                instance_name = section.replace("qBit-", "", 1)
+                try:
+                    self._init_instance(section, instance_name)
+                    self.logger.info("Initialized qBit instance: %s", instance_name)
+                except Exception as e:
+                    self.logger.error(
+                        "Failed to initialize qBit instance '%s': %s", instance_name, e
+                    )
+                    self.instance_health[instance_name] = False
+
+        self.logger.info("Total qBit instances initialized: %d", len(self.clients))
+
+    def _init_instance(self, section_name: str, instance_name: str) -> None:
+        """
+        Initialize a single qBittorrent instance.
+
+        Args:
+            section_name: Config section name (e.g., "qBit-Seedbox")
+            instance_name: Short instance identifier (e.g., "Seedbox")
+
+        Raises:
+            Exception: If connection fails or version is unsupported
+        """
+        host = CONFIG.get(f"{section_name}.Host", fallback="localhost")
+        port = CONFIG.get(f"{section_name}.Port", fallback=8105)
+        username = CONFIG.get(f"{section_name}.UserName", fallback=None)
+        password = CONFIG.get(f"{section_name}.Password", fallback=None)
+
+        self.logger.debug(
+            "Connecting to qBit instance '%s': %s:%s (user: %s)",
+            instance_name,
+            host,
+            port,
+            username,
+        )
+
+        client = qbittorrentapi.Client(
+            host=host,
+            port=port,
+            username=username,
+            password=password,
+            SIMPLE_RESPONSES=False,
+        )
+
+        # Test connection and get version
+        try:
+            version = version_parser.parse(client.app_version())
+            self.logger.debug("Instance '%s' version: %s", instance_name, version)
+        except Exception as e:
+            self.logger.error(
+                "Could not connect to qBit instance '%s' at %s:%s: %s",
+                instance_name,
+                host,
+                port,
+                e,
+            )
+            raise
+
+        # Validate version
+        if version < self.min_supported_version:
+            self.logger.critical(
+                "Instance '%s' version %s is below minimum supported %s",
+                instance_name,
+                version,
+                self.min_supported_version,
+            )
+            raise ValueError(
+                f"Unsupported qBittorrent version {version} for instance {instance_name}"
+            )
+
+        # Register instance
+        self.clients[instance_name] = client
+        self.qbit_versions[instance_name] = version
+        self.instance_metadata[instance_name] = {
+            "host": host,
+            "port": port,
+            "username": username,
+        }
+        self.instance_health[instance_name] = True
+
+    def is_instance_alive(self, instance_name: str = "default") -> bool:
+        """
+        Check if a specific qBittorrent instance is alive and responding.
+
+        Args:
+            instance_name: The instance identifier (default: "default")
+
+        Returns:
+            bool: True if instance is healthy and responding, False otherwise
+        """
+        if instance_name not in self.clients:
+            self.logger.warning("Instance '%s' not found in clients", instance_name)
+            return False
+
+        client = self.clients[instance_name]
+        if client is None:
+            return False
+
+        try:
+            # Quick health check - just get app version
+            client.app_version()
+            self.instance_health[instance_name] = True
+            return True
+        except Exception as e:
+            self.logger.debug("Instance '%s' health check failed: %s", instance_name, e)
+            self.instance_health[instance_name] = False
+            return False
+
+    def get_all_instances(self) -> list[str]:
+        """
+        Get list of all configured qBittorrent instance names.
+
+        Returns:
+            list[str]: List of instance identifiers (e.g., ["default", "Seedbox"])
+        """
+        return list(self.clients.keys())
+
+    def get_healthy_instances(self) -> list[str]:
+        """
+        Get list of all healthy (responding) qBittorrent instances.
+
+        Returns:
+            list[str]: List of healthy instance identifiers
+        """
+        return [name for name in self.clients.keys() if self.is_instance_alive(name)]
+
+    def get_instance_info(self, instance_name: str = "default") -> dict:
+        """
+        Get metadata about a specific qBittorrent instance.
+
+        Args:
+            instance_name: The instance identifier (default: "default")
+
+        Returns:
+            dict: Instance metadata including host, port, version, health status
+        """
+        if instance_name not in self.clients:
+            return {"error": f"Instance '{instance_name}' not found"}
+
+        metadata = self.instance_metadata.get(instance_name, {})
+        return {
+            "name": instance_name,
+            "host": metadata.get("host"),
+            "port": metadata.get("port"),
+            "version": str(self.qbit_versions.get(instance_name, "unknown")),
+            "healthy": self.instance_health.get(instance_name, False),
+        }
+
+    def get_client(self, instance_name: str = "default") -> qbittorrentapi.Client | None:
+        """
+        Get qBittorrent client for a specific instance.
+
+        Args:
+            instance_name: The instance identifier (default: "default")
+
+        Returns:
+            qbittorrentapi.Client | None: Client instance, or None if not found/unhealthy
+        """
+        if instance_name not in self.clients:
+            self.logger.warning("Instance '%s' not found in clients", instance_name)
+            return None
+        return self.clients[instance_name]
+
     # @response_text(str)
     # @login_required
-    def app_version(self, **kwargs):
-
+    def app_version(self, instance_name: str = "default", **kwargs):
+        """Get qBittorrent app version for a specific instance."""
+        client = self.get_client(instance_name)
+        if client is None:
+            return None
+        return client._get(
             _name=APINames.Application,
             _method="version",
             _retries=0,
@@ -438,23 +642,44 @@ class qBitManager:
             **kwargs,
         )

-    def transfer_info(self, **kwargs):
-        """
-
+    def transfer_info(self, instance_name: str = "default", **kwargs):
+        """
+        Proxy transfer info requests to a specific qBittorrent instance.
+
+        Args:
+            instance_name: The instance identifier (default: "default")
+            **kwargs: Additional arguments to pass to transfer_info
+
+        Returns:
+            dict: Transfer info or connection status
+        """
+        client = self.get_client(instance_name)
+        if client is None:
             return {"connection_status": "disconnected"}
-        return
+        return client.transfer_info(**kwargs)

     @property
     def is_alive(self) -> bool:
+        """
+        Check if the default qBittorrent instance is alive.
+
+        Backward-compatible property that delegates to is_instance_alive("default").
+        Uses caching via expiring_bool to avoid excessive health checks.
+        """
         try:
             if self.client is None:
                 return False
             if 1 in self.expiring_bool:
                 return True
-
-            self.
-
-
+            # Delegate to instance health check
+            alive = self.is_instance_alive("default")
+            if alive:
+                self.logger.trace(
+                    "Successfully connected to %s:%s", self.qBit_Host, self.qBit_Port
+                )
+                self.expiring_bool.add(1)
+                return True
+            self.logger.warning("Could not connect to %s:%s", self.qBit_Host, self.qBit_Port)
         except requests.RequestException:
             self.logger.warning("Could not connect to %s:%s", self.qBit_Host, self.qBit_Port)
             self.should_delay_torrent_scan = True
@@ -474,6 +699,8 @@ class qBitManager:
             self.logger.warning(
                 "Startup thread still running after 60s; managing available workers."
             )
+        started_processes = []
+        failed_processes = []
         for proc in list(self.child_processes):
             try:
                 # Check if process has already been started
@@ -488,20 +715,153 @@ class qBitManager:
                     )
                     continue

-                proc.start()
                 meta = self._process_registry.get(proc, {})
-                self.logger.
-                    "
+                self.logger.info(
+                    "Starting %s worker for category '%s'...",
                     meta.get("role", "worker"),
                     meta.get("category", "unknown"),
                 )
+                proc.start()
+
+                # Verify process actually started (give it a moment)
+                time.sleep(0.1)
+                if proc.is_alive():
+                    self.logger.info(
+                        "Successfully started %s worker for category '%s' (PID: %s)",
+                        meta.get("role", "worker"),
+                        meta.get("category", "unknown"),
+                        proc.pid,
+                    )
+                    started_processes.append((meta.get("role"), meta.get("category")))
+                else:
+                    self.logger.error(
+                        "Process %s worker for category '%s' started but immediately died (exitcode: %s)",
+                        meta.get("role", "worker"),
+                        meta.get("category", "unknown"),
+                        proc.exitcode,
+                    )
+                    failed_processes.append((meta.get("role"), meta.get("category")))
             except Exception as exc:
-                self.
-
-
+                meta = self._process_registry.get(proc, {})
+                self.logger.critical(
+                    "FAILED to start %s worker for category '%s': %s",
+                    meta.get("role", "worker"),
+                    meta.get("category", "unknown"),
+                    exc,
                     exc_info=exc,
                 )
+                failed_processes.append((meta.get("role"), meta.get("category")))
+
+        # Log summary
+        if started_processes:
+            self.logger.info(
+                "Started %d worker process(es): %s",
+                len(started_processes),
+                ", ".join(f"{role}({cat})" for role, cat in started_processes),
+            )
+        if failed_processes:
+            self.logger.critical(
+                "FAILED to start %d worker process(es): %s - Will retry periodically",
+                len(failed_processes),
+                ", ".join(f"{role}({cat})" for role, cat in failed_processes),
+            )
+            # Track failed processes for retry
+            for role, category in failed_processes:
+                key = (category, role)
+                self._failed_spawn_attempts[key] = self._failed_spawn_attempts.get(key, 0) + 1
+                # Add to retry queue if not already there
+                if hasattr(self, "arr_manager") and self.arr_manager:
+                    for arr in self.arr_manager.managed_objects.values():
+                        if arr.category == category:
+                            # Check if already in pending spawns (avoid duplicates)
+                            meta = {"category": category, "role": role, "name": arr._name}
+                            already_pending = any(
+                                m.get("category") == category and m.get("role") == role
+                                for _, m in self._pending_spawns
+                            )
+                            if not already_pending:
+                                self._pending_spawns.append((arr, meta))
+                            break
         while not self.shutdown_event.is_set():
+            # Check for database restart signal
+            if self.database_restart_event.is_set():
+                self.logger.critical(
+                    "Database restart signal detected - terminating ALL processes for coordinated restart..."
+                )
+                # Terminate all child processes
+                for proc in list(self.child_processes):
+                    if proc.is_alive():
+                        self.logger.warning(
+                            "Terminating %s process for database recovery",
+                            self._process_registry.get(proc, {}).get("role", "worker"),
+                        )
+                        proc.terminate()
+                # Wait for processes to terminate
+                time.sleep(2)
+                # Force kill any that didn't terminate
+                for proc in list(self.child_processes):
+                    if proc.is_alive():
+                        self.logger.error(
+                            "Force killing %s process",
+                            self._process_registry.get(proc, {}).get("role", "worker"),
+                        )
+                        proc.kill()
+                # Clear all processes
+                self.child_processes.clear()
+                self._process_registry.clear()
+                # Clear the event
+                self.database_restart_event.clear()
+                # Restart all Arr instances
+                self.logger.critical("Restarting all Arr instances after database recovery...")
+                if hasattr(self, "arr_manager") and self.arr_manager:
+                    for arr in self.arr_manager.managed_objects.values():
+                        try:
+                            worker_count, procs = arr.spawn_child_processes()
+                            for proc in procs:
+                                role = (
+                                    "search"
+                                    if getattr(arr, "process_search_loop", None) is proc
+                                    else "torrent"
+                                )
+                                self._process_registry[proc] = {
+                                    "category": getattr(arr, "category", ""),
+                                    "name": getattr(arr, "_name", ""),
+                                    "role": role,
+                                }
+                                # CRITICAL: Actually start the process!
+                                try:
+                                    proc.start()
+                                    time.sleep(0.1)  # Brief pause to let process initialize
+                                    if proc.is_alive():
+                                        self.logger.info(
+                                            "Started %s worker for %s (PID: %s)",
+                                            role,
+                                            arr._name,
+                                            proc.pid,
+                                        )
+                                    else:
+                                        self.logger.error(
+                                            "Respawned %s worker for %s died immediately (exitcode: %s)",
+                                            role,
+                                            arr._name,
+                                            proc.exitcode,
+                                        )
+                                except Exception as start_exc:
+                                    self.logger.error(
+                                        "Failed to start respawned %s worker for %s: %s",
+                                        role,
+                                        arr._name,
+                                        start_exc,
+                                    )
+                            self.logger.info(
+                                "Respawned %d process(es) for %s", worker_count, arr._name
+                            )
+                        except Exception as e:
+                            self.logger.exception(
+                                "Failed to respawn processes for %s: %s", arr._name, e
+                            )
+                continue
+
             any_alive = False
             for proc in list(self.child_processes):
                 if proc.is_alive():
@@ -544,6 +904,78 @@ class qBitManager:
                 with contextlib.suppress(ValueError):
                     self.child_processes.remove(proc)

+            # Retry failed process spawns
+            if self._pending_spawns and self.auto_restart_enabled:
+                retry_spawns = []
+                for arr, meta in self._pending_spawns:
+                    category = meta.get("category", "")
+                    role = meta.get("role", "")
+                    key = (category, role)
+                    attempts = self._failed_spawn_attempts.get(key, 0)
+
+                    # Exponential backoff: 30s, 60s, 120s, 240s, 480s (max 8min)
+                    # Retry indefinitely but with increasing delays
+                    self.logger.info(
+                        "Retrying spawn of %s worker for '%s' (attempt #%d)...",
+                        role,
+                        category,
+                        attempts + 1,
+                    )
+
+                    try:
+                        worker_count, procs = arr.spawn_child_processes()
+                        if worker_count > 0:
+                            for proc in procs:
+                                proc_role = (
+                                    "search"
+                                    if getattr(arr, "process_search_loop", None) is proc
+                                    else "torrent"
+                                )
+                                if proc_role == role:  # Only start the one we're retrying
+                                    try:
+                                        proc.start()
+                                        time.sleep(0.1)
+                                        if proc.is_alive():
+                                            self.logger.info(
+                                                "Successfully spawned %s worker for '%s' on retry (PID: %s)",
+                                                role,
+                                                category,
+                                                proc.pid,
+                                            )
+                                            self._process_registry[proc] = meta
+                                            # CRITICAL: Add to child_processes so it's monitored
+                                            if proc not in self.child_processes:
+                                                self.child_processes.append(proc)
+                                            # Clear failed attempts on success
+                                            self._failed_spawn_attempts.pop(key, None)
+                                        else:
+                                            self.logger.error(
+                                                "Retry spawn failed: %s worker for '%s' died immediately",
+                                                role,
+                                                category,
+                                            )
+                                            retry_spawns.append((arr, meta))
+                                            self._failed_spawn_attempts[key] = attempts + 1
+                                    except Exception as exc:
+                                        self.logger.error(
+                                            "Retry spawn failed for %s worker '%s': %s",
+                                            role,
+                                            category,
+                                            exc,
+                                        )
+                                        retry_spawns.append((arr, meta))
+                                        self._failed_spawn_attempts[key] = attempts + 1
+                    except Exception as exc:
+                        self.logger.error(
+                            "Failed to respawn processes for retry: %s",
+                            exc,
+                        )
+                        retry_spawns.append((arr, meta))
+                        self._failed_spawn_attempts[key] = attempts + 1
+
+                # Update pending spawns list
+                self._pending_spawns = retry_spawns
+
             if not self.child_processes:
                 if not any_alive:
                     break
```