qBitrr2 5.7.0__py3-none-any.whl → 5.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qBitrr/bundled_data.py CHANGED
@@ -1,5 +1,5 @@
-version = "5.7.0"
-git_hash = "f7e3e092"
+version = "5.8.0"
+git_hash = "ae0807b0"
 license_text = (
     "Licence can be found on:\n\nhttps://github.com/Feramance/qBitrr/blob/master/LICENSE"
 )
qBitrr/config.py CHANGED
@@ -151,7 +151,7 @@ PING_URLS = ENVIRO_CONFIG.settings.ping_urls or CONFIG.get(
     "Settings.PingURLS", fallback=["one.one.one.one", "dns.google.com"]
 )
 IGNORE_TORRENTS_YOUNGER_THAN = ENVIRO_CONFIG.settings.ignore_torrents_younger_than or CONFIG.get(
-    "Settings.IgnoreTorrentsYoungerThan", fallback=600
+    "Settings.IgnoreTorrentsYoungerThan", fallback=180
 )
 QBIT_DISABLED = (
     CONFIG.get("qBit.Disabled", fallback=False)
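
For readers unfamiliar with qBitrr's settings resolution, the pattern above (`ENVIRO_CONFIG.settings.… or CONFIG.get(…, fallback=…)`) prefers an environment override, then the config file, then the hardcoded fallback, which this release lowers from 600 to 180 seconds. A minimal standalone sketch of that resolution chain (the names `resolve_setting`, `env_value`, and `config` are illustrative, not qBitrr's API); note that a falsy override such as 0 falls through with this idiom:

def resolve_setting(env_value, config: dict, key: str, fallback):
    # `or` keeps the env override only when it is truthy; a legitimate
    # falsy override (e.g. 0 seconds) would fall through to config/fallback.
    return env_value or config.get(key, fallback)

# With the new 5.8.0 default of 180:
assert resolve_setting(None, {}, "Settings.IgnoreTorrentsYoungerThan", 180) == 180
assert resolve_setting(600, {}, "Settings.IgnoreTorrentsYoungerThan", 180) == 600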
qBitrr/database.py ADDED
@@ -0,0 +1,79 @@
+"""Single consolidated database for all Arr instances."""
+
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+
+from peewee import SqliteDatabase
+
+from qBitrr.config import APPDATA_FOLDER
+from qBitrr.db_lock import with_database_retry
+from qBitrr.tables import (
+    AlbumFilesModel,
+    AlbumQueueModel,
+    ArtistFilesModel,
+    EpisodeFilesModel,
+    EpisodeQueueModel,
+    FilesQueued,
+    MovieQueueModel,
+    MoviesFilesModel,
+    SeriesFilesModel,
+    TorrentLibrary,
+    TrackFilesModel,
+)
+
+logger = logging.getLogger("qBitrr.database")
+
+# Global database instance
+_db: SqliteDatabase | None = None
+
+
+def get_database() -> SqliteDatabase:
+    """Get or create the global database instance."""
+    global _db
+    if _db is None:
+        db_path = Path(APPDATA_FOLDER) / "qbitrr.db"
+        db_path.parent.mkdir(parents=True, exist_ok=True)
+
+        _db = SqliteDatabase(
+            str(db_path),
+            pragmas={
+                "journal_mode": "wal",
+                "cache_size": -64_000,
+                "foreign_keys": 1,
+                "ignore_check_constraints": 0,
+                "synchronous": 0,
+                "read_uncommitted": 1,
+            },
+            timeout=15,
+        )
+
+        # Connect with retry logic
+        with_database_retry(
+            lambda: _db.connect(reuse_if_open=True),
+            logger=logger,
+        )
+
+        # Bind models to database
+        models = [
+            MoviesFilesModel,
+            EpisodeFilesModel,
+            AlbumFilesModel,
+            SeriesFilesModel,
+            ArtistFilesModel,
+            TrackFilesModel,
+            MovieQueueModel,
+            EpisodeQueueModel,
+            AlbumQueueModel,
+            FilesQueued,
+            TorrentLibrary,
+        ]
+        _db.bind(models)
+
+        # Create all tables
+        _db.create_tables(models, safe=True)
+
+        logger.info("Initialized single database: %s", db_path)
+
+    return _db
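
Since the diff alone doesn't show call sites, here is a minimal sketch of how a caller would be expected to use the new consolidated database. The `TorrentLibrary` query is an assumption for illustration only (its fields are not shown in this diff):

# Hypothetical caller: get_database() is idempotent, so every module shares
# one SqliteDatabase and the models it binds.
from qBitrr.database import get_database
from qBitrr.tables import TorrentLibrary  # bound by get_database()

db = get_database()
with db.atomic():  # peewee transaction context
    total = TorrentLibrary.select().count()  # no per-model setup needed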
qBitrr/gen_config.py CHANGED
@@ -385,48 +385,56 @@ def _gen_default_torrent_table(category: str, cat_default: Table):
         "CaseSensitiveMatches",
         False,
     )
-    if "anime" not in category.lower():
-        _gen_default_line(
-            torrent_table,
-            [
-                "These regex values will match any folder where the full name matches the specified values here, comma separated strings.",
-                "These regex need to be escaped, that's why you see so many backslashes.",
-            ],
-            "FolderExclusionRegex",
-            [
-                r"\bextras?\b",
-                r"\bfeaturettes?\b",
-                r"\bsamples?\b",
-                r"\bscreens?\b",
-                r"\bnc(ed|op)?(\\d+)?\b",
-            ],
-        )
+    # Set folder exclusions based on category type
+    if "anime" in category.lower():
+        # Anime-specific exclusions (includes OVA, specials, NCOP/NCED)
+        folder_exclusions = [
+            r"\bextras?\b",
+            r"\bfeaturettes?\b",
+            r"\bsamples?\b",
+            r"\bscreens?\b",
+            r"\bspecials?\b",
+            r"\bova\b",
+            r"\bnc(ed|op)?(\\d+)?\b",
+        ]
+    elif "lidarr" in category.lower():
+        # Music-specific exclusions (no NCOP/NCED, no featurettes)
+        folder_exclusions = [
+            r"\bextras?\b",
+            r"\bsamples?\b",
+            r"\bscreens?\b",
+        ]
     else:
-        _gen_default_line(
-            torrent_table,
-            [
-                "These regex values will match any folder where the full name matches the specified values here, comma separated strings.",
-                "These regex need to be escaped, that's why you see so many backslashes.",
-            ],
-            "FolderExclusionRegex",
-            [
-                r"\bextras?\b",
-                r"\bfeaturettes?\b",
-                r"\bsamples?\b",
-                r"\bscreens?\b",
-                r"\bspecials?\b",
-                r"\bova\b",
-                r"\bnc(ed|op)?(\\d+)?\b",
-            ],
-        )
+        # Standard video exclusions (movies/TV shows)
+        folder_exclusions = [
+            r"\bextras?\b",
+            r"\bfeaturettes?\b",
+            r"\bsamples?\b",
+            r"\bscreens?\b",
+            r"\bnc(ed|op)?(\\d+)?\b",
+        ]
+
     _gen_default_line(
         torrent_table,
         [
             "These regex values will match any folder where the full name matches the specified values here, comma separated strings.",
             "These regex need to be escaped, that's why you see so many backslashes.",
         ],
-        "FileNameExclusionRegex",
-        [
+        "FolderExclusionRegex",
+        folder_exclusions,
+    )
+    # Set filename exclusions based on category type
+    if "lidarr" in category.lower():
+        # Music-specific exclusions (no NCOP/NCED, no "music video" since that's actual music content)
+        filename_exclusions = [
+            r"\bsample\b",
+            r"brarbg.com\b",
+            r"\btrailer\b",
+            r"comandotorrents.com",
+        ]
+    else:
+        # Video exclusions (movies/TV/anime)
+        filename_exclusions = [
             r"\bncop\\d+?\b",
             r"\bnced\\d+?\b",
             r"\bsample\b",
@@ -434,13 +442,40 @@ def _gen_default_torrent_table(category: str, cat_default: Table):
             r"\btrailer\b",
             r"music video",
             r"comandotorrents.com",
+        ]
+
+    _gen_default_line(
+        torrent_table,
+        [
+            "These regex values will match any folder where the full name matches the specified values here, comma separated strings.",
+            "These regex need to be escaped, that's why you see so many backslashes.",
         ],
-    )
+        "FileNameExclusionRegex",
+        filename_exclusions,
+    )
+    # Set appropriate file extensions based on category type
+    if "lidarr" in category.lower():
+        file_extensions = [
+            ".mp3",
+            ".flac",
+            ".m4a",
+            ".aac",
+            ".ogg",
+            ".opus",
+            ".wav",
+            ".ape",
+            ".wma",
+            ".!qB",
+            ".parts",
+        ]
+    else:
+        file_extensions = [".mp4", ".mkv", ".sub", ".ass", ".srt", ".!qB", ".parts"]
+
     _gen_default_line(
         torrent_table,
         "Only files with these extensions will be allowed to be downloaded, comma separated strings or regex, leave it empty to allow all extensions",
         "FileExtensionAllowlist",
-        [".mp4", ".mkv", ".sub", ".ass", ".srt", ".!qB", ".parts"],
+        file_extensions,
     )
     _gen_default_line(
         torrent_table,
@@ -698,9 +733,15 @@ def _gen_default_search_table(category: str, cat_default: Table):
     )
     # SearchByYear doesn't apply to Lidarr (music albums)
     if "lidarr" not in category.lower():
+        if "sonarr" in category.lower():
+            search_by_year_comment = (
+                "It will order searches by the year the episode was first aired"
+            )
+        else:
+            search_by_year_comment = "It will order searches by the year the movie was released"
         _gen_default_line(
             search_table,
-            "It will order searches by the year the EPISODE was first aired",
+            search_by_year_comment,
             "SearchByYear",
             True,
         )
@@ -711,12 +752,14 @@
         False,
     )
     _gen_default_line(
-        search_table, "Delay between request searches in seconds", "SearchRequestsEvery", 300
+        search_table,
+        "Delay (in seconds) between checking for new Overseerr/Ombi requests. Does NOT affect delay between individual search commands (use Settings.SearchLoopDelay for that).",
+        "SearchRequestsEvery",
+        300,
     )
     _gen_default_line(
         search_table,
-        "Search movies which already have a file in the database in hopes of finding a "
-        "better quality version.",
+        "Search media which already have a file in hopes of finding a better quality version.",
         "DoUpgradeSearch",
         False,
     )
qBitrr/main.py CHANGED
@@ -53,28 +53,34 @@ def _mask_secret(value: str | None) -> str:
 
 def _delete_all_databases() -> None:
     """
-    Delete all database files from the APPDATA_FOLDER on startup.
+    Delete old per-instance database files from the APPDATA_FOLDER on startup.
 
-    This includes:
-    - All .db files (SQLite databases)
-    - All .db-wal files (Write-Ahead Log files)
-    - All .db-shm files (Shared Memory files)
+    Preserves the consolidated database (qbitrr.db) and Torrents.db.
+    Deletes old per-instance databases and their WAL/SHM files.
    """
     db_patterns = ["*.db", "*.db-wal", "*.db-shm"]
     deleted_files = []
+    # Files to preserve (consolidated database)
+    preserve_files = {"qbitrr.db", "Torrents.db"}
 
     for pattern in db_patterns:
         for db_file in glob.glob(str(APPDATA_FOLDER.joinpath(pattern))):
+            base_name = os.path.basename(db_file)
+            # Preserve consolidated database and its WAL/SHM files
+            should_preserve = any(base_name.startswith(f) for f in preserve_files)
+            if should_preserve:
+                continue
+
             try:
                 os.remove(db_file)
-                deleted_files.append(os.path.basename(db_file))
+                deleted_files.append(base_name)
             except Exception as e:
                 logger.error("Failed to delete database file %s: %s", db_file, e)
 
     if deleted_files:
-        logger.info("Deleted database files on startup: %s", ", ".join(deleted_files))
+        logger.info("Deleted old database files on startup: %s", ", ".join(deleted_files))
     else:
-        logger.debug("No database files found to delete on startup")
+        logger.debug("No old database files found to delete on startup")
 
 
 class qBitManager:
@@ -86,6 +92,7 @@ class qBitManager:
     def __init__(self):
         self._name = "Manager"
         self.shutdown_event = Event()
+        self.database_restart_event = Event()  # Signal for coordinated database recovery restart
         self.qBit_Host = CONFIG.get("qBit.Host", fallback="localhost")
         self.qBit_Port = CONFIG.get("qBit.Port", fallback=8105)
         self.qBit_UserName = CONFIG.get("qBit.UserName", fallback=None)
@@ -151,6 +158,8 @@ class qBitManager:
         self._process_restart_counts: dict[tuple[str, str], list[float]] = (
             {}
         )  # (category, role) -> [timestamps]
+        self._failed_spawn_attempts: dict[tuple[str, str], int] = {}  # Track failed spawn attempts
+        self._pending_spawns: list[tuple] = []  # (arr_instance, meta) tuples to retry
         self.auto_restart_enabled = CONFIG.get("Settings.AutoRestartProcesses", fallback=True)
         self.max_process_restarts = CONFIG.get("Settings.MaxProcessRestarts", fallback=5)
         self.process_restart_window = CONFIG.get("Settings.ProcessRestartWindow", fallback=300)
@@ -696,6 +705,8 @@ class qBitManager:
            self.logger.warning(
                "Startup thread still running after 60s; managing available workers."
            )
+        started_processes = []
+        failed_processes = []
        for proc in list(self.child_processes):
            try:
                # Check if process has already been started
@@ -710,20 +721,153 @@ class qBitManager:
                    )
                    continue
 
-                proc.start()
                meta = self._process_registry.get(proc, {})
-                self.logger.debug(
-                    "Started %s worker for category '%s'",
+                self.logger.info(
+                    "Starting %s worker for category '%s'...",
                    meta.get("role", "worker"),
                    meta.get("category", "unknown"),
                )
+                proc.start()
+
+                # Verify process actually started (give it a moment)
+                time.sleep(0.1)
+                if proc.is_alive():
+                    self.logger.info(
+                        "Successfully started %s worker for category '%s' (PID: %s)",
+                        meta.get("role", "worker"),
+                        meta.get("category", "unknown"),
+                        proc.pid,
+                    )
+                    started_processes.append((meta.get("role"), meta.get("category")))
+                else:
+                    self.logger.error(
+                        "Process %s worker for category '%s' started but immediately died (exitcode: %s)",
+                        meta.get("role", "worker"),
+                        meta.get("category", "unknown"),
+                        proc.exitcode,
+                    )
+                    failed_processes.append((meta.get("role"), meta.get("category")))
            except Exception as exc:
-                self.logger.exception(
-                    "Failed to start worker process %s",
-                    getattr(proc, "name", repr(proc)),
+                meta = self._process_registry.get(proc, {})
+                self.logger.critical(
+                    "FAILED to start %s worker for category '%s': %s",
+                    meta.get("role", "worker"),
+                    meta.get("category", "unknown"),
+                    exc,
                    exc_info=exc,
                )
+                failed_processes.append((meta.get("role"), meta.get("category")))
+
+        # Log summary
+        if started_processes:
+            self.logger.info(
+                "Started %d worker process(es): %s",
+                len(started_processes),
+                ", ".join(f"{role}({cat})" for role, cat in started_processes),
+            )
+        if failed_processes:
+            self.logger.critical(
+                "FAILED to start %d worker process(es): %s - Will retry periodically",
+                len(failed_processes),
+                ", ".join(f"{role}({cat})" for role, cat in failed_processes),
+            )
+            # Track failed processes for retry
+            for role, category in failed_processes:
+                key = (category, role)
+                self._failed_spawn_attempts[key] = self._failed_spawn_attempts.get(key, 0) + 1
+                # Add to retry queue if not already there
+                if hasattr(self, "arr_manager") and self.arr_manager:
+                    for arr in self.arr_manager.managed_objects.values():
+                        if arr.category == category:
+                            # Check if already in pending spawns (avoid duplicates)
+                            meta = {"category": category, "role": role, "name": arr._name}
+                            already_pending = any(
+                                m.get("category") == category and m.get("role") == role
+                                for _, m in self._pending_spawns
+                            )
+                            if not already_pending:
+                                self._pending_spawns.append((arr, meta))
+                            break
        while not self.shutdown_event.is_set():
+            # Check for database restart signal
+            if self.database_restart_event.is_set():
+                self.logger.critical(
+                    "Database restart signal detected - terminating ALL processes for coordinated restart..."
+                )
+                # Terminate all child processes
+                for proc in list(self.child_processes):
+                    if proc.is_alive():
+                        self.logger.warning(
+                            "Terminating %s process for database recovery",
+                            self._process_registry.get(proc, {}).get("role", "worker"),
+                        )
+                        proc.terminate()
+                # Wait for processes to terminate
+                time.sleep(2)
+                # Force kill any that didn't terminate
+                for proc in list(self.child_processes):
+                    if proc.is_alive():
+                        self.logger.error(
+                            "Force killing %s process",
+                            self._process_registry.get(proc, {}).get("role", "worker"),
+                        )
+                        proc.kill()
+                # Clear all processes
+                self.child_processes.clear()
+                self._process_registry.clear()
+                # Clear the event
+                self.database_restart_event.clear()
+                # Restart all Arr instances
+                self.logger.critical("Restarting all Arr instances after database recovery...")
+                if hasattr(self, "arr_manager") and self.arr_manager:
+                    for arr in self.arr_manager.managed_objects.values():
+                        try:
+                            worker_count, procs = arr.spawn_child_processes()
+                            for proc in procs:
+                                role = (
+                                    "search"
+                                    if getattr(arr, "process_search_loop", None) is proc
+                                    else "torrent"
+                                )
+                                self._process_registry[proc] = {
+                                    "category": getattr(arr, "category", ""),
+                                    "name": getattr(arr, "_name", ""),
+                                    "role": role,
+                                }
+                                # CRITICAL: Actually start the process!
+                                try:
+                                    proc.start()
+                                    time.sleep(0.1)  # Brief pause to let process initialize
+                                    if proc.is_alive():
+                                        self.logger.info(
+                                            "Started %s worker for %s (PID: %s)",
+                                            role,
+                                            arr._name,
+                                            proc.pid,
+                                        )
+                                    else:
+                                        self.logger.error(
+                                            "Respawned %s worker for %s died immediately (exitcode: %s)",
+                                            role,
+                                            arr._name,
+                                            proc.exitcode,
+                                        )
+                                except Exception as start_exc:
+                                    self.logger.error(
+                                        "Failed to start respawned %s worker for %s: %s",
+                                        role,
+                                        arr._name,
+                                        start_exc,
+                                    )
+                            self.logger.info(
+                                "Respawned %d process(es) for %s", worker_count, arr._name
+                            )
+                        except Exception as e:
+                            self.logger.exception(
+                                "Failed to respawn processes for %s: %s", arr._name, e
+                            )
+                continue
+
            any_alive = False
            for proc in list(self.child_processes):
                if proc.is_alive():
@@ -766,6 +910,78 @@ class qBitManager:
                with contextlib.suppress(ValueError):
                    self.child_processes.remove(proc)
 
+            # Retry failed process spawns
+            if self._pending_spawns and self.auto_restart_enabled:
+                retry_spawns = []
+                for arr, meta in self._pending_spawns:
+                    category = meta.get("category", "")
+                    role = meta.get("role", "")
+                    key = (category, role)
+                    attempts = self._failed_spawn_attempts.get(key, 0)
+
+                    # Exponential backoff: 30s, 60s, 120s, 240s, 480s (max 8min)
+                    # Retry indefinitely but with increasing delays
+                    self.logger.info(
+                        "Retrying spawn of %s worker for '%s' (attempt #%d)...",
+                        role,
+                        category,
+                        attempts + 1,
+                    )
+
+                    try:
+                        worker_count, procs = arr.spawn_child_processes()
+                        if worker_count > 0:
+                            for proc in procs:
+                                proc_role = (
+                                    "search"
+                                    if getattr(arr, "process_search_loop", None) is proc
+                                    else "torrent"
+                                )
+                                if proc_role == role:  # Only start the one we're retrying
+                                    try:
+                                        proc.start()
+                                        time.sleep(0.1)
+                                        if proc.is_alive():
+                                            self.logger.info(
+                                                "Successfully spawned %s worker for '%s' on retry (PID: %s)",
+                                                role,
+                                                category,
+                                                proc.pid,
+                                            )
+                                            self._process_registry[proc] = meta
+                                            # CRITICAL: Add to child_processes so it's monitored
+                                            if proc not in self.child_processes:
+                                                self.child_processes.append(proc)
+                                            # Clear failed attempts on success
+                                            self._failed_spawn_attempts.pop(key, None)
+                                        else:
+                                            self.logger.error(
+                                                "Retry spawn failed: %s worker for '%s' died immediately",
+                                                role,
+                                                category,
+                                            )
+                                            retry_spawns.append((arr, meta))
+                                            self._failed_spawn_attempts[key] = attempts + 1
+                                    except Exception as exc:
+                                        self.logger.error(
+                                            "Retry spawn failed for %s worker '%s': %s",
+                                            role,
+                                            category,
+                                            exc,
+                                        )
+                                        retry_spawns.append((arr, meta))
+                                        self._failed_spawn_attempts[key] = attempts + 1
+                    except Exception as exc:
+                        self.logger.error(
+                            "Failed to respawn processes for retry: %s",
+                            exc,
+                        )
+                        retry_spawns.append((arr, meta))
+                        self._failed_spawn_attempts[key] = attempts + 1
+
+                # Update pending spawns list
+                self._pending_spawns = retry_spawns
+
            if not self.child_processes:
                if not any_alive:
                    break
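
The retry comments above name an exponential backoff schedule (30s, 60s, 120s, 240s, capped at 480s), but the gating logic itself is not visible in these hunks. A sketch of what such a gate could look like, assuming retries are keyed by (category, role) as in `_failed_spawn_attempts`; `RetryGate` and `backoff_delay` are illustrative names, not qBitrr's API:

import time

def backoff_delay(attempts: int, base: float = 30.0, cap: float = 480.0) -> float:
    # 30, 60, 120, 240, then capped at 480 seconds (8 minutes).
    return min(base * (2 ** attempts), cap)

class RetryGate:
    """Tracks when a (category, role) key may next be retried."""

    def __init__(self) -> None:
        self._next_try: dict[tuple[str, str], float] = {}

    def ready(self, key: tuple[str, str], attempts: int) -> bool:
        # Allow a retry only once its backoff window has elapsed.
        now = time.monotonic()
        if now >= self._next_try.get(key, 0.0):
            self._next_try[key] = now + backoff_delay(attempts)
            return True
        return False

The hunks that follow carry no file header in this diff; judging by `SearchActivity` and the removed `webui_activity.db` setup, they appear to belong to the WebUI search-activity module, now migrated onto the consolidated database.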
@@ -3,52 +3,25 @@ from __future__ import annotations
 from threading import RLock
 from typing import Any
 
-from peewee import Model, SqliteDatabase, TextField
+from peewee import Model, TextField
 
-from qBitrr.db_lock import with_database_retry
-from qBitrr.home_path import APPDATA_FOLDER
+from qBitrr.database import get_database
 
 _DB_LOCK = RLock()
-_DB_INSTANCE: SqliteDatabase | None = None
 
 
-def _get_database() -> SqliteDatabase:
-    global _DB_INSTANCE
-    if _DB_INSTANCE is None:
-        path = APPDATA_FOLDER.joinpath("webui_activity.db")
-        path.parent.mkdir(parents=True, exist_ok=True)
-        _DB_INSTANCE = SqliteDatabase(
-            str(path),
-            pragmas={
-                "journal_mode": "wal",
-                "cache_size": -64_000,
-                "foreign_keys": 1,
-                "ignore_check_constraints": 0,
-                "synchronous": 0,
-                "read_uncommitted": 1,
-            },
-            timeout=15,
-            check_same_thread=False,
-        )
-    return _DB_INSTANCE
-
-
-class BaseModel(Model):
-    class Meta:
-        database = _get_database()
-
-
-class SearchActivity(BaseModel):
+class SearchActivity(Model):
     category = TextField(primary_key=True)
     summary = TextField(null=True)
     timestamp = TextField(null=True)
 
 
 def _ensure_tables() -> None:
-    db = _get_database()
+    db = get_database()
     with _DB_LOCK:
-        # Connect with retry logic for transient I/O errors
-        with_database_retry(lambda: db.connect(reuse_if_open=True))
+        # Bind model to database if not already bound
+        if not SearchActivity._meta.database:
+            db.bind([SearchActivity])
         db.create_tables([SearchActivity], safe=True)
@@ -59,7 +32,7 @@ def record_search_activity(category: str, summary: str | None, timestamp: str |
     if timestamp is not None and not isinstance(timestamp, str):
         timestamp = str(timestamp)
     data: dict[str, Any] = {"summary": summary, "timestamp": timestamp}
-    with _get_database().atomic():
+    with get_database().atomic():
         SearchActivity.insert(category=category, **data).on_conflict(
             conflict_target=[SearchActivity.category],
             update=data,
@@ -69,9 +42,6 @@
 def fetch_search_activities() -> dict[str, dict[str, str | None]]:
     _ensure_tables()
     activities: dict[str, dict[str, str | None]] = {}
-    db = _get_database()
-    # Connect with retry logic for transient I/O errors
-    with_database_retry(lambda: db.connect(reuse_if_open=True))
     try:
         query = SearchActivity.select()
     except Exception:
@@ -88,5 +58,5 @@ def clear_search_activity(category: str) -> None:
     if not category:
         return
     _ensure_tables()
-    with _get_database().atomic():
+    with get_database().atomic():
         SearchActivity.delete().where(SearchActivity.category == category).execute()
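
The migration above leaves `SearchActivity` without a `Meta.database` and binds it lazily via `db.bind([...])`. A self-contained sketch of that deferred-binding pattern in peewee, using an in-memory database and an illustrative `Activity` model (not qBitrr's):

from peewee import Model, SqliteDatabase, TextField

class Activity(Model):  # no Meta.database: unbound until bind() is called
    category = TextField(primary_key=True)
    summary = TextField(null=True)

db = SqliteDatabase(":memory:")
db.bind([Activity])  # late binding, as _ensure_tables() does
db.connect()
db.create_tables([Activity], safe=True)

with db.atomic():  # upsert mirroring record_search_activity()
    Activity.insert(category="radarr", summary="ok").on_conflict(
        conflict_target=[Activity.category],
        update={"summary": "ok"},
    ).execute()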