qBitrr2 5.4.5__py3-none-any.whl → 5.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qBitrr/arss.py +457 -127
- qBitrr/bundled_data.py +2 -2
- qBitrr/config_version.py +144 -0
- qBitrr/db_lock.py +189 -0
- qBitrr/db_recovery.py +202 -0
- qBitrr/gen_config.py +285 -3
- qBitrr/main.py +171 -5
- qBitrr/search_activity_store.py +6 -2
- qBitrr/static/assets/ArrView.js +1 -1
- qBitrr/static/assets/ArrView.js.map +1 -1
- qBitrr/static/assets/ConfigView.js +4 -3
- qBitrr/static/assets/ConfigView.js.map +1 -1
- qBitrr/static/assets/LogsView.js +17 -39
- qBitrr/static/assets/LogsView.js.map +1 -1
- qBitrr/static/assets/ProcessesView.js +1 -1
- qBitrr/static/assets/ProcessesView.js.map +1 -1
- qBitrr/static/assets/app.css +1 -1
- qBitrr/static/assets/app.js +1 -9
- qBitrr/static/assets/app.js.map +1 -1
- qBitrr/static/assets/react-select.esm.js +1 -8
- qBitrr/static/assets/react-select.esm.js.map +1 -1
- qBitrr/static/assets/table.js +2 -20
- qBitrr/static/assets/table.js.map +1 -1
- qBitrr/static/assets/vendor.js +1 -25
- qBitrr/static/assets/vendor.js.map +1 -1
- qBitrr/static/sw.js +5 -0
- qBitrr/tables.py +27 -0
- qBitrr/webui.py +523 -23
- {qbitrr2-5.4.5.dist-info → qbitrr2-5.5.0.dist-info}/METADATA +88 -13
- qbitrr2-5.5.0.dist-info/RECORD +63 -0
- qbitrr2-5.4.5.dist-info/RECORD +0 -61
- {qbitrr2-5.4.5.dist-info → qbitrr2-5.5.0.dist-info}/WHEEL +0 -0
- {qbitrr2-5.4.5.dist-info → qbitrr2-5.5.0.dist-info}/entry_points.txt +0 -0
- {qbitrr2-5.4.5.dist-info → qbitrr2-5.5.0.dist-info}/licenses/LICENSE +0 -0
- {qbitrr2-5.4.5.dist-info → qbitrr2-5.5.0.dist-info}/top_level.txt +0 -0
qBitrr/bundled_data.py
CHANGED
qBitrr/config_version.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""Configuration version management for qBitrr.
|
|
2
|
+
|
|
3
|
+
This module manages config schema versioning and migrations.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import TYPE_CHECKING
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from qBitrr.gen_config import MyConfig
|
|
15
|
+
|
|
16
|
+
# Current expected config version - increment when schema changes require migration
|
|
17
|
+
EXPECTED_CONFIG_VERSION = 3
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def get_config_version(config: MyConfig) -> int:
    """Return the ``Settings.ConfigVersion`` value stored in the config.

    Args:
        config: MyConfig instance

    Returns:
        The version as an ``int``; ``1`` when the key is missing or the
        stored value cannot be converted to an integer.
    """
    version = config.get("Settings.ConfigVersion", fallback=1)
    try:
        parsed = int(version)
    except (ValueError, TypeError):
        # Anything non-numeric is treated as the implicit first schema version.
        logger.warning(f"Invalid ConfigVersion value: {version}, defaulting to 1")
        return 1
    return parsed
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def set_config_version(config: MyConfig, version: int) -> None:
    """Write *version* into ``Settings.ConfigVersion`` of the config document.

    The ``Settings`` table is created on demand when it does not exist yet.

    Args:
        config: MyConfig instance
        version: Version number to set
    """
    document = config.config
    if "Settings" not in document:
        # tomlkit is only needed when the table has to be created.
        from tomlkit import table

        document["Settings"] = table()

    document["Settings"]["ConfigVersion"] = version
    logger.info(f"Set ConfigVersion to {version}")
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def validate_config_version(config: MyConfig) -> tuple[bool, str | None]:
    """Compare the stored config version against the expected schema version.

    Args:
        config: MyConfig instance

    Returns:
        Tuple of (is_valid, error_message)
        - (True, None): Config version matches expected
        - (True, "migration_needed"): Config version is older, migration required
        - (False, error_msg): Config version is newer, show error to user
    """
    current_version = get_config_version(config)

    if current_version > EXPECTED_CONFIG_VERSION:
        # Config was written by a newer qBitrr release than the one running.
        error_msg = (
            f"Config version mismatch: found {current_version}, expected {EXPECTED_CONFIG_VERSION}. "
            f"Your config may have been created with a newer version of qBitrr and may not work correctly. "
            f"Please update qBitrr or restore a compatible config backup."
        )
        logger.error(error_msg)
        return False, error_msg

    if current_version == EXPECTED_CONFIG_VERSION:
        logger.debug(f"Config version matches expected: {EXPECTED_CONFIG_VERSION}")
        return True, None

    # Older config: still usable, but a migration has to run first.
    logger.info(
        f"Config version {current_version} is older than expected {EXPECTED_CONFIG_VERSION}, "
        "migration needed"
    )
    return True, "migration_needed"
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def backup_config(config_path: Path) -> Path | None:
    """Copy the config file to a timestamped sibling backup file.

    Args:
        config_path: Path to config.toml

    Returns:
        Path to backup file, or None if backup failed
    """
    if not config_path.exists():
        logger.warning(f"Config file not found for backup: {config_path}")
        return None

    # Backup name keeps the original stem and suffix around a timestamp,
    # e.g. config.backup.2024-01-01_00-00-00.toml
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    backup_path = config_path.parent / f"{config_path.stem}.backup.{timestamp}{config_path.suffix}"

    try:
        import shutil

        shutil.copy2(config_path, backup_path)
        logger.info(f"Created config backup: {backup_path}")
        return backup_path
    except Exception as e:
        # Best effort: a failed backup is reported, never raised.
        logger.error(f"Failed to create config backup: {e}")
        return None
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def restore_config_backup(backup_path: Path, config_path: Path) -> bool:
    """Overwrite the config file with the contents of a backup file.

    Args:
        backup_path: Path to backup file
        config_path: Path to config.toml

    Returns:
        True if restore succeeded, False otherwise
    """
    if not backup_path.exists():
        logger.error(f"Backup file not found: {backup_path}")
        return False

    try:
        import shutil

        shutil.copy2(backup_path, config_path)
        logger.info(f"Restored config from backup: {backup_path}")
        return True
    except Exception as e:
        # Best effort: a failed restore is reported, never raised.
        logger.error(f"Failed to restore config from backup: {e}")
        return False
|
qBitrr/db_lock.py
CHANGED
|
@@ -77,3 +77,192 @@ def database_lock() -> Iterator[None]:
|
|
|
77
77
|
"""Provide a shared lock used to serialize SQLite access across processes."""
|
|
78
78
|
with _DB_LOCK.context():
|
|
79
79
|
yield
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def with_database_retry(
    func,
    *,
    retries: int = 5,
    backoff: float = 0.5,
    max_backoff: float = 10.0,
    jitter: float = 0.25,
    logger=None,
):
    """
    Execute database operation with retry logic for transient I/O errors.

    Catches:
        - sqlite3.OperationalError (disk I/O, database locked)
        - sqlite3.DatabaseError (corruption that may resolve)

    Does NOT retry:
        - sqlite3.IntegrityError (data constraint violations)
        - sqlite3.ProgrammingError (SQL syntax errors, API misuse)

    Args:
        func: Callable to execute (should take no arguments)
        retries: Maximum number of retry attempts (default: 5)
        backoff: Initial backoff delay in seconds (default: 0.5)
        max_backoff: Maximum backoff delay in seconds (default: 10.0)
        jitter: Random jitter added to delay in seconds (default: 0.25)
        logger: Logger instance for logging retry attempts

    Returns:
        Result of func() if successful

    Raises:
        sqlite3.OperationalError or sqlite3.DatabaseError if retries exhausted
    """
    import random
    import sqlite3
    import time

    attempt = 0
    while True:
        try:
            return func()
        except (sqlite3.IntegrityError, sqlite3.ProgrammingError):
            # Both subclass sqlite3.DatabaseError, so they must be excluded
            # by type BEFORE the generic handler below: constraint violations
            # and API misuse are never transient, retrying cannot help.
            raise
        except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
            error_msg = str(e).lower()

            # Message-based safety net for non-transient errors surfaced
            # through other DatabaseError subclasses.
            if "syntax" in error_msg or "constraint" in error_msg:
                raise

            attempt += 1
            if attempt >= retries:
                if logger:
                    logger.error(
                        "Database operation failed after %s attempts: %s",
                        retries,
                        e,
                    )
                raise

            # Exponential backoff capped at max_backoff, plus random jitter
            # to avoid synchronized retries across processes.
            delay = min(max_backoff, backoff * (2 ** (attempt - 1)))
            delay += random.random() * jitter

            if logger:
                logger.warning(
                    "Database I/O error (attempt %s/%s): %s. Retrying in %.2fs",
                    attempt,
                    retries,
                    e,
                    delay,
                )

            time.sleep(delay)
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
class ResilientSqliteDatabase:
    """
    Wrapper around a Peewee ``SqliteDatabase`` that retries transient
    connection failures.

    Disk I/O errors can surface while the connection is being opened
    (specifically while PRAGMAs are applied), before any query-level retry
    logic gets a chance to run; this wrapper covers that window.
    """

    def __init__(self, database, max_retries=5, backoff=0.5):
        """
        Args:
            database: Peewee SqliteDatabase instance to wrap
            max_retries: Maximum connection retry attempts
            backoff: Initial backoff delay in seconds
        """
        self._db = database
        self._max_retries = max_retries
        self._backoff = backoff

    def __getattr__(self, name):
        """Forward every attribute not found on the wrapper to the wrapped database."""
        return getattr(self._db, name)

    def connect(self, reuse_if_open=False):
        """
        Open the underlying connection, retrying transient I/O errors.

        Args:
            reuse_if_open: If True, return without error if already connected

        Returns:
            Result from underlying database.connect()
        """
        import random
        import sqlite3
        import time

        from peewee import DatabaseError, OperationalError

        retryable = (OperationalError, DatabaseError, sqlite3.OperationalError)
        last_error = None
        delay = self._backoff

        attempt = 1
        while attempt <= self._max_retries:
            try:
                return self._db.connect(reuse_if_open=reuse_if_open)
            except retryable as e:
                message = str(e).lower()

                # Anything other than a transient I/O condition is treated
                # as permanent and surfaces immediately.
                if "disk i/o error" not in message and "database is locked" not in message:
                    raise

                last_error = e
                if attempt >= self._max_retries:
                    # Final attempt failed
                    raise

                # Jittered exponential backoff (max 10s) to avoid a
                # thundering herd of reconnecting processes.
                time.sleep(delay * (1 + random.uniform(-0.25, 0.25)))
                delay = min(delay * 2, 10.0)
            attempt += 1

        # Defensive fallthrough; the loop either returns or raises above.
        if last_error:
            raise last_error
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def check_database_health(db_path: Path, logger=None) -> tuple[bool, str]:
    """
    Perform lightweight SQLite integrity check.

    Args:
        db_path: Path to SQLite database file
        logger: Logger instance for logging health check results

    Returns:
        (is_healthy, error_message) - True if healthy, False with error message otherwise
    """
    import sqlite3

    try:
        # Use a short timeout to avoid blocking
        conn = sqlite3.connect(str(db_path), timeout=5.0)
        try:
            # Quick integrity check (fast, catches major corruption)
            cursor = conn.cursor()
            cursor.execute("PRAGMA quick_check")
            result = cursor.fetchone()[0]
        finally:
            # Close the handle even when the check raises; the previous
            # implementation leaked the connection on error.
            conn.close()

        if result != "ok":
            error_msg = f"PRAGMA quick_check failed: {result}"
            if logger:
                logger.error("Database health check failed: %s", error_msg)
            return False, error_msg

        if logger:
            logger.debug("Database health check passed")
        return True, "Database healthy"

    except sqlite3.OperationalError as e:
        error_msg = f"Cannot access database: {e}"
        if logger:
            logger.error("Database health check failed: %s", error_msg)
        return False, error_msg
    except Exception as e:
        error_msg = f"Unexpected error during health check: {e}"
        if logger:
            logger.error("Database health check failed: %s", error_msg)
        return False, error_msg
|
qBitrr/db_recovery.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
"""SQLite database recovery utilities."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import shutil
|
|
7
|
+
import sqlite3
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger("qBitrr.DBRecovery")
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class DatabaseRecoveryError(Exception):
    """Raised when a database repair/recovery attempt fails irrecoverably."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def checkpoint_wal(db_path: Path, logger_override=None) -> bool:
    """
    Force checkpoint of WAL file to main database.

    This operation flushes all Write-Ahead Log entries to the main database
    file, which can resolve certain types of corruption and reduce the risk
    of data loss.

    Args:
        db_path: Path to SQLite database file
        logger_override: Optional logger instance to use instead of module logger

    Returns:
        True if successful, False otherwise
    """
    log = logger_override or logger

    try:
        log.info("Starting WAL checkpoint for database: %s", db_path)
        conn = sqlite3.connect(str(db_path), timeout=10.0)
        try:
            # Force WAL checkpoint with TRUNCATE mode:
            # checkpoints all frames and truncates the WAL file.
            cursor = conn.cursor()
            cursor.execute("PRAGMA wal_checkpoint(TRUNCATE)")
            result = cursor.fetchone()
        finally:
            # Always release the handle, even if the PRAGMA raises;
            # the previous implementation leaked the connection on error.
            conn.close()

        # Result is (busy, log_pages, checkpointed_pages);
        # busy == 0 means the checkpoint ran to completion.
        if result and result[0] == 0:
            log.info(
                "WAL checkpoint successful: %s frames checkpointed, %s pages in log",
                result[2],
                result[1],
            )
            return True
        else:
            log.warning(
                "WAL checkpoint partially successful: result=%s (database may be busy)",
                result,
            )
            return True  # Still consider partial success as success

    except sqlite3.OperationalError as e:
        log.error("WAL checkpoint failed (OperationalError): %s", e)
        return False
    except Exception as e:
        log.error("WAL checkpoint failed (unexpected error): %s", e)
        return False
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def repair_database(db_path: Path, backup: bool = True, logger_override=None) -> bool:
    """
    Attempt to repair corrupted SQLite database via dump/restore.

    This operation:
    1. Creates a backup of the corrupted database
    2. Dumps all recoverable data to a temporary database
    3. Replaces the original with the repaired copy
    4. Verifies integrity of the repaired database

    WARNING: Some data may be lost if corruption is severe.

    Args:
        db_path: Path to SQLite database file
        backup: Whether to create backup before repair (default: True)
        logger_override: Optional logger instance to use instead of module logger

    Returns:
        True if repair successful, False otherwise

    Raises:
        DatabaseRecoveryError: If repair fails critically
    """
    log = logger_override or logger

    # Sibling files next to the database: "<name>.db.backup" keeps the
    # pre-repair copy, "<name>.db.temp" receives the dump.
    backup_path = db_path.with_suffix(".db.backup")
    temp_path = db_path.with_suffix(".db.temp")

    try:
        # Step 1: Backup original
        if backup:
            log.info("Creating backup: %s", backup_path)
            shutil.copy2(db_path, backup_path)

        # Step 2: Dump recoverable data
        log.info("Dumping recoverable data from corrupted database...")
        source_conn = sqlite3.connect(str(db_path))

        temp_conn = sqlite3.connect(str(temp_path))

        # Dump schema and data. iterdump() emits complete SQL statements;
        # each one is executed individually so a single corrupted row does
        # not abort the whole recovery.
        skipped_rows = 0
        for line in source_conn.iterdump():
            try:
                temp_conn.execute(line)
            except sqlite3.Error as e:
                # Log but continue - recover what we can
                skipped_rows += 1
                log.debug("Skipping corrupted row during dump: %s", e)

        if skipped_rows > 0:
            log.warning("Skipped %s corrupted rows during dump", skipped_rows)

        temp_conn.commit()
        temp_conn.close()
        source_conn.close()

        # NOTE(review): if an exception fires between connect and close above,
        # source_conn/temp_conn stay open until GC — consider try/finally.

        # Step 3: Replace original with repaired copy.
        # NOTE(review): unlink + move is not atomic; a crash between the two
        # calls leaves no database at db_path (the .backup copy still exists
        # when backup=True).
        log.info("Replacing database with repaired version...")
        db_path.unlink()
        shutil.move(str(temp_path), str(db_path))

        # Step 4: Verify integrity with the thorough (slower) check,
        # not quick_check, since we just rebuilt the file.
        log.info("Verifying integrity of repaired database...")
        verify_conn = sqlite3.connect(str(db_path))
        cursor = verify_conn.cursor()
        cursor.execute("PRAGMA integrity_check")
        result = cursor.fetchone()[0]
        verify_conn.close()

        if result != "ok":
            raise DatabaseRecoveryError(f"Repair verification failed: {result}")

        log.info("Database repair successful!")
        return True

    except Exception as e:
        log.error("Database repair failed: %s", e)

        # Attempt to restore backup
        if backup and backup_path.exists():
            log.warning("Restoring from backup...")
            try:
                shutil.copy2(backup_path, db_path)
                log.info("Backup restored successfully")
            except Exception as restore_error:
                log.error("Failed to restore backup: %s", restore_error)

        # Cleanup temp files
        if temp_path.exists():
            try:
                temp_path.unlink()
            except Exception:
                pass  # Best effort cleanup

        # Re-raise with the original exception chained for diagnostics.
        raise DatabaseRecoveryError(f"Repair failed: {e}") from e
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def vacuum_database(db_path: Path, logger_override=None) -> bool:
    """
    Run VACUUM to reclaim space and optimize database.

    VACUUM rebuilds the database file, repacking it into a minimal amount of
    disk space. This can help resolve some types of corruption and improve
    performance.

    Note: VACUUM requires free disk space approximately 2x the database size.

    Args:
        db_path: Path to SQLite database file
        logger_override: Optional logger instance to use instead of module logger

    Returns:
        True if successful, False otherwise
    """
    log = logger_override or logger

    try:
        log.info("Running VACUUM on database: %s", db_path)
        conn = sqlite3.connect(str(db_path), timeout=30.0)
        try:
            conn.execute("VACUUM")
        finally:
            # Close even when VACUUM raises (e.g. out of disk space);
            # the previous implementation leaked the connection on error.
            conn.close()

        log.info("VACUUM completed successfully")
        return True

    except sqlite3.OperationalError as e:
        log.error("VACUUM failed (OperationalError): %s", e)
        return False
    except Exception as e:
        log.error("VACUUM failed (unexpected error): %s", e)
        return False
|