qbitrr2-5.5.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. qBitrr/__init__.py +14 -0
  2. qBitrr/arss.py +7100 -0
  3. qBitrr/auto_update.py +382 -0
  4. qBitrr/bundled_data.py +7 -0
  5. qBitrr/config.py +192 -0
  6. qBitrr/config_version.py +144 -0
  7. qBitrr/db_lock.py +400 -0
  8. qBitrr/db_recovery.py +202 -0
  9. qBitrr/env_config.py +73 -0
  10. qBitrr/errors.py +41 -0
  11. qBitrr/ffprobe.py +105 -0
  12. qBitrr/gen_config.py +1331 -0
  13. qBitrr/home_path.py +23 -0
  14. qBitrr/logger.py +235 -0
  15. qBitrr/main.py +790 -0
  16. qBitrr/search_activity_store.py +92 -0
  17. qBitrr/static/assets/ArrView.js +2 -0
  18. qBitrr/static/assets/ArrView.js.map +1 -0
  19. qBitrr/static/assets/ConfigView.js +4 -0
  20. qBitrr/static/assets/ConfigView.js.map +1 -0
  21. qBitrr/static/assets/LogsView.js +2 -0
  22. qBitrr/static/assets/LogsView.js.map +1 -0
  23. qBitrr/static/assets/ProcessesView.js +2 -0
  24. qBitrr/static/assets/ProcessesView.js.map +1 -0
  25. qBitrr/static/assets/app.css +1 -0
  26. qBitrr/static/assets/app.js +11 -0
  27. qBitrr/static/assets/app.js.map +1 -0
  28. qBitrr/static/assets/build.svg +3 -0
  29. qBitrr/static/assets/check-mark.svg +5 -0
  30. qBitrr/static/assets/close.svg +4 -0
  31. qBitrr/static/assets/download.svg +5 -0
  32. qBitrr/static/assets/gear.svg +5 -0
  33. qBitrr/static/assets/live-streaming.svg +8 -0
  34. qBitrr/static/assets/log.svg +3 -0
  35. qBitrr/static/assets/logo.svg +48 -0
  36. qBitrr/static/assets/plus.svg +4 -0
  37. qBitrr/static/assets/process.svg +15 -0
  38. qBitrr/static/assets/react-select.esm.js +7 -0
  39. qBitrr/static/assets/react-select.esm.js.map +1 -0
  40. qBitrr/static/assets/refresh-arrow.svg +3 -0
  41. qBitrr/static/assets/table.js +5 -0
  42. qBitrr/static/assets/table.js.map +1 -0
  43. qBitrr/static/assets/trash.svg +8 -0
  44. qBitrr/static/assets/up-arrow.svg +3 -0
  45. qBitrr/static/assets/useInterval.js +2 -0
  46. qBitrr/static/assets/useInterval.js.map +1 -0
  47. qBitrr/static/assets/vendor.js +2 -0
  48. qBitrr/static/assets/vendor.js.map +1 -0
  49. qBitrr/static/assets/visibility.svg +9 -0
  50. qBitrr/static/index.html +33 -0
  51. qBitrr/static/logov2-clean.svg +48 -0
  52. qBitrr/static/manifest.json +23 -0
  53. qBitrr/static/sw.js +87 -0
  54. qBitrr/static/vite.svg +1 -0
  55. qBitrr/tables.py +143 -0
  56. qBitrr/utils.py +274 -0
  57. qBitrr/versioning.py +136 -0
  58. qBitrr/webui.py +3114 -0
  59. qbitrr2-5.5.5.dist-info/METADATA +1191 -0
  60. qbitrr2-5.5.5.dist-info/RECORD +64 -0
  61. qbitrr2-5.5.5.dist-info/WHEEL +5 -0
  62. qbitrr2-5.5.5.dist-info/entry_points.txt +2 -0
  63. qbitrr2-5.5.5.dist-info/licenses/LICENSE +21 -0
  64. qbitrr2-5.5.5.dist-info/top_level.txt +1 -0
qBitrr/config_version.py ADDED
@@ -0,0 +1,144 @@
"""Configuration version management for qBitrr.

This module manages config schema versioning and migrations.
"""

from __future__ import annotations

import logging
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from qBitrr.gen_config import MyConfig

# Current expected config version - increment when schema changes require migration
EXPECTED_CONFIG_VERSION = 3

logger = logging.getLogger(__name__)


def get_config_version(config: MyConfig) -> int:
    """
    Get the ConfigVersion from the config file.

    Args:
        config: MyConfig instance

    Returns:
        Config version as integer, defaults to 1 if not found
    """
    version = config.get("Settings.ConfigVersion", fallback=1)
    try:
        return int(version)
    except (ValueError, TypeError):
        logger.warning(f"Invalid ConfigVersion value: {version}, defaulting to 1")
        return 1


def set_config_version(config: MyConfig, version: int) -> None:
    """
    Set the ConfigVersion in the config file.

    Args:
        config: MyConfig instance
        version: Version number to set
    """
    if "Settings" not in config.config:
        from tomlkit import table

        config.config["Settings"] = table()

    config.config["Settings"]["ConfigVersion"] = version
    logger.info(f"Set ConfigVersion to {version}")


def validate_config_version(config: MyConfig) -> tuple[bool, str | None]:
    """
    Validate config version and determine if migration is needed.

    Args:
        config: MyConfig instance

    Returns:
        Tuple of (is_valid, error_message)
        - (True, None): Config version matches expected
        - (True, "migration_needed"): Config version is older, migration required
        - (False, error_msg): Config version is newer, show error to user
    """
    current_version = get_config_version(config)

    if current_version == EXPECTED_CONFIG_VERSION:
        logger.debug(f"Config version matches expected: {EXPECTED_CONFIG_VERSION}")
        return True, None

    if current_version < EXPECTED_CONFIG_VERSION:
        logger.info(
            f"Config version {current_version} is older than expected {EXPECTED_CONFIG_VERSION}, "
            "migration needed"
        )
        return True, "migration_needed"

    # Config version is newer than expected
    error_msg = (
        f"Config version mismatch: found {current_version}, expected {EXPECTED_CONFIG_VERSION}. "
        f"Your config may have been created with a newer version of qBitrr and may not work correctly. "
        f"Please update qBitrr or restore a compatible config backup."
    )
    logger.error(error_msg)
    return False, error_msg


def backup_config(config_path: Path) -> Path | None:
    """
    Create a timestamped backup of the config file.

    Args:
        config_path: Path to config.toml

    Returns:
        Path to backup file, or None if backup failed
    """
    if not config_path.exists():
        logger.warning(f"Config file not found for backup: {config_path}")
        return None

    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    backup_path = config_path.parent / f"{config_path.stem}.backup.{timestamp}{config_path.suffix}"

    try:
        import shutil

        shutil.copy2(config_path, backup_path)
        logger.info(f"Created config backup: {backup_path}")
        return backup_path
    except Exception as e:
        logger.error(f"Failed to create config backup: {e}")
        return None


def restore_config_backup(backup_path: Path, config_path: Path) -> bool:
    """
    Restore config from a backup file.

    Args:
        backup_path: Path to backup file
        config_path: Path to config.toml

    Returns:
        True if restore succeeded, False otherwise
    """
    if not backup_path.exists():
        logger.error(f"Backup file not found: {backup_path}")
        return False

    try:
        import shutil

        shutil.copy2(backup_path, config_path)
        logger.info(f"Restored config from backup: {backup_path}")
        return True
    except Exception as e:
        logger.error(f"Failed to restore config from backup: {e}")
        return False
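
The functions above are building blocks rather than a complete flow, so a minimal startup sketch may help; the config path, the MyConfig() call, and the migration step itself are assumptions made for illustration, not APIs defined in this file.

# Hypothetical startup flow; only the qBitrr.config_version helpers are real.
from pathlib import Path

from qBitrr.config_version import (
    EXPECTED_CONFIG_VERSION,
    backup_config,
    set_config_version,
    validate_config_version,
)
from qBitrr.gen_config import MyConfig

config_path = Path.home() / ".config" / "qBitrr" / "config.toml"  # assumed location
config = MyConfig(config_path)  # assumed constructor signature

is_valid, message = validate_config_version(config)
if not is_valid:
    raise SystemExit(message)  # config written by a newer qBitrr; refuse to continue
if message == "migration_needed":
    backup_config(config_path)  # keep a timestamped copy before touching anything
    # ... apply the actual schema migration here (not part of this module) ...
    set_config_version(config, EXPECTED_CONFIG_VERSION)
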
qBitrr/db_lock.py ADDED
@@ -0,0 +1,400 @@
from __future__ import annotations

import os
import threading
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator

from qBitrr.db_recovery import checkpoint_wal, repair_database
from qBitrr.home_path import APPDATA_FOLDER

if os.name == "nt":  # pragma: no cover - platform specific
    import msvcrt
else:  # pragma: no cover
    import fcntl

_LOCK_FILE = APPDATA_FOLDER.joinpath("qbitrr.db.lock")


class _InterProcessFileLock:
    """Cross-process, re-entrant file lock to guard SQLite access."""

    def __init__(self, path: Path):
        self._path = path
        self._thread_gate = threading.RLock()
        self._local = threading.local()

    def acquire(self) -> None:
        depth = getattr(self._local, "depth", 0)
        if depth == 0:
            self._thread_gate.acquire()
            self._path.parent.mkdir(parents=True, exist_ok=True)
            handle = open(self._path, "a+b")
            try:
                if os.name == "nt":  # pragma: no cover - Windows specific branch
                    msvcrt.locking(handle.fileno(), msvcrt.LK_LOCK, 1)
                else:  # pragma: no cover - POSIX branch
                    fcntl.flock(handle, fcntl.LOCK_EX)
            except Exception:
                handle.close()
                self._thread_gate.release()
                raise
            self._local.handle = handle
        self._local.depth = depth + 1

    def release(self) -> None:
        depth = getattr(self._local, "depth", 0)
        if depth <= 0:
            raise RuntimeError("Attempted to release an unacquired database lock")
        depth -= 1
        if depth == 0:
            handle = getattr(self._local, "handle")
            try:
                if os.name == "nt":  # pragma: no cover
                    msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1)
                else:  # pragma: no cover
                    fcntl.flock(handle, fcntl.LOCK_UN)
            finally:
                handle.close()
                del self._local.handle
                self._thread_gate.release()
        self._local.depth = depth

    @contextmanager
    def context(self) -> Iterator[None]:
        self.acquire()
        try:
            yield
        finally:
            self.release()


_DB_LOCK = _InterProcessFileLock(_LOCK_FILE)


@contextmanager
def database_lock() -> Iterator[None]:
    """Provide a shared lock used to serialize SQLite access across processes."""
    with _DB_LOCK.context():
        yield

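
To illustrate how the module-level lock is meant to be used, here is a minimal sketch of a write guarded by database_lock(); the table and SQL statement are invented for this sketch, and only the locking pattern reflects this module.

# Illustrative write guarded by the cross-process lock.
import sqlite3

from qBitrr.db_lock import database_lock
from qBitrr.home_path import APPDATA_FOLDER


def bump_run_counter() -> None:
    # The exclusive file lock is held for the whole with-block, so other
    # qBitrr processes cannot write to the SQLite file at the same time.
    with database_lock():
        conn = sqlite3.connect(str(APPDATA_FOLDER / "qbitrr.db"))
        try:
            conn.execute(
                "CREATE TABLE IF NOT EXISTS example_counters "
                "(name TEXT PRIMARY KEY, value INTEGER NOT NULL)"
            )
            conn.execute(
                "INSERT INTO example_counters (name, value) VALUES ('runs', 1) "
                "ON CONFLICT(name) DO UPDATE SET value = value + 1"
            )
            conn.commit()
        finally:
            conn.close()
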
def with_database_retry(
    func,
    *,
    retries: int = 5,
    backoff: float = 0.5,
    max_backoff: float = 10.0,
    jitter: float = 0.25,
    logger=None,
):
    """
    Execute database operation with retry logic for transient I/O errors.

    Catches:
        - sqlite3.OperationalError (disk I/O, database locked)
        - sqlite3.DatabaseError (corruption that may resolve)

    Does NOT retry:
        - sqlite3.IntegrityError (data constraint violations)
        - sqlite3.ProgrammingError (SQL syntax errors)

    On detecting database corruption, attempts automatic recovery before retrying.

    Args:
        func: Callable to execute (should take no arguments)
        retries: Maximum number of retry attempts (default: 5)
        backoff: Initial backoff delay in seconds (default: 0.5)
        max_backoff: Maximum backoff delay in seconds (default: 10.0)
        jitter: Random jitter added to delay in seconds (default: 0.25)
        logger: Logger instance for logging retry attempts

    Returns:
        Result of func() if successful

    Raises:
        sqlite3.OperationalError or sqlite3.DatabaseError if retries exhausted
    """
    import random
    import sqlite3
    import time

    attempt = 0
    corruption_recovery_attempted = False

    while True:
        try:
            return func()
        except (sqlite3.OperationalError, sqlite3.DatabaseError) as e:
            error_msg = str(e).lower()

            # Don't retry on non-transient errors
            if "syntax" in error_msg or "constraint" in error_msg:
                raise

            # Detect corruption and attempt recovery (only once)
            if not corruption_recovery_attempted and (
                "disk image is malformed" in error_msg
                or "database disk image is malformed" in error_msg
                or "database corruption" in error_msg
            ):
                corruption_recovery_attempted = True
                if logger:
                    logger.error(
                        "Database corruption detected: %s. Attempting automatic recovery...",
                        e,
                    )

                recovery_succeeded = False
                try:
                    db_path = APPDATA_FOLDER / "qbitrr.db"

                    # Step 1: Try WAL checkpoint (least invasive)
                    if logger:
                        logger.info("Attempting WAL checkpoint...")
                    if checkpoint_wal(db_path, logger):
                        if logger:
                            logger.info("WAL checkpoint successful - retrying operation")
                        recovery_succeeded = True
                    else:
                        # Step 2: Try full repair (more invasive)
                        if logger:
                            logger.warning(
                                "WAL checkpoint failed - attempting full database repair..."
                            )
                        if repair_database(db_path, backup=True, logger_override=logger):
                            if logger:
                                logger.info("Database repair successful - retrying operation")
                            recovery_succeeded = True

                except Exception as recovery_error:
                    if logger:
                        logger.error(
                            "Database recovery error: %s",
                            recovery_error,
                        )

                if recovery_succeeded:
                    # Reset attempt counter after successful recovery
                    attempt = 0
                    time.sleep(1)  # Brief pause before retry
                    continue

                # If we reach here, recovery failed - log and continue with normal retry
                if logger:
                    logger.critical(
                        "Automatic database recovery failed. "
                        "Manual intervention may be required. Attempting normal retry..."
                    )

            attempt += 1
            if attempt >= retries:
                if logger:
                    logger.error(
                        "Database operation failed after %s attempts: %s",
                        retries,
                        e,
                    )
                raise

            delay = min(max_backoff, backoff * (2 ** (attempt - 1)))
            delay += random.random() * jitter

            if logger:
                logger.warning(
                    "Database I/O error (attempt %s/%s): %s. Retrying in %.2fs",
                    attempt,
                    retries,
                    e,
                    delay,
                )

            time.sleep(delay)

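
A short usage sketch for with_database_retry, assuming a Peewee-style model passed in by the caller; combining it with database_lock() as shown is a plausible pattern, not necessarily how qBitrr's own call sites are written.

# Sketch of a retried write; the model argument and logger name are
# hypothetical, only with_database_retry/database_lock come from this module.
import logging

from qBitrr.db_lock import database_lock, with_database_retry

logger = logging.getLogger("qBitrr.example")


def upsert_row(model_cls, **fields):
    # Zero-argument callable as required by with_database_retry.
    def _op():
        return model_cls.insert(**fields).on_conflict_replace().execute()

    # Serialize across processes, then let the retry helper absorb transient
    # "database is locked" / disk I/O errors with exponential backoff.
    with database_lock():
        return with_database_retry(_op, retries=3, logger=logger)
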
class ResilientSqliteDatabase:
    """
    Wrapper for Peewee SqliteDatabase that adds retry logic to connection attempts.

    This solves the issue where disk I/O errors occur during database connection
    (specifically when setting PRAGMAs), before query-level retry logic can help.
    """

    def __init__(self, database, max_retries=5, backoff=0.5, logger=None):
        """
        Args:
            database: Peewee SqliteDatabase instance to wrap
            max_retries: Maximum connection retry attempts
            backoff: Initial backoff delay in seconds
            logger: Optional logger instance for logging recovery attempts
        """
        self._db = database
        self._max_retries = max_retries
        self._backoff = backoff
        self._logger = logger

    def __getattr__(self, name):
        """Delegate all attribute access to the wrapped database."""
        return getattr(self._db, name)

    def connect(self, reuse_if_open=False):
        """
        Connect to database with retry logic for transient I/O errors.

        Args:
            reuse_if_open: If True, return without error if already connected

        Returns:
            Result from underlying database.connect()
        """
        import random
        import sqlite3
        import time

        from peewee import DatabaseError, OperationalError

        last_error = None
        delay = self._backoff
        corruption_recovery_attempted = False

        for attempt in range(1, self._max_retries + 1):
            try:
                return self._db.connect(reuse_if_open=reuse_if_open)
            except (OperationalError, DatabaseError, sqlite3.OperationalError) as e:
                error_msg = str(e).lower()

                # Detect corruption and attempt recovery (only once)
                if not corruption_recovery_attempted and (
                    "disk image is malformed" in error_msg
                    or "database disk image is malformed" in error_msg
                    or "database corruption" in error_msg
                ):
                    corruption_recovery_attempted = True
                    if self._logger:
                        self._logger.error(
                            "Database corruption detected during connection: %s. "
                            "Attempting automatic recovery...",
                            e,
                        )

                    recovery_succeeded = False
                    try:
                        db_path = APPDATA_FOLDER / "qbitrr.db"

                        # Close current connection if any
                        try:
                            if not self._db.is_closed():
                                self._db.close()
                        except Exception:
                            pass  # Ignore errors closing corrupted connection

                        # Step 1: Try WAL checkpoint
                        if self._logger:
                            self._logger.info("Attempting WAL checkpoint...")
                        if checkpoint_wal(db_path, self._logger):
                            if self._logger:
                                self._logger.info(
                                    "WAL checkpoint successful - retrying connection"
                                )
                            recovery_succeeded = True
                        else:
                            # Step 2: Try full repair
                            if self._logger:
                                self._logger.warning(
                                    "WAL checkpoint failed - attempting full database repair..."
                                )
                            if repair_database(db_path, backup=True, logger_override=self._logger):
                                if self._logger:
                                    self._logger.info(
                                        "Database repair successful - retrying connection"
                                    )
                                recovery_succeeded = True

                    except Exception as recovery_error:
                        if self._logger:
                            self._logger.error(
                                "Database recovery error: %s",
                                recovery_error,
                            )

                    if recovery_succeeded:
                        time.sleep(1)
                        continue

                    # Recovery failed - log and continue with normal retry
                    if self._logger:
                        self._logger.critical(
                            "Automatic database recovery failed. "
                            "Manual intervention may be required."
                        )

                # Retry on transient I/O errors
                if (
                    "disk i/o error" in error_msg
                    or "database is locked" in error_msg
                    or "disk image is malformed" in error_msg
                ):
                    last_error = e

                    if attempt < self._max_retries:
                        # Add jitter to prevent thundering herd
                        jittered_delay = delay * (1 + random.uniform(-0.25, 0.25))
                        time.sleep(jittered_delay)
                        delay = min(delay * 2, 10.0)  # Exponential backoff, max 10s
                    else:
                        # Final attempt failed
                        raise
                else:
                    # Non-transient error, fail immediately
                    raise

        # Should never reach here, but just in case
        if last_error:
            raise last_error

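
A minimal sketch of wrapping a Peewee SqliteDatabase in ResilientSqliteDatabase; the database path matches the one hard-coded above, but the PRAGMA settings and logger name are illustrative assumptions.

# Sketch only: the PRAGMA choices and logger name are assumptions, not the
# values qBitrr necessarily uses.
import logging

from peewee import SqliteDatabase

from qBitrr.db_lock import ResilientSqliteDatabase
from qBitrr.home_path import APPDATA_FOLDER

logger = logging.getLogger("qBitrr.db")

_raw_db = SqliteDatabase(
    str(APPDATA_FOLDER / "qbitrr.db"),
    pragmas={"journal_mode": "wal", "busy_timeout": 5000},
)
db = ResilientSqliteDatabase(_raw_db, max_retries=5, backoff=0.5, logger=logger)

# connect() goes through the wrapper, so corruption or disk I/O errors during
# PRAGMA setup are retried (and repaired) instead of failing immediately.
db.connect(reuse_if_open=True)
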
def check_database_health(db_path: Path, logger=None) -> tuple[bool, str]:
    """
    Perform lightweight SQLite integrity check.

    Args:
        db_path: Path to SQLite database file
        logger: Logger instance for logging health check results

    Returns:
        (is_healthy, error_message) - True if healthy, False with error message otherwise
    """
    import sqlite3

    try:
        # Use a short timeout to avoid blocking
        conn = sqlite3.connect(str(db_path), timeout=5.0)
        cursor = conn.cursor()

        # Quick integrity check (fast, catches major corruption)
        cursor.execute("PRAGMA quick_check")
        result = cursor.fetchone()[0]

        conn.close()

        if result != "ok":
            error_msg = f"PRAGMA quick_check failed: {result}"
            if logger:
                logger.error("Database health check failed: %s", error_msg)
            return False, error_msg

        if logger:
            logger.debug("Database health check passed")
        return True, "Database healthy"

    except sqlite3.OperationalError as e:
        error_msg = f"Cannot access database: {e}"
        if logger:
            logger.error("Database health check failed: %s", error_msg)
        return False, error_msg
    except Exception as e:
        error_msg = f"Unexpected error during health check: {e}"
        if logger:
            logger.error("Database health check failed: %s", error_msg)
        return False, error_msg
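
Finally, a sketch of a startup health check built on check_database_health, falling back to the repair_database helper imported at the top of this module; the "check then repair" policy shown here is an assumption, not qBitrr's documented behavior.

# Startup health check sketch; only the two helper functions are qBitrr's.
import logging

from qBitrr.db_lock import check_database_health
from qBitrr.db_recovery import repair_database
from qBitrr.home_path import APPDATA_FOLDER

logger = logging.getLogger("qBitrr.db")  # hypothetical logger name

db_path = APPDATA_FOLDER / "qbitrr.db"
healthy, detail = check_database_health(db_path, logger)
if not healthy:
    logger.warning("Health check failed (%s); attempting repair", detail)
    # Same repair_database signature used inside db_lock.py above
    repair_database(db_path, backup=True, logger_override=logger)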