s3ui-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
s3ui/db/database.py ADDED
@@ -0,0 +1,148 @@
+import logging
+import sqlite3
+import threading
+from pathlib import Path
+
+from s3ui.constants import DB_PATH
+
+logger = logging.getLogger("s3ui.db")
+
+MIGRATIONS_DIR = Path(__file__).parent / "migrations"
+
+
+class Database:
+    """Thread-safe SQLite database with WAL mode and migration support."""
+
+    def __init__(self, db_path: Path | None = None) -> None:
+        self._db_path = str(db_path or DB_PATH)
+        self._local = threading.local()
+        self._write_lock = threading.Lock()
+
+        # Ensure directory exists
+        Path(self._db_path).parent.mkdir(parents=True, exist_ok=True)
+
+        # Initialize on the creating thread and run migrations.
+        # journal_mode=WAL is a persistent database property, so setting it
+        # once here applies to connections opened later on other threads.
+        conn = self._get_conn()
+        conn.execute("PRAGMA journal_mode=WAL")
+        conn.execute("PRAGMA foreign_keys=ON")
+        self._run_migrations()
+        logger.info("Database initialized at %s", self._db_path)
+
+    def _get_conn(self) -> sqlite3.Connection:
+        """Get a thread-local connection."""
+        if not hasattr(self._local, "conn") or self._local.conn is None:
+            self._local.conn = sqlite3.connect(self._db_path)
+            self._local.conn.row_factory = sqlite3.Row
+            # foreign_keys is per-connection, so re-enable it for each new connection
+            self._local.conn.execute("PRAGMA foreign_keys=ON")
+        return self._local.conn
+
+    def execute(self, sql: str, params: tuple = ()) -> sqlite3.Cursor:
+        """Execute a SQL statement with write serialization."""
+        conn = self._get_conn()
+        # SELECTs run lock-free (WAL allows concurrent readers); everything
+        # else is serialized under the write lock and committed immediately.
+        if sql.lstrip().upper().startswith("SELECT"):
+            return conn.execute(sql, params)
+        with self._write_lock:
+            cursor = conn.execute(sql, params)
+            conn.commit()
+            return cursor
+
+    def executemany(self, sql: str, params_list: list[tuple]) -> sqlite3.Cursor:
+        """Execute a SQL statement with multiple parameter sets."""
+        conn = self._get_conn()
+        with self._write_lock:
+            cursor = conn.executemany(sql, params_list)
+            conn.commit()
+            return cursor
+
+    def executescript(self, sql: str) -> None:
+        """Execute a SQL script (multiple statements)."""
+        conn = self._get_conn()
+        with self._write_lock:
+            conn.executescript(sql)
+
+    def fetchone(self, sql: str, params: tuple = ()) -> sqlite3.Row | None:
+        """Execute a SELECT and return one row."""
+        return self._get_conn().execute(sql, params).fetchone()
+
+    def fetchall(self, sql: str, params: tuple = ()) -> list[sqlite3.Row]:
+        """Execute a SELECT and return all rows."""
+        return self._get_conn().execute(sql, params).fetchall()
+
+    def _run_migrations(self) -> None:
+        """Apply pending migrations in order."""
+        conn = self._get_conn()
+
+        # Ensure schema_version table exists (bootstrap)
+        conn.execute(
+            "CREATE TABLE IF NOT EXISTS schema_version "
+            "(version INTEGER PRIMARY KEY, applied_at TEXT NOT NULL DEFAULT (datetime('now')))"
+        )
+        conn.commit()
+
+        current = self._get_schema_version()
+
+        migration_files = sorted(MIGRATIONS_DIR.glob("*.sql"))
+        for migration_file in migration_files:
+            version = int(migration_file.stem.split("_")[0])
+            if version > current:
+                logger.info("Applying migration %03d: %s", version, migration_file.name)
+                sql = migration_file.read_text()
+                with self._write_lock:
+                    conn.executescript(sql)
+                    conn.execute(
+                        "INSERT OR REPLACE INTO schema_version (version) VALUES (?)",
+                        (version,),
+                    )
+                    conn.commit()
+
+    def _get_schema_version(self) -> int:
+        """Get the current schema version."""
+        row = self._get_conn().execute("SELECT MAX(version) as v FROM schema_version").fetchone()
+        return row["v"] if row and row["v"] is not None else 0
+
+    def close(self) -> None:
+        """Close the thread-local connection."""
+        if hasattr(self._local, "conn") and self._local.conn is not None:
+            self._local.conn.close()
+            self._local.conn = None
+
+
+# Preferences helpers
+
+
+def get_pref(db: Database, key: str, default: str | None = None) -> str | None:
+    """Get a preference value by key."""
+    row = db.fetchone("SELECT value FROM preferences WHERE key = ?", (key,))
+    return row["value"] if row else default
+
+
+def set_pref(db: Database, key: str, value: str) -> None:
+    """Set a preference value."""
+    db.execute(
+        "INSERT OR REPLACE INTO preferences (key, value) VALUES (?, ?)",
+        (key, value),
+    )
+
+
+def get_bool_pref(db: Database, key: str, default: bool = False) -> bool:
+    """Get a boolean preference."""
+    val = get_pref(db, key)
+    if val is None:
+        return default
+    return val.lower() in ("true", "1", "yes")
+
+
+def get_int_pref(db: Database, key: str, default: int = 0) -> int:
+    """Get an integer preference."""
+    val = get_pref(db, key)
+    if val is None:
+        return default
+    try:
+        return int(val)
+    except ValueError:
+        return default
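
Database hands each thread its own connection and funnels writes through a single lock, so callers can share one instance freely. A minimal usage sketch, assuming the shipped initial migration has seeded the schema; the on-disk path and preference key here are illustrative, not package code:

    from pathlib import Path

    from s3ui.db.database import Database, get_int_pref, set_pref

    db = Database(Path("/tmp/s3ui-demo.db"))  # opens the DB, runs pending migrations

    # Writes are serialized and committed immediately; reads skip the lock.
    set_pref(db, "transfer.max_workers", "8")  # hypothetical key
    workers = get_int_pref(db, "transfer.max_workers", default=4)

    row = db.fetchone("SELECT COUNT(*) AS n FROM cost_rates")
    print(workers, row["n"])

    db.close()

The migration below creates the tables these helpers assume, including the preferences and cost_rates stores.
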
@@ -0,0 +1,114 @@
+-- Tracks known buckets and their associated credential profile
+CREATE TABLE IF NOT EXISTS buckets (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    name TEXT NOT NULL,
+    region TEXT,
+    profile TEXT NOT NULL,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    UNIQUE(name, profile)
+);
+
+-- Daily storage snapshots from bucket scans
+CREATE TABLE IF NOT EXISTS bucket_snapshots (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    bucket_id INTEGER NOT NULL REFERENCES buckets(id) ON DELETE CASCADE,
+    snapshot_date TEXT NOT NULL,
+    total_objects INTEGER,
+    total_bytes INTEGER,
+    standard_bytes INTEGER DEFAULT 0,
+    ia_bytes INTEGER DEFAULT 0,
+    glacier_bytes INTEGER DEFAULT 0,
+    deep_archive_bytes INTEGER DEFAULT 0,
+    intelligent_tiering_bytes INTEGER DEFAULT 0,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    UNIQUE(bucket_id, snapshot_date)
+);
+CREATE INDEX IF NOT EXISTS idx_snapshots_bucket_date ON bucket_snapshots(bucket_id, snapshot_date);
+
+-- Daily API usage and transfer volume
+CREATE TABLE IF NOT EXISTS daily_usage (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    bucket_id INTEGER NOT NULL REFERENCES buckets(id) ON DELETE CASCADE,
+    usage_date TEXT NOT NULL,
+    bytes_uploaded INTEGER DEFAULT 0,
+    bytes_downloaded INTEGER DEFAULT 0,
+    put_requests INTEGER DEFAULT 0,
+    get_requests INTEGER DEFAULT 0,
+    list_requests INTEGER DEFAULT 0,
+    delete_requests INTEGER DEFAULT 0,
+    copy_requests INTEGER DEFAULT 0,
+    head_requests INTEGER DEFAULT 0,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    UNIQUE(bucket_id, usage_date)
+);
+CREATE INDEX IF NOT EXISTS idx_usage_bucket_date ON daily_usage(bucket_id, usage_date);
+
+-- Configurable cost rates
+CREATE TABLE IF NOT EXISTS cost_rates (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    name TEXT NOT NULL UNIQUE,
+    rate REAL NOT NULL,
+    unit TEXT NOT NULL,
+    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
+);
+
+-- Seed default cost rates (US East pricing as of 2025)
+INSERT OR IGNORE INTO cost_rates (name, rate, unit) VALUES
+    ('storage_standard_gb_month', 0.023, '$/GB/month'),
+    ('storage_ia_gb_month', 0.0125, '$/GB/month'),
+    ('storage_glacier_gb_month', 0.004, '$/GB/month'),
+    ('storage_deep_archive_gb_month', 0.00099, '$/GB/month'),
+    ('storage_intelligent_tiering_gb_month', 0.023, '$/GB/month'),
+    ('put_request', 0.000005, '$/request'),
+    ('get_request', 0.0000004, '$/request'),
+    ('list_request', 0.000005, '$/request'),
+    ('delete_request', 0.0, '$/request'),
+    ('copy_request', 0.000005, '$/request'),
+    ('head_request', 0.0000004, '$/request'),
+    ('transfer_out_gb_first_100', 0.09, '$/GB'),
+    ('transfer_out_gb_next_10k', 0.085, '$/GB'),
+    ('transfer_in_gb', 0.0, '$/GB');
+
+-- Transfer queue with resume support
+CREATE TABLE IF NOT EXISTS transfers (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    bucket_id INTEGER NOT NULL REFERENCES buckets(id) ON DELETE CASCADE,
+    object_key TEXT NOT NULL,
+    direction TEXT NOT NULL CHECK(direction IN ('upload', 'download')),
+    total_bytes INTEGER,
+    transferred INTEGER DEFAULT 0,
+    status TEXT NOT NULL DEFAULT 'queued'
+        CHECK(status IN ('queued','in_progress','paused','completed','failed','cancelled')),
+    upload_id TEXT,
+    local_path TEXT NOT NULL,
+    error_message TEXT,
+    retry_count INTEGER DEFAULT 0,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
+);
+CREATE INDEX IF NOT EXISTS idx_transfers_status ON transfers(status);
+
+-- Individual parts for multipart uploads (resume support)
+CREATE TABLE IF NOT EXISTS transfer_parts (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    transfer_id INTEGER NOT NULL REFERENCES transfers(id) ON DELETE CASCADE,
+    part_number INTEGER NOT NULL,
+    offset INTEGER NOT NULL,
+    size INTEGER NOT NULL,
+    etag TEXT,
+    status TEXT NOT NULL DEFAULT 'pending'
+        CHECK(status IN ('pending','in_progress','completed','failed')),
+    UNIQUE(transfer_id, part_number)
+);
+
+-- Key-value store for app preferences and UI state
+CREATE TABLE IF NOT EXISTS preferences (
+    key TEXT PRIMARY KEY,
+    value TEXT NOT NULL
+);
+
+-- Schema version tracking for migrations
+CREATE TABLE IF NOT EXISTS schema_version (
+    version INTEGER PRIMARY KEY,
+    applied_at TEXT NOT NULL DEFAULT (datetime('now'))
+);
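
Because the rates live in a table rather than in code, a cost estimate is a couple of queries away. A sketch of month-to-date storage cost from a bucket's latest snapshot, assuming a Database instance as above; the function name and tier mapping are assumptions, not package code:

    GB = 1024 ** 3

    def estimate_storage_cost(db, bucket_id: int) -> float:
        # Latest snapshot for the bucket, if any
        snap = db.fetchone(
            "SELECT * FROM bucket_snapshots WHERE bucket_id = ? "
            "ORDER BY snapshot_date DESC LIMIT 1",
            (bucket_id,),
        )
        if snap is None:
            return 0.0
        rates = {r["name"]: r["rate"] for r in db.fetchall("SELECT name, rate FROM cost_rates")}
        # Map snapshot byte columns to their seeded $/GB/month rate names
        tiers = {
            "standard_bytes": "storage_standard_gb_month",
            "ia_bytes": "storage_ia_gb_month",
            "glacier_bytes": "storage_glacier_gb_month",
            "deep_archive_bytes": "storage_deep_archive_gb_month",
            "intelligent_tiering_bytes": "storage_intelligent_tiering_gb_month",
        }
        return sum((snap[col] or 0) / GB * rates[rate] for col, rate in tiers.items())

Storing rates as rows also means a price change is a single UPDATE, with updated_at recording when it happened.
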
s3ui/logging_setup.py ADDED
@@ -0,0 +1,19 @@
+import logging
+from logging.handlers import RotatingFileHandler
+
+from s3ui.constants import LOG_BACKUP_COUNT, LOG_DIR, LOG_FILE, MAX_LOG_SIZE
+
+
+def setup_logging() -> None:
+    """Configure rotating file logger for s3ui."""
+    LOG_DIR.mkdir(parents=True, exist_ok=True)
+    handler = RotatingFileHandler(
+        LOG_FILE,
+        maxBytes=MAX_LOG_SIZE,
+        backupCount=LOG_BACKUP_COUNT,
+    )
+    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)-8s [%(name)s] %(message)s"))
+    # Attach to the package logger, not the root logger, so only s3ui records land in the file
+    pkg_logger = logging.getLogger("s3ui")
+    pkg_logger.setLevel(logging.DEBUG)
+    pkg_logger.addHandler(handler)
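
Since the handler hangs off the "s3ui" logger, any module logging under that namespace inherits it through propagation. A brief sketch, with the child logger name made up:

    import logging

    from s3ui.logging_setup import setup_logging

    setup_logging()
    log = logging.getLogger("s3ui.app")  # hypothetical child logger under "s3ui"
    log.info("starting up")             # propagates to the rotating file handler
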