tradedangerous 12.0.0__py3-none-any.whl → 12.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tradedangerous might be problematic; consult the advisory linked from the package's registry page for more details.

@@ -0,0 +1,332 @@
1
+ # tradedangerous/db/lifecycle.py
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+ from typing import Iterable, List, Tuple, Optional, Dict
6
+
7
+ from sqlalchemy import inspect, text
8
+ from sqlalchemy.engine import Engine
9
+ from sqlalchemy.schema import MetaData
10
+
11
+
12
+ # --------------------------------------------------------------------
13
+ # Utilities
14
+ # --------------------------------------------------------------------
15
+
16
def is_sqlite(engine: Engine) -> bool:
    """Tell whether *engine* is bound to the SQLite dialect."""
    return "sqlite" == engine.dialect.name
19
+
20
+
21
def _user_tables(engine: Engine) -> Iterable[str]:
    """Names of the user-visible tables in the connected database.

    SQLite's internal bookkeeping tables (``sqlite_*``) are filtered out;
    other dialects report their table list unchanged.
    """
    inspector = inspect(engine)
    table_names = inspector.get_table_names()
    if not is_sqlite(engine):
        return table_names
    return [name for name in table_names if not name.startswith("sqlite_")]
28
+
29
+
30
def is_empty(engine: Engine) -> bool:
    """True when the database contains no user tables at all."""
    return not list(_user_tables(engine))
33
+
34
+
35
+ # --------------------------------------------------------------------
36
+ # (Re)creation helpers — prefer explicit paths; discovery is fallback
37
+ # --------------------------------------------------------------------
38
+
39
+ def _read_sql_file(sql_path: Path) -> str:
40
+ """Read the provided SQL file (authoritative SQLite schema)."""
41
+ if not sql_path.exists():
42
+ raise FileNotFoundError(f"SQLite schema file not found: {sql_path}")
43
+ return sql_path.read_text(encoding="utf-8")
44
+
45
+
46
+ def _read_legacy_sql() -> str:
47
+ """Fallback: locate the legacy SQLite schema SQL if an explicit path was not provided."""
48
+ candidates = [
49
+ Path(__file__).resolve().parents[1] / "templates" / "TradeDangerous.sql",
50
+ Path.cwd() / "tradedangerous" / "templates" / "TradeDangerous.sql",
51
+ Path.cwd() / "TradeDangerous.sql",
52
+ ]
53
+ for p in candidates:
54
+ if p.exists():
55
+ return p.read_text(encoding="utf-8")
56
+ raise FileNotFoundError("TradeDangerous.sql not found in expected locations.")
57
+
58
+
59
+ def _execute_sql_script(engine: Engine, script: str) -> None:
60
+ """Execute a multi-statement SQL script using sqlite3's executescript()."""
61
+ with engine.begin() as conn:
62
+ raw_conn = conn.connection # DB-API connection (sqlite3.Connection)
63
+ raw_conn.executescript(script)
64
+
65
+
66
def _create_sqlite_from_legacy(engine: Engine, sql_path: Optional[Path] = None) -> None:
    """Build the SQLite schema from the legacy SQL script.

    An explicitly supplied *sql_path* takes precedence; otherwise the
    script is discovered via the usual fallback locations.
    """
    script = _read_legacy_sql() if sql_path is None else _read_sql_file(sql_path)
    _execute_sql_script(engine, script)
73
+
74
+
75
+ # --------------------------------------------------------------------
76
+ # Public resets
77
+ # --------------------------------------------------------------------
78
+
79
def reset_sqlite(engine: Engine, db_path: Path, sql_path: Optional[Path] = None) -> None:
    """
    Reset the SQLite schema: rotate the DB file aside, then recreate from legacy SQL.

    Steps:
      1) Dispose the SQLAlchemy engine so pooled sqlite file handles are released.
      2) Rotate the on-disk database file to a ``.old`` sibling
         (idempotent; falls back to copy+unlink for cross-device moves).
      3) Ensure the target directory exists.
      4) Recreate the schema from the canonical SQL file (or fallback discovery).

    Notes:
      - Rotation keeps the historic naming: TradeDangerous.db -> TradeDangerous.old
      - With no existing DB file, rotation is a no-op.
      - Every step before (4) is best-effort: failures never block recreation.
    """
    # 1) Best-effort release of any pooled connections/file handles.
    try:
        engine.dispose()
    except Exception:
        pass

    db_path = db_path.resolve()
    backup = db_path.with_suffix(".old")

    # 2) Rotate DB -> .old; rotation problems must not prevent recreation.
    try:
        if db_path.exists():
            try:
                if backup.exists():
                    backup.unlink()
            except Exception:
                # Couldn't clear the old backup; let rename/copy raise if it must.
                pass

            try:
                db_path.rename(backup)
            except OSError:
                # Cross-device rename or locked file: copy, then try to unlink.
                import shutil
                shutil.copy2(db_path, backup)
                try:
                    db_path.unlink()
                except Exception:
                    # Leave both files; recreation still targets db_path.
                    pass
    except Exception:
        pass

    # 3) Guarantee the parent directory exists (best-effort).
    try:
        db_path.parent.mkdir(parents=True, exist_ok=True)
    except Exception:
        pass

    # 4) Recreate the schema from the canonical SQL.
    _create_sqlite_from_legacy(engine, sql_path=sql_path)
135
+
136
def reset_mariadb(engine: Engine, metadata: MetaData) -> None:
    """
    Drop and recreate every ORM-mapped table on MariaDB/MySQL.

    FOREIGN_KEY_CHECKS is switched off for the duration so drop/create
    order never trips over FK constraints, then restored unconditionally —
    this keeps resets deterministic.
    """
    # One transactional connection for the entire reset.
    with engine.begin() as connection:
        connection.execute(text("SET FOREIGN_KEY_CHECKS=0"))
        try:
            metadata.drop_all(bind=connection)
            metadata.create_all(bind=connection)
        finally:
            # Re-enable FK enforcement no matter what happened above.
            connection.execute(text("SET FOREIGN_KEY_CHECKS=1"))
153
+
154
+
155
+
156
+ # --------------------------------------------------------------------
157
+ # Unified reset entry point (dialect hidden from callers)
158
+ # --------------------------------------------------------------------
159
+
160
def reset_db(engine: Engine, *, db_path: Path, sql_path: Optional[Path] = None) -> str:
    """
    Dialect-appropriate schema reset for *engine*.

    SQLite callers MUST supply the canonical on-disk ``db_path`` and SHOULD
    supply ``sql_path``; nothing beyond optional SQL discovery is deduced here.

    Returns:
        A short action string for logs/tests.

    Raises:
        RuntimeError: for any backend other than sqlite/mysql/mariadb.
    """
    dialect = engine.dialect.name.lower()

    if dialect == "sqlite":
        reset_sqlite(engine, db_path=db_path, sql_path=sql_path)
        return "sqlite:rotated+recreated"

    if dialect in ("mysql", "mariadb"):
        # Resolve ORM metadata here so call sites stay dialect-agnostic.
        from tradedangerous.db import orm_models
        reset_mariadb(engine, orm_models.Base.metadata)
        return f"{dialect}:reset"

    raise RuntimeError(f"Unsupported database backend: {engine.dialect.name}")
182
+
183
+
184
+ # --------------------------------------------------------------------
185
+ # Sanity checks (seconds-only, no deep I/O)
186
+ # --------------------------------------------------------------------
187
+
188
# NOTE: 'Added' removed from core set (being deprecated)
# Tables that must exist (and carry primary keys) for the sanity checks below.
_CORE_TABLES: Tuple[str, ...] = (
    "System",
    "Station",
    "Category",
    "Item",
    "StationItem",
)
196
+
197
def _core_tables_and_pks_ok(engine: Engine) -> Tuple[bool, List[str]]:
    """
    T1 + T2 sanity: every core table exists and declares a primary key.

    Returns:
        (ok, problems) — ``problems`` holds human-readable findings.
    """
    inspector = inspect(engine)

    present = set(inspector.get_table_names())
    absent = [name for name in _CORE_TABLES if name not in present]
    if absent:
        # Structure is already broken; PK inspection would be meaningless.
        return False, [f"missing tables: {', '.join(absent)}"]

    problems: List[str] = []
    for name in _CORE_TABLES:
        constraint = inspector.get_pk_constraint(name) or {}
        if not (constraint.get("constrained_columns") or []):
            problems.append(f"missing primary key on {name}")

    return not problems, problems
218
+
219
+
220
def _seed_counts_ok(engine: Engine) -> Tuple[bool, List[str]]:
    """
    T4 sanity: minimal seed/anchor rows exist.

    Requires at least one row each in Category and System.
    """
    problems: List[str] = []
    with engine.connect() as connection:
        for table in ("Category", "System"):
            # Table names come from the fixed tuple above, never user input.
            count = connection.execute(text(f"SELECT COUNT(*) FROM {table}")).scalar() or 0
            if count <= 0:
                problems.append(f"{table} is empty")

    return not problems, problems
234
+
235
+
236
+ def _connectivity_ok(engine: Engine) -> bool:
237
+ """T0: Cheap connectivity probe (redundant if T4 runs, but negligible)."""
238
+ try:
239
+ with engine.connect() as conn:
240
+ conn.execute(text("SELECT 1"))
241
+ return True
242
+ except Exception:
243
+ return False
244
+
245
+
246
+ # --------------------------------------------------------------------
247
+ # Orchestration (policy) — may call buildCache
248
+ # --------------------------------------------------------------------
249
+
250
def ensure_fresh_db(
    backend: str,
    engine: Engine,
    data_dir: Path,
    metadata: MetaData | None,
    mode: str = "auto",
    *,
    tdb=None,
    tdenv=None,
    rebuild: bool = True,
) -> Dict[str, str]:
    """
    Ensure a *sane, populated* database exists (seconds-only checks).

    Checks:
      - T0: connectivity
      - T1/T2: core tables exist and have PKs
      - T4: seed rows exist in Category and System

    Actions:
      - mode == "force" → rebuild via buildCache(...) (if rebuild=True)
      - mode == "auto" and not sane → rebuild via buildCache(...) (if rebuild=True)
      - not sane and rebuild == False → action = "needs_rebuild" (NEVER rebuild)
      - sane and mode != "force" → action = "kept"

    Returns a summary dict including:
      - backend, mode, action, sane (Y/N), and optional reason.

    NOTE:
      - When a rebuild is required but rebuild=True and (tdb/tdenv) are missing,
        a ValueError is raised (preserves current semantics).
      - When rebuild=False, the function NEVER calls buildCache and never raises
        for missing tdb/tdenv. It simply reports the status.
      - `data_dir` and `metadata` are not referenced in this body; presumably
        kept for interface compatibility with callers — TODO confirm.
    """
    summary: Dict[str, str] = {
        "backend": (backend or engine.dialect.name).lower(),
        "mode": mode,
        "action": "kept",
        "sane": "Y",
    }

    # T0: cheap connectivity. A failure escalates "auto" to "force" so the
    # rebuild decision below fires even though structure checks are skipped.
    if not _connectivity_ok(engine):
        summary["reason"] = "connectivity-failed"
        summary["sane"] = "N"
        if mode == "auto":
            mode = "force"

    # T1+T2: structure; T4: seeds. Only probed while still considered sane;
    # seed counts are only queried when structure passed (tables exist).
    if summary["sane"] == "Y":
        structure_ok, struct_problems = _core_tables_and_pks_ok(engine)
        if not structure_ok:
            summary["sane"] = "N"
            summary["reason"] = "; ".join(struct_problems) or "structure-invalid"
        else:
            seeds_ok, seed_problems = _seed_counts_ok(engine)
            if not seeds_ok:
                summary["sane"] = "N"
                reason = "; ".join(seed_problems) or "seeds-missing"
                # Append to any earlier reason; strip stray separators.
                summary["reason"] = f"{summary.get('reason','')}; {reason}".strip("; ").strip()

    sane = (summary["sane"] == "Y")
    must_rebuild = (mode == "force") or (not sane)

    # If nothing to do, return immediately.
    if not must_rebuild:
        summary["action"] = "kept"
        return summary

    # Caller explicitly requested no rebuild: report and exit.
    if not rebuild:
        summary["action"] = "needs_rebuild"
        return summary

    # From here on, behavior matches the original: rebuild via buildCache.
    if tdb is None or tdenv is None:
        raise ValueError("ensure_fresh_db needs `tdb` and `tdenv` to rebuild via buildCache")

    # Imported lazily to avoid a module-level import cycle with the cache layer.
    from tradedangerous.cache import buildCache

    buildCache(tdb, tdenv)
    summary["action"] = "rebuilt"
    return summary
@@ -0,0 +1,208 @@
1
+ # tradedangerous/db/locks.py
2
+ # -----------------------------------------------------------------------------
3
+ # Advisory lock helpers (MariaDB/MySQL) — per-station serialization
4
+ #
5
+ # SQLite compatibility:
6
+ # - On SQLite (or any unsupported dialect), all helpers become NO-OPs and
7
+ # behave as if the lock was immediately acquired (yield True). This lets
8
+ # shared code run unchanged across backends.
9
+ #
10
+ # Usage (both writers must use the SAME key format):
11
+ # from tradedangerous.db.locks import station_advisory_lock
12
+ #
13
+ # with sa_session_local(session_factory) as s:
14
+ # # (optional) set isolation once per process elsewhere:
15
+ # # s.execute(text("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED")); s.commit()
16
+ # with station_advisory_lock(s, station_id, timeout_seconds=0.2, max_retries=4) as got:
17
+ # if not got:
18
+ # # processor: defer/requeue work for this station and continue
19
+ # return
20
+ # with s.begin():
21
+ # # do per-station writes here...
22
+ # pass
23
+ # -----------------------------------------------------------------------------
24
+
25
+ from __future__ import annotations
26
+
27
+ import time
28
+ from contextlib import contextmanager
29
+ from typing import Iterator
30
+
31
+ from sqlalchemy import text
32
+ from sqlalchemy.orm import Session
33
+
34
+ __all__ = [
35
+ "station_advisory_lock",
36
+ "acquire_station_lock",
37
+ "release_station_lock",
38
+ "station_lock_key",
39
+ ]
40
+
41
+ # Precompiled SQL (MySQL/MariaDB only)
42
+ _SQL_GET_LOCK = text("SELECT GET_LOCK(:k, :t)")
43
+ _SQL_RELEASE_LOCK = text("SELECT RELEASE_LOCK(:k)")
44
+
45
+ def _is_lock_supported(session: Session) -> bool:
46
+ """
47
+ Return True if the current SQLAlchemy session is bound to a backend that
48
+ supports advisory locks via GET_LOCK/RELEASE_LOCK (MySQL/MariaDB).
49
+ """
50
+ try:
51
+ name = (session.get_bind().dialect.name or "").lower()
52
+ except Exception:
53
+ name = ""
54
+ return name in ("mysql", "mariadb")
55
+
56
+ def _ensure_read_committed(session: Session) -> None:
57
+ """
58
+ Ensure the session is using READ COMMITTED for subsequent transactions.
59
+ - Applies only to MySQL/MariaDB.
60
+ - No-ops on SQLite/others.
61
+ - Only sets it if NOT already inside a transaction (affects next txn).
62
+ """
63
+ if not _is_lock_supported(session):
64
+ return
65
+ try:
66
+ # Only set if we're not already in a transaction; otherwise it would
67
+ # affect the next transaction, not the current one.
68
+ if not session.in_transaction():
69
+ session.execute(text("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED"))
70
+ # No explicit commit needed; this is a session-level setting.
71
+ except Exception:
72
+ # Best-effort; if this fails we just proceed with the default isolation.
73
+ pass
74
+
75
+ def station_lock_key(station_id: int) -> str:
76
+ """
77
+ Return the advisory lock key used by both writers for the same station.
78
+ Keep this format identical in all writers (processor + Spansh).
79
+ """
80
+ return f"td.station.{int(station_id)}"
81
+
82
+ def acquire_station_lock(session: Session, station_id: int, timeout_seconds: float) -> bool:
83
+ """
84
+ Try to acquire the advisory lock for a station on THIS DB connection.
85
+
86
+ Returns:
87
+ True -> acquired within timeout (or NO-OP True on unsupported dialects)
88
+ False -> timed out (lock held elsewhere)
89
+
90
+ Notes:
91
+ - Advisory locks are per-connection. Use the same Session for acquire,
92
+ the critical section, and release.
93
+ - On SQLite/unsupported dialects, this is a NO-OP that returns True.
94
+ """
95
+ if not _is_lock_supported(session):
96
+ return True # NO-OP on SQLite/unsupported backends
97
+
98
+ key = station_lock_key(station_id)
99
+ row = session.execute(_SQL_GET_LOCK, {"k": key, "t": float(timeout_seconds)}).first()
100
+ # MariaDB/MySQL GET_LOCK returns 1 (acquired), 0 (timeout), or NULL (error)
101
+ return bool(row and row[0] == 1)
102
+
103
+ def release_station_lock(session: Session, station_id: int) -> None:
104
+ """
105
+ Release the advisory lock for a station on THIS DB connection.
106
+ Safe to call in finally; releasing a non-held lock is harmless.
107
+
108
+ On SQLite/unsupported dialects, this is a NO-OP.
109
+ """
110
+ if not _is_lock_supported(session):
111
+ return # NO-OP on SQLite/unsupported backends
112
+
113
+ key = station_lock_key(station_id)
114
+ try:
115
+ session.execute(_SQL_RELEASE_LOCK, {"k": key})
116
+ except Exception:
117
+ # Intentionally swallow — RELEASE_LOCK may return 0/NULL if not held.
118
+ pass
119
+
120
+ @contextmanager
121
+ def station_advisory_lock(
122
+ session: Session,
123
+ station_id: int,
124
+ timeout_seconds: float = 0.2,
125
+ max_retries: int = 4,
126
+ backoff_start_seconds: float = 0.05,
127
+ ) -> Iterator[bool]:
128
+ """
129
+ Context manager to acquire/retry/release a per-station advisory lock.
130
+
131
+ Resilience improvement:
132
+ - If no transaction is active on the Session, this helper will OPEN ONE,
133
+ so the lock is taken on the same physical connection the ensuing DML uses.
134
+ In that case, it will COMMIT on normal exit, or ROLLBACK if an exception
135
+ bubbles out of the context block.
136
+ - If a transaction is already active, this helper does NOT touch txn
137
+ boundaries; caller remains responsible for commit/rollback.
138
+
139
+ Yields:
140
+ acquired (bool): True if acquired within retry policy;
141
+ True immediately on unsupported dialects (NO-OP);
142
+ False if not acquired on supported backends.
143
+ """
144
+ # Fast-path NO-OP for SQLite/unsupported dialects
145
+ if not _is_lock_supported(session):
146
+ try:
147
+ yield True
148
+ finally:
149
+ pass
150
+ return
151
+
152
+ # If we can still influence the next txn, prefer READ COMMITTED for shorter waits.
153
+ _ensure_read_committed(session)
154
+
155
+ # Pin a connection if caller hasn't already begun a transaction.
156
+ started_txn = False
157
+ txn_ctx = None
158
+ if not session.in_transaction():
159
+ txn_ctx = session.begin()
160
+ started_txn = True
161
+
162
+ got = False
163
+ try:
164
+ # Attempt with bounded retries + exponential backoff.
165
+ attempt = 0
166
+ while attempt < max_retries:
167
+ if acquire_station_lock(session, station_id, timeout_seconds):
168
+ got = True
169
+ break
170
+ time.sleep(backoff_start_seconds * (2 ** attempt))
171
+ attempt += 1
172
+
173
+ # Hand control to caller
174
+ yield got
175
+
176
+ # If we created the transaction and no exception occurred, commit it.
177
+ if started_txn and got:
178
+ try:
179
+ session.commit()
180
+ except Exception:
181
+ # If commit fails, make sure to roll back so we don't leak an open txn.
182
+ session.rollback()
183
+ raise
184
+ except Exception:
185
+ # If we created the transaction and an exception escaped the block, roll it back.
186
+ if started_txn and session.in_transaction():
187
+ try:
188
+ session.rollback()
189
+ except Exception:
190
+ # Swallow secondary rollback failures; original exception should propagate.
191
+ pass
192
+ raise
193
+ finally:
194
+ # Always release the advisory lock if we acquired it.
195
+ if got:
196
+ try:
197
+ release_station_lock(session, station_id)
198
+ except Exception:
199
+ # Lock releases are best-effort; don't mask user exceptions.
200
+ pass
201
+
202
+ # If we opened a txn context object (older SA versions), ensure it's closed.
203
+ # (Harmless if already committed/rolled back above.)
204
+ if started_txn and txn_ctx is not None:
205
+ try:
206
+ txn_ctx.close()
207
+ except Exception:
208
+ pass