tradedangerous: 12.0.5-py3-none-any.whl → 12.0.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tradedangerous might be problematic.
- tradedangerous/cache.py +135 -133
- tradedangerous/commands/buildcache_cmd.py +7 -7
- tradedangerous/commands/buy_cmd.py +4 -4
- tradedangerous/commands/export_cmd.py +11 -11
- tradedangerous/commands/import_cmd.py +12 -12
- tradedangerous/commands/market_cmd.py +17 -17
- tradedangerous/commands/olddata_cmd.py +18 -18
- tradedangerous/commands/rares_cmd.py +30 -30
- tradedangerous/commands/run_cmd.py +21 -21
- tradedangerous/commands/sell_cmd.py +5 -5
- tradedangerous/corrections.py +1 -1
- tradedangerous/csvexport.py +20 -20
- tradedangerous/db/adapter.py +9 -9
- tradedangerous/db/config.py +4 -4
- tradedangerous/db/engine.py +12 -12
- tradedangerous/db/lifecycle.py +28 -28
- tradedangerous/db/orm_models.py +42 -42
- tradedangerous/db/paths.py +3 -3
- tradedangerous/plugins/eddblink_plug.py +106 -251
- tradedangerous/plugins/spansh_plug.py +253 -253
- tradedangerous/prices.py +21 -21
- tradedangerous/tradedb.py +85 -85
- tradedangerous/tradeenv.py +2 -2
- tradedangerous/version.py +1 -1
- {tradedangerous-12.0.5.dist-info → tradedangerous-12.0.6.dist-info}/METADATA +1 -1
- {tradedangerous-12.0.5.dist-info → tradedangerous-12.0.6.dist-info}/RECORD +30 -30
- {tradedangerous-12.0.5.dist-info → tradedangerous-12.0.6.dist-info}/WHEEL +0 -0
- {tradedangerous-12.0.5.dist-info → tradedangerous-12.0.6.dist-info}/entry_points.txt +0 -0
- {tradedangerous-12.0.5.dist-info → tradedangerous-12.0.6.dist-info}/licenses/LICENSE +0 -0
- {tradedangerous-12.0.5.dist-info → tradedangerous-12.0.6.dist-info}/top_level.txt +0 -0
tradedangerous/db/lifecycle.py
CHANGED
@@ -79,13 +79,13 @@ def _create_sqlite_from_legacy(engine: Engine, sql_path: Optional[Path] = None)
 def reset_sqlite(engine: Engine, db_path: Path, sql_path: Optional[Path] = None) -> None:
     """
     Reset the SQLite schema by rotating the DB file and recreating from legacy SQL.
-
+
     Steps:
     1) Dispose the SQLAlchemy engine to release pooled sqlite file handles.
     2) Rotate the on-disk database file to a .old sibling (idempotent; cross-device safe).
     3) Ensure the target directory exists.
     4) Recreate the schema using the provided canonical SQL file (or fallback discovery).
-
+
     Notes:
     - Rotation naming preserves your historic convention:
       TradeDangerous.db → TradeDangerous.old
@@ -96,7 +96,7 @@ def reset_sqlite(engine: Engine, db_path: Path, sql_path: Optional[Path] = None)
         engine.dispose()
     except Exception:
         pass # best-effort
-
+
     # 2) Rotate DB → .old (idempotent, cross-device safe)
     db_path = db_path.resolve()
     old_path = db_path.with_suffix(".old")
@@ -108,7 +108,7 @@ def reset_sqlite(engine: Engine, db_path: Path, sql_path: Optional[Path] = None)
         except Exception:
             # If removal of old backup fails, continue and let rename/copy raise if necessary
             pass
-
+
     try:
         db_path.rename(old_path)
     except OSError:
@@ -123,13 +123,13 @@ def reset_sqlite(engine: Engine, db_path: Path, sql_path: Optional[Path] = None)
         except Exception:
             # Rotation shouldn't prevent schema recreation; continue
             pass
-
+
     # 3) Make sure parent directory exists
     try:
         db_path.parent.mkdir(parents=True, exist_ok=True)
     except Exception:
         pass
-
+
     # 4) Recreate schema from canonical SQL
     _create_sqlite_from_legacy(engine, sql_path=sql_path)
 
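The reset_sqlite docstring in the hunks above outlines a four-step flow: dispose the engine, rotate the on-disk file to a .old sibling, ensure the parent directory exists, and recreate the schema from the canonical SQL. As a rough, standalone sketch of just the rotation step (not the package's actual helper; the function name and the shutil fallback are assumptions):

from pathlib import Path
import shutil

def rotate_db_file(db_path: Path) -> Path:
    """Hypothetical sketch: rename TradeDangerous.db to TradeDangerous.old,
    falling back to copy-then-unlink when the rename crosses filesystems."""
    db_path = db_path.resolve()
    old_path = db_path.with_suffix(".old")
    if not db_path.exists():
        return old_path                      # nothing to rotate
    if old_path.exists():
        old_path.unlink()                    # drop any previous backup
    try:
        db_path.rename(old_path)             # fast path: same filesystem
    except OSError:
        shutil.copy2(db_path, old_path)      # cross-device: copy, then remove
        db_path.unlink()
    return old_path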
@@ -137,7 +137,7 @@ def reset_mariadb(engine: Engine, metadata: MetaData) -> None:
     """
     Drop all tables and recreate using ORM metadata (MariaDB/MySQL),
     with FOREIGN_KEY_CHECKS disabled during the operation.
-
+
     This avoids FK-ordering issues and makes resets deterministic.
     """
     # Use a transactional connection for the whole reset
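The hunk above documents the MariaDB/MySQL path: drop and recreate every table from the ORM metadata while FOREIGN_KEY_CHECKS is disabled, so drop/create order cannot trip over foreign keys. A minimal sketch of that pattern in plain SQLAlchemy (illustrative only, not the package's exact implementation):

from sqlalchemy import MetaData, text
from sqlalchemy.engine import Engine

def drop_and_recreate(engine: Engine, metadata: MetaData) -> None:
    # One transactional connection for the whole reset; FK checks are
    # switched off so the drop/create order does not matter.
    with engine.begin() as conn:
        conn.execute(text("SET FOREIGN_KEY_CHECKS=0"))
        metadata.drop_all(bind=conn)
        metadata.create_all(bind=conn)
        conn.execute(text("SET FOREIGN_KEY_CHECKS=1"))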
@@ -160,24 +160,24 @@ def reset_mariadb(engine: Engine, metadata: MetaData) -> None:
 def reset_db(engine: Engine, *, db_path: Path, sql_path: Optional[Path] = None) -> str:
     """
     Reset the database schema for the given engine in a dialect-appropriate way.
-
+
     Caller MUST pass the canonical on-disk `db_path` for SQLite and SHOULD pass `sql_path`.
     (No path deduction is attempted here beyond optional SQL discovery.)
-
+
     Returns a short action string for logs/tests.
     """
     dialect = engine.dialect.name.lower()
-
+
     if dialect == "sqlite":
         reset_sqlite(engine, db_path=db_path, sql_path=sql_path)
         return "sqlite:rotated+recreated"
-
+
     if dialect in ("mysql", "mariadb"):
         # Resolve ORM metadata internally to avoid dialect branching at call sites.
         from tradedangerous.db import orm_models
         reset_mariadb(engine, orm_models.Base.metadata)
         return f"{dialect}:reset"
-
+
     raise RuntimeError(f"Unsupported database backend: {engine.dialect.name}")
 
 
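Given the signature and return strings shown in this hunk, a call site would look roughly like the following; the engine URL, the database location, and the SQL template path are assumptions made only for the example:

from pathlib import Path
from sqlalchemy import create_engine
from tradedangerous.db.lifecycle import reset_db

engine = create_engine("sqlite:///data/TradeDangerous.db")   # assumed location
action = reset_db(
    engine,
    db_path=Path("data/TradeDangerous.db"),
    sql_path=Path("data/TradeDangerous.sql"),                # hypothetical SQL template path
)
print(action)   # "sqlite:rotated+recreated", "mysql:reset", or "mariadb:reset"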
@@ -201,19 +201,19 @@ def _core_tables_and_pks_ok(engine: Engine) -> Tuple[bool, List[str]]:
     """
     problems: List[str] = []
     insp = inspect(engine)
-
+
     existing = set(insp.get_table_names())
     missing = [t for t in _CORE_TABLES if t not in existing]
     if missing:
         problems.append(f"missing tables: {', '.join(missing)}")
         return False, problems
-
+
     for t in _CORE_TABLES:
         pk = insp.get_pk_constraint(t) or {}
         cols = pk.get("constrained_columns") or []
         if not cols:
             problems.append(f"missing primary key on {t}")
-
+
     return (len(problems) == 0), problems
 
 
@@ -229,7 +229,7 @@ def _seed_counts_ok(engine: Engine) -> Tuple[bool, List[str]]:
             cnt = conn.execute(text(f"SELECT COUNT(*) FROM {tbl}")).scalar() or 0
             if cnt <= 0:
                 problems.append(f"{tbl} is empty")
-
+
     return (len(problems) == 0), problems
 
 
@@ -260,21 +260,21 @@ def ensure_fresh_db(
 ) -> Dict[str, str]:
     """
     Ensure a *sane, populated* database exists (seconds-only checks).
-
+
     Checks:
     - T0: connectivity
     - T1/T2: core tables exist and have PKs
     - T4: seed rows exist in Category and System
-
+
     Actions:
     - mode == "force" → rebuild via buildCache(...) (if rebuild=True)
     - mode == "auto" and not sane → rebuild via buildCache(...) (if rebuild=True)
     - not sane and rebuild == False → action = "needs_rebuild" (NEVER rebuild)
     - sane and mode != "force" → action = "kept"
-
+
     Returns a summary dict including:
     - backend, mode, action, sane (Y/N), and optional reason.
-
+
     NOTE:
     - When a rebuild is required but rebuild=True and (tdb/tdenv) are missing,
       a ValueError is raised (preserves current semantics).
@@ -287,14 +287,14 @@ def ensure_fresh_db(
         "action": "kept",
         "sane": "Y",
     }
-
+
     # T0: cheap connectivity
     if not _connectivity_ok(engine):
         summary["reason"] = "connectivity-failed"
         summary["sane"] = "N"
         if mode == "auto":
             mode = "force"
-
+
     # T1+T2: structure; T4: seeds
     if summary["sane"] == "Y":
         structure_ok, struct_problems = _core_tables_and_pks_ok(engine)
@@ -307,26 +307,26 @@ def ensure_fresh_db(
             summary["sane"] = "N"
             reason = "; ".join(seed_problems) or "seeds-missing"
             summary["reason"] = f"{summary.get('reason','')}; {reason}".strip("; ").strip()
-
+
     sane = (summary["sane"] == "Y")
     must_rebuild = (mode == "force") or (not sane)
-
+
     # If nothing to do, return immediately.
     if not must_rebuild:
         summary["action"] = "kept"
         return summary
-
+
     # Caller explicitly requested no rebuild: report and exit.
     if not rebuild:
         summary["action"] = "needs_rebuild"
         return summary
-
+
     # From here on, behavior matches the original: rebuild via buildCache.
     if tdb is None or tdenv is None:
         raise ValueError("ensure_fresh_db needs `tdb` and `tdenv` to rebuild via buildCache")
-
+
     from tradedangerous.cache import buildCache
-
+
     buildCache(tdb, tdenv)
     summary["action"] = "rebuilt"
     return summary
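Taken together, the ensure_fresh_db docstring and the body shown in the hunks above reduce to a small decision table over (mode, sane, rebuild). Restated as a standalone sketch (not the function itself, which also runs the connectivity, structure, and seed checks and performs the rebuild via buildCache):

def decide_action(mode: str, sane: bool, rebuild: bool) -> str:
    # Mirrors the documented behaviour of ensure_fresh_db's "action" field.
    must_rebuild = (mode == "force") or (not sane)
    if not must_rebuild:
        return "kept"            # sane and mode != "force"
    if not rebuild:
        return "needs_rebuild"   # never rebuild when the caller said not to
    return "rebuilt"             # otherwise rebuild via buildCache(tdb, tdenv)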
tradedangerous/db/orm_models.py
CHANGED
@@ -46,7 +46,7 @@ class DateTime6(TypeDecorator):
     """DATETIME that is DATETIME(6) on MySQL/MariaDB, generic DateTime elsewhere."""
     impl = DateTime
     cache_ok = True
-
+
     def load_dialect_impl(self, dialect):
         if dialect.name in ("mysql", "mariadb"):
             from sqlalchemy.dialects.mysql import DATETIME as _MYSQL_DATETIME
@@ -64,10 +64,10 @@ class CIString(TypeDecorator):
     """
     impl = String
     cache_ok = True
-
+
     def __init__(self, length, **kwargs):
         super().__init__(length=length, **kwargs)
-
+
     def load_dialect_impl(self, dialect):
         if dialect.name == "sqlite":
             return dialect.type_descriptor(String(self.impl.length, collation="NOCASE"))
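Both column types in the hunks above follow the same SQLAlchemy pattern: a TypeDecorator whose load_dialect_impl returns a dialect-specific implementation. A self-contained example of that pattern (illustrative only, not the classes above):

from sqlalchemy import String
from sqlalchemy.types import TypeDecorator

class CIText(TypeDecorator):
    """Case-insensitive string: NOCASE collation on SQLite, plain String elsewhere."""
    impl = String
    cache_ok = True

    def load_dialect_impl(self, dialect):
        # self.impl is the String(length) instance built by TypeDecorator.__init__
        if dialect.name == "sqlite":
            return dialect.type_descriptor(String(self.impl.length, collation="NOCASE"))
        return dialect.type_descriptor(String(self.impl.length))

# usage: name = Column(CIText(128), nullable=False)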
@@ -115,17 +115,17 @@ PadSize = Enum(
 # ---------- Core Domain ----------
 class Added(Base):
     __tablename__ = "Added"
-
+
     added_id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False, unique=True)
-
+
     # Relationships
     systems: Mapped[list["System"]] = relationship(back_populates="added")
 
 
 class System(Base):
     __tablename__ = "System"
-
+
     system_id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False)
     pos_x: Mapped[float] = mapped_column(nullable=False)
@@ -140,11 +140,11 @@ class System(Base):
         onupdate=now6(),
         nullable=False,
     )
-
+
     # Relationships
     added: Mapped[Optional["Added"]] = relationship(back_populates="systems")
     stations: Mapped[list["Station"]] = relationship(back_populates="system", cascade="all, delete-orphan")
-
+
     __table_args__ = (
         Index("idx_system_by_pos", "pos_x", "pos_y", "pos_z", "system_id"),
         Index("idx_system_by_name", "name"),
@@ -155,19 +155,19 @@ class System(Base):
 
 class Station(Base):
     __tablename__ = "Station"
-
+
     station_id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False)
-
+
     # type widened; cascade semantics unchanged (DELETE only)
     system_id: Mapped[int] = mapped_column(
         BigInteger,
         ForeignKey("System.system_id", ondelete="CASCADE"),
         nullable=False,
     )
-
+
     ls_from_star: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("0"))
-
+
     blackmarket: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
     max_pad_size: Mapped[str] = mapped_column(PadSize, nullable=False, server_default=text("'?'"))
     market: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
@@ -177,16 +177,16 @@ class Station(Base):
     refuel: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
     repair: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
     planetary: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
-
+
     type_id: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("0"))
     modified: Mapped[str] = mapped_column(DateTime6(), server_default=now6(), onupdate=now6(), nullable=False)
-
+
     # Relationships
     system: Mapped["System"] = relationship(back_populates="stations")
     items: Mapped[list["StationItem"]] = relationship(back_populates="station", cascade="all, delete-orphan")
     ship_vendors: Mapped[list["ShipVendor"]] = relationship(back_populates="station", cascade="all, delete-orphan")
     upgrade_vendors: Mapped[list["UpgradeVendor"]] = relationship(back_populates="station", cascade="all, delete-orphan")
-
+
     __table_args__ = (
         Index("idx_station_by_system", "system_id"),
         Index("idx_station_by_name", "name"),
@@ -197,19 +197,19 @@ class Station(Base):
 
 class Category(Base):
     __tablename__ = "Category"
-
+
     category_id: Mapped[int] = mapped_column(Integer, primary_key=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False)
-
+
     # Relationships
     items: Mapped[list["Item"]] = relationship(back_populates="category")
-
+
     __table_args__ = (Index("idx_category_by_name", "name"),)
 
 
 class Item(Base):
     __tablename__ = "Item"
-
+
     item_id: Mapped[int] = mapped_column(Integer, primary_key=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False)
     category_id: Mapped[int] = mapped_column(
@@ -219,11 +219,11 @@ class Item(Base):
     ui_order: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("0"))
     avg_price: Mapped[int | None] = mapped_column(Integer)
     fdev_id: Mapped[int | None] = mapped_column(Integer)
-
+
     # Relationships
     category: Mapped["Category"] = relationship(back_populates="items")
     stations: Mapped[list["StationItem"]] = relationship(back_populates="item", cascade="all, delete-orphan")
-
+
     __table_args__ = (
         Index("idx_item_by_fdevid", "fdev_id"),
         Index("idx_item_by_category", "category_id"),
@@ -233,7 +233,7 @@ class Item(Base):
 
 class StationItem(Base):
     __tablename__ = "StationItem"
-
+
     station_id: Mapped[int] = mapped_column(
         BigInteger,
         ForeignKey("Station.station_id", ondelete="CASCADE", onupdate="CASCADE"),
@@ -251,11 +251,11 @@ class StationItem(Base):
     supply_level: Mapped[int] = mapped_column(Integer, nullable=False)
     modified: Mapped[str] = mapped_column(DateTime6(), server_default=now6(), onupdate=now6(), nullable=False)
     from_live: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("0"))
-
+
     # Relationships
     station: Mapped["Station"] = relationship(back_populates="items")
     item: Mapped["Item"] = relationship(back_populates="stations")
-
+
     __table_args__ = (
         Index("si_mod_stn_itm", "modified", "station_id", "item_id"),
         Index("si_itm_dmdpr", "item_id", "demand_price", sqlite_where=text("demand_price > 0")),
@@ -268,18 +268,18 @@ class StationItem(Base):
 
 class Ship(Base):
     __tablename__ = "Ship"
-
+
     ship_id: Mapped[int] = mapped_column(Integer, primary_key=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False)
     cost: Mapped[int | None] = mapped_column(Integer)
-
+
     # Relationships
     vendors: Mapped[list["ShipVendor"]] = relationship(back_populates="ship")
 
 
 class ShipVendor(Base):
     __tablename__ = "ShipVendor"
-
+
     ship_id: Mapped[int] = mapped_column(
         ForeignKey("Ship.ship_id", ondelete="CASCADE", onupdate="CASCADE"),
         primary_key=True,
@@ -290,30 +290,30 @@ class ShipVendor(Base):
         primary_key=True,
     )
     modified: Mapped[str] = mapped_column(DateTime6(), server_default=now6(), onupdate=now6(), nullable=False)
-
+
     # Relationships
     ship: Mapped["Ship"] = relationship(back_populates="vendors")
     station: Mapped["Station"] = relationship(back_populates="ship_vendors")
-
+
     __table_args__ = (Index("idx_shipvendor_by_station", "station_id"),)
 
 
 class Upgrade(Base):
     __tablename__ = "Upgrade"
-
+
     upgrade_id: Mapped[int] = mapped_column(Integer, primary_key=True)
     name: Mapped[str] = mapped_column(CIString(128), nullable=False)
     class_: Mapped[int] = mapped_column("class", Integer, nullable=False)
     rating: Mapped[str] = mapped_column(CHAR(1), nullable=False)
     ship: Mapped[str | None] = mapped_column(CIString(128))
-
+
     # Relationships
     vendors: Mapped[list["UpgradeVendor"]] = relationship(back_populates="upgrade")
 
 
 class UpgradeVendor(Base):
     __tablename__ = "UpgradeVendor"
-
+
     upgrade_id: Mapped[int] = mapped_column(
         ForeignKey("Upgrade.upgrade_id", ondelete="CASCADE", onupdate="CASCADE"),
         primary_key=True,
@@ -324,17 +324,17 @@ class UpgradeVendor(Base):
         primary_key=True,
     )
     modified: Mapped[str] = mapped_column(DateTime6(), nullable=False, server_default=now6(), onupdate=now6())
-
+
     # Relationships
     upgrade: Mapped["Upgrade"] = relationship(back_populates="vendors")
     station: Mapped["Station"] = relationship(back_populates="upgrade_vendors")
-
+
     __table_args__ = (Index("idx_vendor_by_station_id", "station_id"),)
 
 
 class RareItem(Base):
     __tablename__ = "RareItem"
-
+
     rare_id: Mapped[int] = mapped_column(Integer, primary_key=True)
     station_id: Mapped[int] = mapped_column(
         BigInteger,
@@ -350,7 +350,7 @@ class RareItem(Base):
     max_allocation: Mapped[int | None] = mapped_column(Integer)
     illegal: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
     suppressed: Mapped[str] = mapped_column(TriState, nullable=False, server_default=text("'?'"))
-
+
     __table_args__ = (UniqueConstraint("name", name="uq_rareitem_name"),)
 
 
@@ -358,7 +358,7 @@ class RareItem(Base):
 
 class FDevShipyard(Base):
     __tablename__ = "FDevShipyard"
-
+
     id = Column(Integer, primary_key=True, unique=True, nullable=False)
     symbol = Column(CIString(128))
     name = Column(CIString(128))
@@ -367,7 +367,7 @@ class FDevShipyard(Base):
 
 class FDevOutfitting(Base):
     __tablename__ = "FDevOutfitting"
-
+
     id = Column(Integer, primary_key=True, unique=True, nullable=False)
     symbol = Column(CIString(128))
     category = Column(String(10))
@@ -378,7 +378,7 @@ class FDevOutfitting(Base):
     class_ = Column("class", String(1), nullable=False)
     rating = Column(String(1), nullable=False)
     entitlement = Column(String(50))
-
+
     __table_args__ = (
         CheckConstraint(
             "category IN ('hardpoint','internal','standard','utility')",
@@ -404,7 +404,7 @@ class ExportControl(Base):
     - last_reset_key: optional cursor for chunked from_live resets
     """
     __tablename__ = "ExportControl"
-
+
     id: Mapped[int] = mapped_column(Integer, primary_key=True, server_default=text("1"))
     last_full_dump_time: Mapped[str] = mapped_column(DateTime6(), nullable=False)
     last_reset_key: Mapped[int | None] = mapped_column(BigInteger, nullable=True)
@@ -415,7 +415,7 @@ class StationItemStaging(Base):
     Staging table for bulk loads (no FKs). Same columns as StationItem.
     """
    __tablename__ = "StationItem_staging"
-
+
     station_id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
     item_id: Mapped[int] = mapped_column(Integer, primary_key=True)
     demand_price: Mapped[int] = mapped_column(Integer, nullable=False)
@@ -426,7 +426,7 @@ class StationItemStaging(Base):
     supply_level: Mapped[int] = mapped_column(Integer, nullable=False)
     modified: Mapped[str] = mapped_column(DateTime6(), server_default=now6(), onupdate=now6(), nullable=False)
     from_live: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("0"))
-
+
     __table_args__ = (Index("idx_sistaging_stn_itm", "station_id", "item_id"),)
 
 
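Because these are ordinary declarative SQLAlchemy models, they can be queried in the usual way. A hedged usage sketch follows; the database URL/path and the example system name are assumptions, not values taken from the package:

from sqlalchemy import create_engine, select
from sqlalchemy.orm import Session
from tradedangerous.db import orm_models

engine = create_engine("sqlite:///data/TradeDangerous.db")   # assumed path
with Session(engine) as session:
    stmt = (
        select(orm_models.Station)
        .join(orm_models.Station.system)
        .where(orm_models.System.name == "Sol")   # System.name is a CIString column
        .limit(5)
    )
    for station in session.execute(stmt).scalars():
        print(station.name, station.ls_from_star)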
tradedangerous/db/paths.py
CHANGED
@@ -71,7 +71,7 @@ def ensure_dir(pathlike: os.PathLike | str) -> Path:
 
 def resolve_data_dir(cfg: Any = None) -> Path:
     """Resolve the persistent data directory.
-
+
     Precedence: TD_DATA env > cfg[paths|database].data_dir > ./data
     Always creates the directory.
     """
@@ -81,7 +81,7 @@ def resolve_data_dir(cfg: Any = None) -> Path:
 
 def resolve_tmp_dir(cfg: Any = None) -> Path:
     """Resolve the temporary directory.
-
+
     Precedence: TD_TMP env > cfg[paths|database].tmp_dir > ./tmp
     Always creates the directory.
     """
@@ -91,7 +91,7 @@ def resolve_tmp_dir(cfg: Any = None) -> Path:
 
 def get_sqlite_db_path(cfg: Any = None) -> Path:
     """Return full path to the SQLite DB file (does not create the file).
-
+
     Data dir is resolved via resolve_data_dir(cfg). Filename comes from:
     cfg[sqlite].sqlite_filename or cfg[database].sqlite_filename or legacy default 'TradeDangerous.db'.
     """