tradedangerous-12.7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. py.typed +1 -0
  2. trade.py +49 -0
  3. tradedangerous/__init__.py +43 -0
  4. tradedangerous/cache.py +1381 -0
  5. tradedangerous/cli.py +136 -0
  6. tradedangerous/commands/TEMPLATE.py +74 -0
  7. tradedangerous/commands/__init__.py +244 -0
  8. tradedangerous/commands/buildcache_cmd.py +102 -0
  9. tradedangerous/commands/buy_cmd.py +427 -0
  10. tradedangerous/commands/commandenv.py +372 -0
  11. tradedangerous/commands/exceptions.py +94 -0
  12. tradedangerous/commands/export_cmd.py +150 -0
  13. tradedangerous/commands/import_cmd.py +222 -0
  14. tradedangerous/commands/local_cmd.py +243 -0
  15. tradedangerous/commands/market_cmd.py +207 -0
  16. tradedangerous/commands/nav_cmd.py +252 -0
  17. tradedangerous/commands/olddata_cmd.py +270 -0
  18. tradedangerous/commands/parsing.py +221 -0
  19. tradedangerous/commands/rares_cmd.py +298 -0
  20. tradedangerous/commands/run_cmd.py +1521 -0
  21. tradedangerous/commands/sell_cmd.py +262 -0
  22. tradedangerous/commands/shipvendor_cmd.py +60 -0
  23. tradedangerous/commands/station_cmd.py +68 -0
  24. tradedangerous/commands/trade_cmd.py +181 -0
  25. tradedangerous/commands/update_cmd.py +67 -0
  26. tradedangerous/corrections.py +55 -0
  27. tradedangerous/csvexport.py +234 -0
  28. tradedangerous/db/__init__.py +27 -0
  29. tradedangerous/db/adapter.py +192 -0
  30. tradedangerous/db/config.py +107 -0
  31. tradedangerous/db/engine.py +259 -0
  32. tradedangerous/db/lifecycle.py +332 -0
  33. tradedangerous/db/locks.py +208 -0
  34. tradedangerous/db/orm_models.py +500 -0
  35. tradedangerous/db/paths.py +113 -0
  36. tradedangerous/db/utils.py +661 -0
  37. tradedangerous/edscupdate.py +565 -0
  38. tradedangerous/edsmupdate.py +474 -0
  39. tradedangerous/formatting.py +210 -0
  40. tradedangerous/fs.py +156 -0
  41. tradedangerous/gui.py +1146 -0
  42. tradedangerous/mapping.py +133 -0
  43. tradedangerous/mfd/__init__.py +103 -0
  44. tradedangerous/mfd/saitek/__init__.py +3 -0
  45. tradedangerous/mfd/saitek/directoutput.py +678 -0
  46. tradedangerous/mfd/saitek/x52pro.py +195 -0
  47. tradedangerous/misc/checkpricebounds.py +287 -0
  48. tradedangerous/misc/clipboard.py +49 -0
  49. tradedangerous/misc/coord64.py +83 -0
  50. tradedangerous/misc/csvdialect.py +57 -0
  51. tradedangerous/misc/derp-sentinel.py +35 -0
  52. tradedangerous/misc/diff-system-csvs.py +159 -0
  53. tradedangerous/misc/eddb.py +81 -0
  54. tradedangerous/misc/eddn.py +349 -0
  55. tradedangerous/misc/edsc.py +437 -0
  56. tradedangerous/misc/edsm.py +121 -0
  57. tradedangerous/misc/importeddbstats.py +54 -0
  58. tradedangerous/misc/prices-json-exp.py +179 -0
  59. tradedangerous/misc/progress.py +194 -0
  60. tradedangerous/plugins/__init__.py +249 -0
  61. tradedangerous/plugins/edcd_plug.py +371 -0
  62. tradedangerous/plugins/eddblink_plug.py +861 -0
  63. tradedangerous/plugins/edmc_batch_plug.py +133 -0
  64. tradedangerous/plugins/spansh_plug.py +2647 -0
  65. tradedangerous/prices.py +211 -0
  66. tradedangerous/submit-distances.py +422 -0
  67. tradedangerous/templates/Added.csv +37 -0
  68. tradedangerous/templates/Category.csv +17 -0
  69. tradedangerous/templates/RareItem.csv +143 -0
  70. tradedangerous/templates/TradeDangerous.sql +338 -0
  71. tradedangerous/tools.py +40 -0
  72. tradedangerous/tradecalc.py +1302 -0
  73. tradedangerous/tradedb.py +2320 -0
  74. tradedangerous/tradeenv.py +313 -0
  75. tradedangerous/tradeenv.pyi +109 -0
  76. tradedangerous/tradeexcept.py +131 -0
  77. tradedangerous/tradeorm.py +183 -0
  78. tradedangerous/transfers.py +192 -0
  79. tradedangerous/utils.py +243 -0
  80. tradedangerous/version.py +16 -0
  81. tradedangerous-12.7.6.dist-info/METADATA +106 -0
  82. tradedangerous-12.7.6.dist-info/RECORD +87 -0
  83. tradedangerous-12.7.6.dist-info/WHEEL +5 -0
  84. tradedangerous-12.7.6.dist-info/entry_points.txt +3 -0
  85. tradedangerous-12.7.6.dist-info/licenses/LICENSE +373 -0
  86. tradedangerous-12.7.6.dist-info/top_level.txt +2 -0
  87. tradegui.py +24 -0
@@ -0,0 +1,661 @@
+ # tradedangerous/db/utils.py
+ # -----------------------------------------------------------------------------
+ # Minimal utilities required by the Spansh importer and other plugins that
+ # need dialect-specific code.
+ #
+ # Retained:
+ #   - parse_ts: parse timestamps to UTC-naive datetime
+ #   - get_import_batch_size: decide batch commit size based on dialect/env
+ # -----------------------------------------------------------------------------
+
+ from __future__ import annotations
+
+ import os
+ import re
+ from datetime import datetime, timezone
+ from typing import Optional, Iterable, Mapping, Sequence, Literal, Callable, Dict, Any
+
+ from sqlalchemy import Table, text, func, and_, bindparam, inspect as sa_inspect
+ from sqlalchemy.orm import Session
+ from sqlalchemy.dialects.sqlite import insert as sqlite_insert
+ from sqlalchemy.dialects.mysql import insert as mysql_insert
+ from sqlalchemy.sql.elements import ClauseElement
+
+
+ # --------------------------------------------------------
+ # eddblink helpers
+ # --------------------------------------------------------
+
+ def begin_bulk_mode(
+     session: Session,
+     *,
+     profile: str = "default",
+     phase: Literal["rebuild", "incremental"] = "incremental",
+ ) -> dict[str, Any]:
+     """
+     Apply connection-local settings to speed up bulk operations.
+     Returns an opaque token for symmetry with end_bulk_mode (currently a no-op).
+
+     - SQLite: ensure WAL, temp_store, cache; set synchronous=OFF for raw speed.
+     - MySQL/MariaDB: apply per-session import tunings (reduced fsync, lower waits).
+
+     Notes:
+       * Settings are connection-scoped and reset when the connection is returned
+         to the pool or closed.
+       * This is generic and safe for any plugin invoking long-running bulk writes.
+     """
+     token: dict[str, Any] = {"dialect": None, "profile": profile, "phase": phase}
+
+     try:
+         dialect = session.get_bind().dialect.name.lower()
+     except Exception:
+         return token  # best-effort, no-op if we can't detect
+
+     token["dialect"] = dialect
+
+     if dialect == "sqlite":
+         try:
+             conn = session.connection()
+             # Speed-first defaults (align with schema PRAGMAs).
+             conn.execute(text("PRAGMA journal_mode=WAL"))
+             conn.execute(text("PRAGMA synchronous=OFF"))
+             conn.execute(text("PRAGMA temp_store=MEMORY"))
+             # Negative cache_size is KiB; -65536 ≈ 64 MiB
+             conn.execute(text("PRAGMA cache_size=-65536"))
+             # File-level; harmless to set each time.
+             conn.execute(text("PRAGMA auto_vacuum=INCREMENTAL"))
+         except Exception:
+             # Best-effort; keep going if PRAGMA adjustment fails.
+             pass
+         return token
+
+     if dialect in ("mysql", "mariadb"):
+         try:
+             mysql_set_bulk_session(session)
+         except Exception:
+             pass
+         return token
+
+     # Other dialects: nothing applied
+     return token
+
+
+ def end_bulk_mode(session: Session, token: Dict[str, Any] | None = None) -> None:
+     """
+     Placeholder symmetry for begin_bulk_mode. Currently a no-op because we only
+     *set* per-session tunings that naturally revert when the connection returns
+     to the pool. Kept for future extensibility.
+     """
+     return
+
+
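# Usage sketch (not part of the packaged file): a typical bulk-import call
# site. `SessionLocal`, `table` and `read_batches` are hypothetical stand-ins
# for a configured sessionmaker, a Core Table and a batch source; only the
# begin/end calls are the API defined above.
def _example_bulk_import(SessionLocal, table, read_batches):
    with SessionLocal() as session:
        token = begin_bulk_mode(session, profile="eddblink", phase="rebuild")
        try:
            for batch in read_batches():
                session.execute(table.insert(), batch)
                session.commit()
        finally:
            end_bulk_mode(session, token)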
+ def get_upsert_fn(
+     session: Session,
+     table: Table,
+     *,
+     key_cols: Sequence[str],
+     update_cols: Sequence[str],
+     modified_col: Optional[str] = None,
+     always_update: Sequence[str] = (),
+ ) -> Callable[[Iterable[Mapping[str, object]]], None]:
+     """
+     Return a callable that performs a batched upsert into `table` using the
+     fastest dialect-specific path available (SQLAlchemy Core).
+
+     - If `modified_col` is provided:
+       * SQLite → INSERT .. ON CONFLICT DO UPDATE with WHERE guard using modified
+       * MySQL  → INSERT .. ON DUPLICATE KEY UPDATE with IF(guard, inserted, table)
+       Only the columns listed in `update_cols` are guarded by `modified_col`.
+
+     - Columns listed in `always_update` are synchronized unconditionally even
+       when modified timestamps are equal. This is implemented as a small,
+       portable second-pass UPDATE keyed by `key_cols`.
+
+     Usage example:
+         upsert = get_upsert_fn(
+             session,
+             SA.StationItem.__table__,
+             key_cols=("station_id", "item_id"),
+             update_cols=("demand_price", "demand_units", "demand_level",
+                          "supply_price", "supply_units", "supply_level", "from_live"),
+             modified_col="modified",
+             always_update=("from_live",),  # force-sync live flag even if modified equal
+         )
+         upsert(batch_of_row_dicts)
+     """
+     try:
+         dialect = session.get_bind().dialect.name.lower()
+     except Exception:
+         dialect = "unknown"
+
+     def _primary_upsert(rows: Iterable[Mapping[str, object]]) -> None:
+         batch = list(rows)
+         if not batch:
+             return
+
+         if modified_col:
+             if dialect == "sqlite":
+                 sqlite_upsert_modified(
+                     session,
+                     table,
+                     batch,
+                     key_cols=key_cols,
+                     modified_col=modified_col,
+                     update_cols=update_cols,
+                 )
+             elif dialect in ("mysql", "mariadb"):
+                 mysql_upsert_modified(
+                     session,
+                     table,
+                     batch,
+                     key_cols=key_cols,
+                     modified_col=modified_col,
+                     update_cols=update_cols,
+                 )
+             else:
+                 raise RuntimeError(f"Unsupported dialect for modified upsert: {dialect}")
+         else:
+             if dialect == "sqlite":
+                 sqlite_upsert_simple(session, table, batch, key_cols=key_cols, update_cols=update_cols)
+             elif dialect in ("mysql", "mariadb"):
+                 mysql_upsert_simple(session, table, batch, key_cols=key_cols, update_cols=update_cols)
+             else:
+                 raise RuntimeError(f"Unsupported dialect for simple upsert: {dialect}")
+
+     def _always_update_pass(rows: Iterable[Mapping[str, object]]) -> None:
+         if not always_update:
+             return
+         batch = list(rows)
+         if not batch:
+             return
+
+         # UPDATE table SET c1=:c1, ... WHERE k1=:__key__k1 AND k2=:__key__k2
+         where_clause = and_(*[table.c[k] == bindparam(f"__key__{k}") for k in key_cols])
+         upd = table.update().where(where_clause).values({c: bindparam(c) for c in always_update})
+
+         # Rows are expected to carry either all or none of the always_update
+         # columns; mixed subsets would yield heterogeneous executemany params.
+         params: list[Dict[str, object]] = []
+         for row in batch:
+             # Only issue an UPDATE if at least one always_update value is present
+             p: Dict[str, object] = {}
+             for k in key_cols:
+                 p[f"__key__{k}"] = row[k]
+             present = False
+             for c in always_update:
+                 if c in row:
+                     p[c] = row[c]
+                     present = True
+             if present:
+                 params.append(p)
+
+         if params:
+             session.execute(upd, params)
+
+     def _upsert(rows: Iterable[Mapping[str, object]]) -> None:
+         batch = list(rows)
+         if not batch:
+             return
+         _primary_upsert(batch)
+         _always_update_pass(batch)
+
+     return _upsert
+
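# Usage sketch (not part of the packaged file): exercising get_upsert_fn on an
# in-memory SQLite database. The stale second batch must not overwrite the
# price, but `from_live` still syncs through the always_update second pass.
def _example_guarded_upsert():
    from datetime import datetime
    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, DateTime
    from sqlalchemy.orm import Session

    md = MetaData()
    item = Table(
        "item", md,
        Column("id", Integer, primary_key=True),
        Column("price", Integer),
        Column("from_live", Integer),
        Column("modified", DateTime),
    )
    engine = create_engine("sqlite://")
    md.create_all(engine)
    with Session(engine) as session:
        upsert = get_upsert_fn(
            session, item,
            key_cols=("id",),
            update_cols=("price", "from_live"),
            modified_col="modified",
            always_update=("from_live",),
        )
        upsert([{"id": 1, "price": 100, "from_live": 1, "modified": datetime(2024, 1, 2)}])
        # Older timestamp: the guard skips price/modified, from_live still syncs.
        upsert([{"id": 1, "price": 50, "from_live": 0, "modified": datetime(2024, 1, 1)}])
        print(session.execute(item.select()).all())  # [(1, 100, 0, datetime(2024, 1, 2, 0, 0))]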
+
+ # -----------------------------------------------------------------------------
+ # spansh helpers (db specific upserts)
+ # -----------------------------------------------------------------------------
+
+ # --- Dialect checks (unchanged) ---
+ def is_sqlite(session: Session) -> bool:
+     try:
+         return session.get_bind().dialect.name.lower() == "sqlite"
+     except Exception:
+         return False
+
+ def is_mysql(session: Session) -> bool:
+     try:
+         name = session.get_bind().dialect.name.lower()
+         return name in ("mysql", "mariadb")
+     except Exception:
+         return False
+
+ def sqlite_set_bulk_pragmas(session: Session) -> None:
+     """
+     Apply connection-local PRAGMAs to speed up bulk imports.
+     Safe defaults for an import session; durability is still acceptable with WAL.
+     """
+     conn = session.connection()
+     # WAL gives better concurrency; synchronous=NORMAL keeps some safety at high speed.
+     conn.execute(text("PRAGMA journal_mode=WAL"))
+     conn.execute(text("PRAGMA synchronous=NORMAL"))
+     # Keep temp structures in memory; increase page cache.
+     conn.execute(text("PRAGMA temp_store=MEMORY"))
+     # Negative cache_size is KiB; -65536 ≈ 64 MiB page cache
+     conn.execute(text("PRAGMA cache_size=-65536"))
+
+ def sqlite_upsert_modified(
+     session: Session,
+     table: Table,
+     rows: Iterable[Mapping[str, object]],
+     *,
+     key_cols: Sequence[str],
+     modified_col: str,
+     update_cols: Sequence[str],
+ ) -> None:
+     """
+     SQLite ON CONFLICT fast-path with timestamp guard using the dialect insert():
+         INSERT .. ON CONFLICT(<keys>) DO UPDATE SET <cols...>, modified=excluded.modified
+         WHERE excluded.modified >= table.modified OR table.modified IS NULL
+     """
+     rows = list(rows)
+     if not rows:
+         return
+
+     stmt = sqlite_insert(table)
+     excluded = stmt.excluded  # "excluded" namespace
+
+     # Build set_ mapping for update columns + modified
+     set_map = {c: getattr(excluded, c) for c in update_cols}
+     set_map[modified_col] = getattr(excluded, modified_col)
+
+     # WHERE guard: only update if incoming is at least as new (or DB value is NULL)
+     where_guard = (getattr(excluded, modified_col) >= getattr(table.c, modified_col)) | (
+         getattr(table.c, modified_col).is_(None)
+     )
+
+     stmt = stmt.on_conflict_do_update(
+         index_elements=list(key_cols),
+         set_=set_map,
+         where=where_guard,
+     )
+
+     session.execute(stmt, rows)
+
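# Usage sketch (not part of the packaged file): compiling the statement built
# above to inspect the emitted SQL; assumes a StationItem-like `table` with
# station_id/item_id/supply_price/modified columns.
def _example_show_sqlite_upsert_sql(table):
    from sqlalchemy.dialects import sqlite
    stmt = sqlite_insert(table)
    stmt = stmt.on_conflict_do_update(
        index_elements=["station_id", "item_id"],
        set_={"supply_price": stmt.excluded.supply_price,
              "modified": stmt.excluded.modified},
        where=(stmt.excluded.modified >= table.c.modified) | table.c.modified.is_(None),
    )
    # Prints roughly: INSERT INTO ... ON CONFLICT (station_id, item_id)
    #   DO UPDATE SET supply_price = excluded.supply_price, modified = excluded.modified
    #   WHERE excluded.modified >= <table>.modified OR <table>.modified IS NULL
    print(stmt.compile(dialect=sqlite.dialect()))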
+ def sqlite_upsert_simple(
+     session: Session,
+     table: Table,
+     rows: Iterable[Mapping[str, object]],
+     *,
+     key_cols: Sequence[str],
+     update_cols: Sequence[str],
+ ) -> None:
+     """
+     SQLite INSERT .. ON CONFLICT(<keys>) DO UPDATE SET <update_cols>
+     (no timestamp guard) using dialect insert() so types are adapted correctly.
+     """
+     rows = list(rows)
+     if not rows:
+         return
+
+     stmt = sqlite_insert(table)
+     excluded = stmt.excluded
+     set_map = {c: getattr(excluded, c) for c in update_cols}
+
+     stmt = stmt.on_conflict_do_update(
+         index_elements=list(key_cols),
+         set_=set_map,
+     )
+
+     session.execute(stmt, rows)
+
+ def mysql_set_bulk_session(session: Session) -> None:
+     """
+     Per-session tuning for bulk imports (MariaDB/MySQL).
+     Session-scoped, resets when the connection closes/recycles.
+     Conservative defaults for import workloads.
+
+     Each statement is applied best-effort: some variables (sync_binlog,
+     innodb_flush_log_at_trx_commit) are GLOBAL-scoped on stock servers and
+     refuse a SESSION set, and sql_log_bin needs elevated privileges; one
+     refusal should not abort the remaining tunings.
+     """
+     conn = session.connection()
+     statements = (
+         # Reduce fsyncs; lose up to ~1s of transactions on power loss (import-safe).
+         "SET SESSION innodb_flush_log_at_trx_commit=2",
+         # Amortize binlog fsync if binlog is enabled.
+         "SET SESSION sync_binlog=0",
+         # Reader-friendly concurrency and shorter lock waits.
+         "SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED",
+         "SET SESSION innodb_lock_wait_timeout=10",
+         # Optional micro-wins on constraint checking (safe for our import order).
+         "SET SESSION foreign_key_checks=0",
+         "SET SESSION unique_checks=0",
+         # If permitted, skipping binlog on this session can be a big win (DEV ONLY).
+         "SET SESSION sql_log_bin=0",
+     )
+     for stmt in statements:
+         try:
+             conn.execute(text(stmt))
+         except Exception:
+             # Not always allowed; silently ignore.
+             pass
+
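# Usage sketch (not part of the packaged file): verifying, on a live
# MySQL/MariaDB connection, which session-scoped tunings actually took effect.
def _example_check_mysql_session(session: Session) -> None:
    row = session.execute(text(
        "SELECT @@session.innodb_lock_wait_timeout, "
        "@@session.foreign_key_checks, @@session.unique_checks"
    )).one()
    print(row)  # e.g. (10, 0, 0) once mysql_set_bulk_session() has run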
+ def mysql_upsert_modified(
+     session: Session,
+     table: Table,
+     rows: Iterable[Mapping[str, object]],
+     *,
+     key_cols: Sequence[str],  # present for interface symmetry
+     modified_col: str,
+     update_cols: Sequence[str],
+ ) -> None:
+     """
+     MySQL/MariaDB ON DUPLICATE KEY fast-path using dialect insert().
+     Only updates when incoming.modified >= existing.modified OR existing is NULL.
+     """
+     rows = list(rows)
+     if not rows:
+         return
+
+     ins = mysql_insert(table)
+     inserted = ins.inserted  # alias to VALUES()/INSERTED
+
+     # Guard: incoming timestamp at least as new, or DB value is NULL
+     guard = (inserted[modified_col] >= table.c[modified_col]) | (table.c[modified_col].is_(None))
+
+     # For each update col, write: IF(guard, inserted.col, table.col)
+     set_map = {
+         c: func.if_(guard, inserted[c], table.c[c])
+         for c in update_cols
+     }
+     # Always compute modified with the same guard
+     set_map[modified_col] = func.if_(guard, inserted[modified_col], table.c[modified_col])
+
+     stmt = ins.on_duplicate_key_update(**set_map)
+     session.execute(stmt, rows)
+
+
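# Usage sketch (not part of the packaged file): compiling the guarded
# ON DUPLICATE KEY statement to inspect the emitted SQL; assumes a table with
# supply_price and modified columns.
def _example_show_mysql_upsert_sql(table):
    from sqlalchemy.dialects import mysql
    ins = mysql_insert(table)
    guard = (ins.inserted.modified >= table.c.modified) | table.c.modified.is_(None)
    stmt = ins.on_duplicate_key_update(
        supply_price=func.if_(guard, ins.inserted.supply_price, table.c.supply_price),
        modified=func.if_(guard, ins.inserted.modified, table.c.modified),
    )
    # Prints roughly: INSERT INTO ... ON DUPLICATE KEY UPDATE
    #   supply_price = IF(<guard>, VALUES(supply_price), <table>.supply_price), ...
    print(stmt.compile(dialect=mysql.dialect()))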
+ def mysql_upsert_simple(
+     session: Session,
+     table: Table,
+     rows: Iterable[Mapping[str, object]],
+     *,
+     key_cols: Sequence[str],  # present for interface symmetry
+     update_cols: Sequence[str],
+ ) -> None:
+     """
+     MySQL/MariaDB ON DUPLICATE KEY fast-path (no timestamp guard) using dialect insert().
+     Updates the listed columns unconditionally to INSERTED/VALUES().
+     """
+     rows = list(rows)
+     if not rows:
+         return
+
+     ins = mysql_insert(table)
+     inserted = ins.inserted
+
+     set_map = {c: inserted[c] for c in update_cols}
+
+     stmt = ins.on_duplicate_key_update(**set_map)
+     session.execute(stmt, rows)
+
+ # -----------------------------------------------------------------------------
+ # csvexport helpers (schema introspection)
+ # -----------------------------------------------------------------------------
+ # These functions are used by csvexport.exportTableToFile() to reconstruct
+ # headers (incl. unique columns and foreign-key references) in a backend-
+ # independent way.
+ #
+ # Implemented here in utils.py so that both SQLite (PRAGMA) and SQL backends
+ # (MariaDB/MySQL INFORMATION_SCHEMA, etc.) can share common logic.
+ #
+ # Notes:
+ #   * SQLite        → PRAGMA index_list / index_info / foreign_key_list
+ #   * MySQL/MariaDB → INFORMATION_SCHEMA.STATISTICS / KEY_COLUMN_USAGE
+ #   * Other backends (e.g. PostgreSQL) would need catalog queries added here.
+ #
+ # These helpers are not intended for general ORM use — only to support
+ # correct CSV header reconstruction during exports.
+ # -----------------------------------------------------------------------------
+
+ def get_unique_columns(session: Session, table_name: str) -> list[str]:
+     """
+     Return a list of unique column names for a table.
+     Dialect-specific implementations:
+       * SQLite        → PRAGMA index_list + PRAGMA index_info
+       * MariaDB/MySQL → INFORMATION_SCHEMA.STATISTICS
+       * Other backends → best-effort fallback via the SQLAlchemy inspector
+     """
+     engine = session.get_bind()
+     dialect = engine.dialect.name.lower()
+
+     if dialect == "sqlite":
+         conn = session.connection().connection
+         cur = conn.cursor()
+         uniques: list[str] = []
+         # Pre-escape table name for PRAGMA
+         esc_table = table_name.replace("'", "''")
+         for idxRow in cur.execute(f"PRAGMA index_list('{esc_table}')"):
+             # idxRow: (seq, name, unique, origin, partial) — unique is at index 2
+             if idxRow[2]:  # 'unique' flag is truthy for UNIQUE indexes
+                 idx_name = idxRow[1]
+                 esc_idx = idx_name.replace("'", "''")
+                 # conn.execute() opens a fresh cursor, so iterating it here
+                 # does not clobber the outer `cur` result set.
+                 for unqRow in conn.execute(f"PRAGMA index_info('{esc_idx}')"):
+                     col = unqRow[2]
+                     if col not in uniques:
+                         uniques.append(col)
+         return uniques
+
+     elif dialect in ("mysql", "mariadb"):
+         sql = text("""
+             SELECT DISTINCT COLUMN_NAME
+               FROM INFORMATION_SCHEMA.STATISTICS
+              WHERE TABLE_SCHEMA = DATABASE()
+                AND TABLE_NAME = :table
+                AND NON_UNIQUE = 0
+         """)
+         rows = session.execute(sql, {"table": table_name}).fetchall()
+         return [r[0] for r in rows]
+
+     else:
+         # Fallback: use the SQLAlchemy inspector
+         insp = sa_inspect(session.get_bind())
+         cols: list[str] = []
+         try:
+             pk = insp.get_pk_constraint(table_name) or {}
+             cols.extend(pk.get("constrained_columns", []))
+         except Exception:
+             pass
+         try:
+             for uc in insp.get_unique_constraints(table_name) or []:
+                 cols.extend(uc.get("column_names", []))
+         except Exception:
+             pass
+         return list(set(cols))
+
+
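# Usage sketch (not part of the packaged file): get_unique_columns against a
# throwaway in-memory SQLite schema.
def _example_unique_columns():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session
    engine = create_engine("sqlite://")
    with engine.begin() as conn:
        conn.exec_driver_sql(
            "CREATE TABLE Station (station_id INTEGER PRIMARY KEY, name TEXT UNIQUE)"
        )
    with Session(engine) as session:
        print(get_unique_columns(session, "Station"))  # ['name']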
+ def get_foreign_keys(session: Session, table_name: str) -> list[dict]:
+     """
+     Return a list of foreign key mappings:
+         { "table": <ref_table>, "from": <local_col>, "to": <ref_col> }
+
+     Dialect-specific implementations:
+       * SQLite        → PRAGMA foreign_key_list
+       * MariaDB/MySQL → INFORMATION_SCHEMA.KEY_COLUMN_USAGE
+       * Other backends → best-effort fallback via the SQLAlchemy inspector
+     """
+     engine = session.get_bind()
+     dialect = engine.dialect.name.lower()
+
+     if dialect == "sqlite":
+         conn = session.connection().connection
+         cur = conn.cursor()
+         fkeys: list[dict] = []
+         esc_table = table_name.replace("'", "''")
+         for row in cur.execute(f"PRAGMA foreign_key_list('{esc_table}')"):
+             # row: (id, seq, table, from, to, on_update, on_delete, match)
+             fkeys.append({
+                 "table": row[2],
+                 "from": row[3],
+                 "to": row[4],
+             })
+         return fkeys
+
+     elif dialect in ("mysql", "mariadb"):
+         sql = text("""
+             SELECT COLUMN_NAME            AS `from`,
+                    REFERENCED_TABLE_NAME  AS `table`,
+                    REFERENCED_COLUMN_NAME AS `to`
+               FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
+              WHERE TABLE_SCHEMA = DATABASE()
+                AND TABLE_NAME = :table
+                AND REFERENCED_TABLE_NAME IS NOT NULL
+         """)
+         rows = session.execute(sql, {"table": table_name}).fetchall()
+         return [{"table": r[1], "from": r[0], "to": r[2]} for r in rows]
+
+     else:
+         # Fallback: use the SQLAlchemy inspector
+         insp = sa_inspect(session.get_bind())
+         fkeys: list[dict] = []
+         try:
+             for fk in insp.get_foreign_keys(table_name) or []:
+                 if not fk.get("referred_table") or not fk.get("constrained_columns"):
+                     continue
+                 fkeys.append({
+                     "table": fk["referred_table"],
+                     "from": fk["constrained_columns"][0],
+                     "to": fk["referred_columns"][0],
+                 })
+         except Exception:
+             pass
+         return fkeys
+
+
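# Usage sketch (not part of the packaged file): get_foreign_keys against the
# same kind of throwaway SQLite schema.
def _example_foreign_keys():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session
    engine = create_engine("sqlite://")
    with engine.begin() as conn:
        conn.exec_driver_sql("CREATE TABLE System (system_id INTEGER PRIMARY KEY)")
        conn.exec_driver_sql(
            "CREATE TABLE Station (station_id INTEGER PRIMARY KEY, "
            "system_id INTEGER REFERENCES System(system_id))"
        )
    with Session(engine) as session:
        print(get_foreign_keys(session, "Station"))
        # [{'table': 'System', 'from': 'system_id', 'to': 'system_id'}]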
+ # -----------------------------------------------------------------------------
+ # Timestamp Helpers
+ # -----------------------------------------------------------------------------
+
+ def age_in_days(session: Session, column: ClauseElement) -> ClauseElement:
+     """
+     Return a dialect-safe SQLAlchemy expression that yields the age of `column`
+     (a DATETIME/TIMESTAMP) in days relative to the database's current date.
+
+     Dialect mappings:
+       * SQLite        → julianday(CURRENT_DATE) - julianday(column)  (fractional)
+       * MySQL/MariaDB → TIMESTAMPDIFF(DAY, column, CURRENT_DATE())   (integer)
+       * Others        → DATE(NOW()) - DATE(column)  (best-effort integer days)
+
+     Notes:
+       - Designed for use in aggregates (e.g., func.avg(age_in_days(...))).
+       - Leaves NULL handling to the caller (filter or COALESCE as needed).
+     """
+     engine = session.get_bind()
+     dialect = engine.dialect.name.lower()
+
+     if dialect == "sqlite":
+         # julianday() yields a fractional day count; anchoring on CURRENT_DATE
+         # keeps the result free of time-of-day skew, per the mapping above.
+         return func.julianday(func.current_date()) - func.julianday(column)
+
+     if dialect in ("mysql", "mariadb"):
+         # TIMESTAMPDIFF returns an integer number of DAY boundaries crossed.
+         # Use CURRENT_DATE() to avoid time-of-day skew.
+         return func.timestampdiff(text("DAY"), column, func.current_date())
+
+     # Fallback (e.g., PostgreSQL): integer days between dates.
+     # DATE(NOW()) - DATE(column) yields an integer in many SQL dialects.
+     return func.date(func.now()) - func.date(column)
+
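# Usage sketch (not part of the packaged file): average data age per table,
# assuming a StationItem-like Core Table with a `modified` column.
def _example_average_age(session: Session, station_item: Table):
    from sqlalchemy import select
    stmt = select(func.avg(age_in_days(session, station_item.c.modified))).where(
        station_item.c.modified.is_not(None)
    )
    return session.execute(stmt).scalar()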
+ def parse_ts(value) -> Optional[datetime]:
+     """
+     Parse timestamp values into UTC-naive datetime (microsecond=0).
+
+     Accepts:
+       - None -> None
+       - datetime (aware/naive)
+       - int/float epoch seconds
+       - str:
+           * ISO-like with 'Z', '+HH', '+HHMM', or '+HH:MM'
+           * Space-separated 'YYYY-MM-DD HH:MM:SS[ offset]'
+           * Date-only 'YYYY-MM-DD'
+
+     Rules:
+       - 'Z'     -> '+00:00'
+       - '+HHMM' -> '+HH:MM'
+       - '+HH'   -> '+HH:00'
+       - single space between date/time -> replaced with 'T'
+       - Aware datetimes -> converted to UTC then made naive
+     """
+     if value is None:
+         return None
+
+     # datetime input
+     if isinstance(value, datetime):
+         dt = value
+         if dt.tzinfo is not None:
+             dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
+         return dt.replace(microsecond=0)
+
+     # epoch seconds
+     if isinstance(value, (int, float)):
+         try:
+             # Timezone-aware conversion (datetime.utcfromtimestamp is deprecated).
+             dt = datetime.fromtimestamp(float(value), tz=timezone.utc)
+             return dt.replace(tzinfo=None, microsecond=0)
+         except Exception:
+             return None
+
+     # string input
+     if isinstance(value, str):
+         s = value.strip()
+         if not s:
+             return None
+
+         # Normalise timezone notations
+         if s.endswith("Z"):
+             s = s[:-1] + "+00:00"
+         # ' ' -> 'T' to please fromisoformat
+         if " " in s and "T" not in s:
+             s = s.replace(" ", "T", 1)
+         # +HHMM -> +HH:MM
+         s = re.sub(r"([+-]\d{2})(\d{2})$", r"\1:\2", s)
+         # +HH -> +HH:00, only when it follows a time component so that a bare
+         # date like '2024-03-05' is left untouched.
+         s = re.sub(r"(?<=:\d\d)([+-]\d{2})$", r"\1:00", s)
+
+         # Try ISO parse
+         try:
+             dt = datetime.fromisoformat(s)
+             if dt.tzinfo is not None:
+                 dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
+             return dt.replace(microsecond=0)
+         except Exception:
+             pass
+
+         # Legacy / naive formats (assume UTC)
+         for fmt in ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d"):
+             try:
+                 return datetime.strptime(s, fmt).replace(microsecond=0)
+             except Exception:
+                 continue
+
+     return None
+
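# Usage sketch (not part of the packaged file): the normalisations parse_ts
# performs, written as plain assertions.
def _example_parse_ts():
    assert parse_ts("2024-03-05T12:30:45Z") == datetime(2024, 3, 5, 12, 30, 45)
    assert parse_ts("2024-03-05 12:30:45+0200") == datetime(2024, 3, 5, 10, 30, 45)
    assert parse_ts("2024-03-05") == datetime(2024, 3, 5)
    assert parse_ts(1709640045) == datetime(2024, 3, 5, 12, 0, 45)
    assert parse_ts(None) is None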
+ # -----------------------------------------------------------------------------
+ # Batch size calculation
+ # -----------------------------------------------------------------------------
+ def get_import_batch_size(session: Session, profile: str | None = None) -> int | None:
+     """
+     Return the recommended batch commit size for imports.
+
+     - Respects the TD_LISTINGS_BATCH environment variable (int).
+     - Defaults:
+         * SQLite         → None (commit once at end, no batching).
+         * MySQL/MariaDB  → 50k rows per commit.
+         * Spansh profile → conservative 5k rows per commit if not otherwise set.
+     """
+     env_batch = os.environ.get("TD_LISTINGS_BATCH")
+     if env_batch:
+         try:
+             return int(env_batch)
+         except ValueError:
+             # fall through to backend defaults
+             pass
+
+     dialect = session.get_bind().dialect.name.lower()
+
+     if dialect == "sqlite":
+         return None
+     if dialect in ("mysql", "mariadb"):
+         return 50000
+     if profile == "spansh":
+         return 5000
+
+     return None
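# Usage sketch (not part of the packaged file): how an import loop might
# consume get_import_batch_size; `rows` is any iterable of parameter dicts.
def _example_batched_commit(session: Session, table: Table, rows):
    batch_size = get_import_batch_size(session, profile="spansh")
    pending = 0
    for row in rows:
        session.execute(table.insert(), row)
        pending += 1
        if batch_size and pending >= batch_size:
            session.commit()  # periodic commit keeps transactions bounded
            pending = 0
    session.commit()  # final commit (the only one when batch_size is None)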