tradedangerous 11.5.3__py3-none-any.whl → 12.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tradedangerous might be problematic. Click here for more details.
- tradedangerous/cache.py +567 -395
- tradedangerous/cli.py +2 -2
- tradedangerous/commands/TEMPLATE.py +25 -26
- tradedangerous/commands/__init__.py +8 -16
- tradedangerous/commands/buildcache_cmd.py +40 -10
- tradedangerous/commands/buy_cmd.py +57 -46
- tradedangerous/commands/commandenv.py +0 -2
- tradedangerous/commands/export_cmd.py +78 -50
- tradedangerous/commands/import_cmd.py +67 -31
- tradedangerous/commands/market_cmd.py +52 -19
- tradedangerous/commands/olddata_cmd.py +120 -107
- tradedangerous/commands/rares_cmd.py +122 -110
- tradedangerous/commands/run_cmd.py +118 -66
- tradedangerous/commands/sell_cmd.py +52 -45
- tradedangerous/commands/shipvendor_cmd.py +49 -234
- tradedangerous/commands/station_cmd.py +55 -485
- tradedangerous/commands/update_cmd.py +56 -420
- tradedangerous/csvexport.py +173 -162
- tradedangerous/db/__init__.py +27 -0
- tradedangerous/db/adapter.py +191 -0
- tradedangerous/db/config.py +95 -0
- tradedangerous/db/engine.py +246 -0
- tradedangerous/db/lifecycle.py +332 -0
- tradedangerous/db/locks.py +208 -0
- tradedangerous/db/orm_models.py +455 -0
- tradedangerous/db/paths.py +112 -0
- tradedangerous/db/utils.py +661 -0
- tradedangerous/gui.py +2 -2
- tradedangerous/plugins/eddblink_plug.py +387 -251
- tradedangerous/plugins/spansh_plug.py +2488 -821
- tradedangerous/prices.py +124 -142
- tradedangerous/templates/TradeDangerous.sql +6 -6
- tradedangerous/tradecalc.py +1227 -1109
- tradedangerous/tradedb.py +533 -384
- tradedangerous/tradeenv.py +12 -1
- tradedangerous/version.py +1 -1
- {tradedangerous-11.5.3.dist-info → tradedangerous-12.0.1.dist-info}/METADATA +11 -7
- {tradedangerous-11.5.3.dist-info → tradedangerous-12.0.1.dist-info}/RECORD +42 -38
- {tradedangerous-11.5.3.dist-info → tradedangerous-12.0.1.dist-info}/WHEEL +1 -1
- tradedangerous/commands/update_gui.py +0 -721
- tradedangerous/jsonprices.py +0 -254
- tradedangerous/plugins/edapi_plug.py +0 -1071
- tradedangerous/plugins/journal_plug.py +0 -537
- tradedangerous/plugins/netlog_plug.py +0 -316
- {tradedangerous-11.5.3.dist-info → tradedangerous-12.0.1.dist-info}/entry_points.txt +0 -0
- {tradedangerous-11.5.3.dist-info → tradedangerous-12.0.1.dist-info/licenses}/LICENSE +0 -0
- {tradedangerous-11.5.3.dist-info → tradedangerous-12.0.1.dist-info}/top_level.txt +0 -0
|
@@ -1,858 +1,2525 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
1
|
+
# tradedangerous/plugins/spansh_plug.py
|
|
2
|
+
# -----------------------------------------------------------------------------
|
|
3
|
+
# Spansh Import Plugin (new, defragmented)
|
|
4
|
+
#
|
|
5
|
+
# Behavioural contract:
|
|
6
|
+
# - Optimised for modify/update (churn-safe via service timestamps)
|
|
7
|
+
# - Streaming reader for huge top-level JSON array
|
|
8
|
+
# - Options: -O url=… | -O file=… (mutually exclusive), -O maxage=<float days>
|
|
9
|
+
# - JSON/intermediate in tmp/, CSV & .prices in data/
|
|
10
|
+
# - Warnings gated by verbosity; low-verbosity uses single-line progress
|
|
11
|
+
# - After import: export CSVs (incl. RareItem) and regenerate TradeDangerous.prices
|
|
12
|
+
# - Returns True from finish() to stop default flow
|
|
13
|
+
#
|
|
14
|
+
# DB/dialect specifics live in tradedangerous.db.utils (parse_ts, batch sizing, etc.)
|
|
15
|
+
# -----------------------------------------------------------------------------
|
|
10
16
|
|
|
11
|
-
from
|
|
17
|
+
from __future__ import annotations
|
|
12
18
|
|
|
19
|
+
import io
|
|
13
20
|
import os
|
|
14
|
-
import requests
|
|
15
|
-
import sqlite3
|
|
16
21
|
import sys
|
|
22
|
+
import traceback
|
|
17
23
|
import time
|
|
18
|
-
import
|
|
19
|
-
import ijson
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
'Asteroid base': [14, False],
|
|
43
|
-
'Drake-Class Carrier': [24, False], # fleet carriers
|
|
44
|
-
'Settlement': [25, True], # odyssey settlements
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
if dataclass:
|
|
48
|
-
# Dataclass with slots is considerably cheaper and faster than namedtuple
|
|
49
|
-
# but is only reliably introduced in 3.10+
|
|
50
|
-
@dataclass(slots=True)
|
|
51
|
-
class System:
|
|
52
|
-
id: int
|
|
53
|
-
name: str
|
|
54
|
-
pos_x: float
|
|
55
|
-
pos_y: float
|
|
56
|
-
pos_z: float
|
|
57
|
-
modified: float | None
|
|
58
|
-
|
|
59
|
-
@dataclass(slots=True)
|
|
60
|
-
class Station: # pylint: disable=too-many-instance-attributes
|
|
61
|
-
id: int
|
|
62
|
-
system_id: int
|
|
63
|
-
name: str
|
|
64
|
-
distance: float
|
|
65
|
-
max_pad_size: str
|
|
66
|
-
market: str # should be Optional[bool]
|
|
67
|
-
black_market: str # should be Optional[bool]
|
|
68
|
-
shipyard: str # should be Optional[bool]
|
|
69
|
-
outfitting: str # should be Optional[bool]
|
|
70
|
-
rearm: str # should be Optional[bool]
|
|
71
|
-
refuel: str # should be Optional[bool]
|
|
72
|
-
repair: str # should be Optional[bool]
|
|
73
|
-
planetary: str # should be Optional[bool]
|
|
74
|
-
type: int # station type
|
|
75
|
-
modified: float
|
|
76
|
-
|
|
77
|
-
@dataclass(slots=True)
|
|
78
|
-
class Ship:
|
|
79
|
-
id: int
|
|
80
|
-
name: str
|
|
81
|
-
modified: float
|
|
82
|
-
|
|
83
|
-
@dataclass(slots=True)
|
|
84
|
-
class Module:
|
|
85
|
-
id: int
|
|
86
|
-
name: str
|
|
87
|
-
cls: int
|
|
88
|
-
rating: str
|
|
89
|
-
ship: str
|
|
90
|
-
modified: float
|
|
91
|
-
|
|
92
|
-
@dataclass(slots=True)
|
|
93
|
-
class Commodity:
|
|
94
|
-
id: int
|
|
95
|
-
name: str
|
|
96
|
-
category: str
|
|
97
|
-
demand: int
|
|
98
|
-
supply: int
|
|
99
|
-
sell: int
|
|
100
|
-
buy: int
|
|
101
|
-
modified: float
|
|
102
|
-
|
|
103
|
-
else:
|
|
104
|
-
System = namedtuple('System', 'id,name,pos_x,pos_y,pos_z,modified')
|
|
105
|
-
Station = namedtuple('Station',
|
|
106
|
-
'id,system_id,name,distance,max_pad_size,'
|
|
107
|
-
'market,black_market,shipyard,outfitting,rearm,refuel,repair,planetary,type,modified')
|
|
108
|
-
Ship = namedtuple('Ship', 'id,name,modified')
|
|
109
|
-
Module = namedtuple('Module', 'id,name,cls,rating,ship,modified')
|
|
110
|
-
Commodity = namedtuple('Commodity', 'id,name,category,demand,supply,sell,buy,modified')
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
class Timing:
|
|
114
|
-
""" Helper that provides a context manager for timing code execution. """
|
|
115
|
-
|
|
116
|
-
def __init__(self):
|
|
117
|
-
self.start_ts = None
|
|
118
|
-
self.end_ts = None
|
|
119
|
-
|
|
120
|
-
def __enter__(self):
|
|
121
|
-
self.start_ts = time.perf_counter()
|
|
122
|
-
self.end_ts = None
|
|
123
|
-
return self
|
|
124
|
-
|
|
125
|
-
def __exit__(self, *args):
|
|
126
|
-
self.end_ts = time.perf_counter()
|
|
127
|
-
|
|
128
|
-
@property
|
|
129
|
-
def elapsed(self) -> Optional[float]:
|
|
130
|
-
""" If the timing has finished, calculates the elapsed time. """
|
|
131
|
-
if self.start_ts is None:
|
|
132
|
-
return None
|
|
133
|
-
return (self.end_ts or time.perf_counter()) - self.start_ts
|
|
134
|
-
|
|
135
|
-
@property
|
|
136
|
-
def is_finished(self) -> bool:
|
|
137
|
-
""" True if the timing has finished. """
|
|
138
|
-
return self.end_ts is not None
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
class Progresser:
|
|
142
|
-
""" Encapsulates a potentially transient progress view for a given TradeEnv. """
|
|
143
|
-
def __init__(self, tdenv: 'TradeEnv', title: str, fancy: bool = True, total: Optional[int] = None):
|
|
144
|
-
self.started = time.time()
|
|
145
|
-
self.tdenv = tdenv
|
|
146
|
-
self.progress, self.main_task = None, None
|
|
147
|
-
self.title = title
|
|
148
|
-
self.fancy = fancy
|
|
149
|
-
self.total = total
|
|
150
|
-
self.main_task = None
|
|
151
|
-
if fancy:
|
|
152
|
-
self.progress = Progress(console=self.tdenv.console, transient=True, auto_refresh=True, refresh_per_second=2)
|
|
153
|
-
else:
|
|
154
|
-
self.progress = None
|
|
155
|
-
|
|
156
|
-
def __enter__(self):
|
|
157
|
-
if not self.fancy:
|
|
158
|
-
self.tdenv.uprint(self.title)
|
|
159
|
-
else:
|
|
160
|
-
self.progress.start()
|
|
161
|
-
self.main_task = self.progress.add_task(self.title, start=True, total=self.total)
|
|
162
|
-
return self
|
|
163
|
-
|
|
164
|
-
def __exit__(self, *args):
|
|
165
|
-
self.progress.stop()
|
|
166
|
-
|
|
167
|
-
def update(self, title: str) -> None:
|
|
168
|
-
if self.fancy:
|
|
169
|
-
self.progress.update(self.main_task, description=title)
|
|
170
|
-
else:
|
|
171
|
-
self.tdenv.DEBUG1(title)
|
|
172
|
-
|
|
173
|
-
@contextmanager
|
|
174
|
-
def task(self, title: str, total: Optional[int] = None, parent: Optional[str] = None):
|
|
175
|
-
parent = parent or self.main_task
|
|
176
|
-
if self.fancy:
|
|
177
|
-
task = self.progress.add_task(title, start=True, total=total, parent=parent)
|
|
178
|
-
else:
|
|
179
|
-
self.tdenv.DEBUG0(title)
|
|
180
|
-
task = None
|
|
181
|
-
try:
|
|
182
|
-
yield task
|
|
183
|
-
finally:
|
|
184
|
-
if self.fancy:
|
|
185
|
-
self.progress.remove_task(task)
|
|
186
|
-
if task is not None and parent is not None:
|
|
187
|
-
self.progress.update(parent, advance=1)
|
|
188
|
-
|
|
189
|
-
def bump(self, task, advance: int = 1, description: Optional[str] = None):
|
|
190
|
-
""" Advances the progress of a task by one mark. """
|
|
191
|
-
if self.fancy and task is not None:
|
|
192
|
-
self.progress.update(task, advance=advance, description=description)
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
def get_timings(started: float, system_count: int, total_station_count: int, *, min_count: int = 100) -> tuple[float, str]:
|
|
196
|
-
""" describes how long it is taking to process each system and station """
|
|
197
|
-
elapsed = time.time() - started
|
|
198
|
-
timings = "sys="
|
|
199
|
-
if system_count >= min_count:
|
|
200
|
-
avg = elapsed / float(system_count) * 1000.0
|
|
201
|
-
timings += f"{avg:5.2f}ms"
|
|
202
|
-
else:
|
|
203
|
-
timings += "..."
|
|
204
|
-
timings += ", stn="
|
|
205
|
-
if total_station_count >= min_count:
|
|
206
|
-
avg = elapsed / float(total_station_count) * 1000.0
|
|
207
|
-
timings += f"{avg:5.2f}ms"
|
|
208
|
-
else:
|
|
209
|
-
timings += "..."
|
|
210
|
-
return elapsed, timings
|
|
24
|
+
import json # Used for debug tracing
|
|
25
|
+
import ijson # Used for main stream
|
|
26
|
+
import shutil
|
|
27
|
+
import csv
|
|
28
|
+
from datetime import datetime, timedelta, timezone
|
|
29
|
+
from pathlib import Path
|
|
30
|
+
from typing import Any, Dict, Generator, List, Mapping, Optional, Tuple, Iterable
|
|
31
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# Framework modules
|
|
35
|
+
from .. import plugins, cache, csvexport # provided by project
|
|
36
|
+
|
|
37
|
+
# DB helpers (dialect specifics live here)
|
|
38
|
+
from ..db import utils as db_utils
|
|
39
|
+
from ..db.lifecycle import ensure_fresh_db
|
|
40
|
+
from ..db.locks import station_advisory_lock
|
|
41
|
+
|
|
42
|
+
# SQLAlchemy
|
|
43
|
+
from sqlalchemy import MetaData, Table, select, insert, update, func, and_, or_, text, UniqueConstraint
|
|
44
|
+
from sqlalchemy.engine import Engine
|
|
45
|
+
from sqlalchemy.orm import Session
|
|
46
|
+
|
|
47
|
+
DEFAULT_URL = "https://downloads.spansh.co.uk/galaxy_stations.json"
|
|
211
48
|
|
|
212
49
|
|
|
213
50
|
class ImportPlugin(plugins.ImportPluginBase):
|
|
214
|
-
"""Plugin that downloads data from https://spansh.co.uk/dumps.
|
|
215
51
|
"""
|
|
216
|
-
|
|
52
|
+
Spansh galaxy dump importer:
|
|
53
|
+
- Consumes galaxy_stations.json (local file or remote URL)
|
|
54
|
+
- Updates System, Station, Ship/ShipVendor, Upgrade/UpgradeVendor, Item/StationItem
|
|
55
|
+
- Respects per-service freshness & optional maxage (days)
|
|
56
|
+
- Imports RareItem.csv via cache.processImportFile() AFTER systems/stations exist
|
|
57
|
+
- Exports CSVs (+RareItem) and rebuilds TradeDangerous.prices
|
|
58
|
+
"""
|
|
59
|
+
|
|
60
|
+
pluginInfo = {
|
|
61
|
+
"name": "spansh",
|
|
62
|
+
"author": "TD Team",
|
|
63
|
+
"version": "2.1",
|
|
64
|
+
"minimum-tb-version": "1.76",
|
|
65
|
+
"description": "Imports Spansh galaxy dump and refreshes cache artefacts.",
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
# Correct option contract: dict name -> help text
|
|
217
69
|
pluginOptions = {
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
70
|
+
"url": "Remote URL to galaxy_stations.json (default if neither url nor file is given)",
|
|
71
|
+
"file": "Local path to galaxy_stations.json; use '-' to read from stdin",
|
|
72
|
+
"maxage": "Skip service sections older than <days> (float), evaluated per service",
|
|
73
|
+
"pricesonly": "Skip import/exports; regenerate TradeDangerous.prices only (for testing).",
|
|
74
|
+
"force_baseline": "If set, overwrite service blocks to Spansh baseline (from_live=0) and delete any extras.",
|
|
75
|
+
"skip_stationitems": "Skip exporting StationItem.csv (large). Env: TD_SKIP_STATIONITEM_EXPORT=1",
|
|
76
|
+
"progress_compact": "Use shorter one-line import status (or set env TD_PROGRESS_COMPACT=1).",
|
|
77
|
+
# --- EDCD sourcing (hardcoded URLs; can be disabled or overridden) ---
|
|
78
|
+
"no_edcd": "Disable EDCD preloads (categories, FDev tables) and EDCD rares import.",
|
|
79
|
+
"edcd_commodity": "Override URL or local path for EDCD commodity.csv.",
|
|
80
|
+
"edcd_outfitting": "Override URL or local path for EDCD outfitting.csv.",
|
|
81
|
+
"edcd_shipyard": "Override URL or local path for EDCD shipyard.csv.",
|
|
82
|
+
"edcd_rares": "Override URL or local path for EDCD rare_commodity.csv.",
|
|
83
|
+
# --- Extra Debug Options
|
|
84
|
+
"only_system": "Process only the system with this name or id64; still stream the real file.",
|
|
85
|
+
"debug_trace": "Emit compact JSONL decision logs to tmp/spansh_trace.jsonl (1 line per decision).",
|
|
221
86
|
}
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
self.
|
|
245
|
-
self.
|
|
246
|
-
self.
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
self.
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
self.tdenv
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
87
|
+
|
|
88
|
+
# Hardcoded EDCD sources (raw GitHub)
|
|
89
|
+
EDCD_URLS = {
|
|
90
|
+
"commodity": "https://raw.githubusercontent.com/EDCD/FDevIDs/master/commodity.csv",
|
|
91
|
+
"outfitting": "https://raw.githubusercontent.com/EDCD/FDevIDs/master/outfitting.csv",
|
|
92
|
+
"shipyard": "https://raw.githubusercontent.com/EDCD/FDevIDs/master/shipyard.csv",
|
|
93
|
+
"rares": "https://raw.githubusercontent.com/EDCD/FDevIDs/master/rare_commodity.csv",
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
# ------------------------------
|
|
97
|
+
# Construction & plumbing
|
|
98
|
+
# ------------------------------
|
|
99
|
+
# ------------------------------
|
|
100
|
+
# Construction & plumbing (REPLACEMENT)
|
|
101
|
+
# ------------------------------
|
|
102
|
+
def __init__(self, tdb, cmdenv):
|
|
103
|
+
super().__init__(tdb, cmdenv)
|
|
104
|
+
self.tdb = tdb
|
|
105
|
+
self.tdenv = cmdenv
|
|
106
|
+
self.session: Optional[Session] = None
|
|
107
|
+
|
|
108
|
+
# Paths (data/tmp) from env/config; fall back defensively
|
|
109
|
+
self.data_dir = Path(getattr(self.tdenv, "dataDir", getattr(self.tdb, "dataDir", "data"))).resolve()
|
|
110
|
+
self.tmp_dir = Path(getattr(self.tdenv, "tmpDir", getattr(self.tdb, "tmpDir", "tmp"))).resolve()
|
|
111
|
+
for p in (self.data_dir, self.tmp_dir):
|
|
112
|
+
try:
|
|
113
|
+
p.mkdir(parents=True, exist_ok=True)
|
|
114
|
+
except Exception as e:
|
|
115
|
+
raise CleanExit(f"Failed to create directory {p}: {e!r}")
|
|
116
|
+
|
|
117
|
+
# Batch size decided AFTER session is opened (see finish())
|
|
118
|
+
self.batch_size: Optional[int] = None
|
|
119
|
+
|
|
120
|
+
# Verbosity gates
|
|
121
|
+
self._is_tty = sys.stderr.isatty() or sys.stdout.isatty()
|
|
122
|
+
self._debug_level = int(getattr(self.tdenv, "debug", 0) or 0) # -v levels
|
|
123
|
+
self._warn_enabled = bool(getattr(self.tdenv, "warn", None)) or (self._debug_level >= 3)
|
|
124
|
+
|
|
125
|
+
# Progress state
|
|
126
|
+
self._last_progress_time = 0.0
|
|
127
|
+
|
|
128
|
+
# Station type mapping (existing helper in this module)
|
|
129
|
+
self._station_type_map = self._build_station_type_map()
|
|
130
|
+
|
|
131
|
+
# Debug trace option
|
|
132
|
+
self.debug_trace = str(self.getOption("debug_trace") or "0").strip().lower() not in ("0", "", "false", "no")
|
|
133
|
+
self._trace_fp = None
|
|
134
|
+
|
|
135
|
+
# --------------------------------------
|
|
136
|
+
# Small tracing helper
|
|
137
|
+
# --------------------------------------
|
|
138
|
+
def _trace(self, **evt) -> None:
|
|
139
|
+
"""
|
|
140
|
+
Lightweight debug tracer. Writes one compact JSON line per call
|
|
141
|
+
into tmp/spansh_trace.jsonl when -O debug_trace=1 is set.
|
|
142
|
+
Has no side-effects on existing logic if disabled.
|
|
143
|
+
"""
|
|
144
|
+
if not getattr(self, "debug_trace", False):
|
|
266
145
|
return
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
146
|
+
try:
|
|
147
|
+
import json
|
|
148
|
+
# lazily open file handle if not yet opened
|
|
149
|
+
if not hasattr(self, "_trace_fp") or self._trace_fp is None:
|
|
150
|
+
tmp = getattr(self, "tmp_dir", Path("tmp"))
|
|
151
|
+
tmp.mkdir(parents=True, exist_ok=True)
|
|
152
|
+
self._trace_fp = (tmp / "spansh_trace.jsonl").open("a", encoding="utf-8")
|
|
153
|
+
|
|
154
|
+
# sanitize datetimes
|
|
155
|
+
for k, v in list(evt.items()):
|
|
156
|
+
if hasattr(v, "isoformat"):
|
|
157
|
+
evt[k] = v.isoformat()
|
|
158
|
+
|
|
159
|
+
self._trace_fp.write(json.dumps(evt, ensure_ascii=False) + "\n")
|
|
160
|
+
self._trace_fp.flush()
|
|
161
|
+
except Exception:
|
|
162
|
+
pass # never break main flow
|
|
163
|
+
|
|
164
|
+
# --- TD shim: seed 'Added' from templates (idempotent) ---
|
|
165
|
+
def _seed_added_from_templates(self, session) -> None:
|
|
166
|
+
"""
|
|
167
|
+
Seed the legacy 'Added' table from the packaged CSV:
|
|
168
|
+
tradedangerous/templates/Added.csv
|
|
169
|
+
|
|
170
|
+
DB-agnostic; uses cache.processImportFile. No reliance on any templatesDir.
|
|
171
|
+
"""
|
|
172
|
+
from importlib.resources import files, as_file
|
|
173
|
+
from tradedangerous.cache import processImportFile
|
|
174
|
+
|
|
175
|
+
# Obtain a Traversable for the packaged resource and materialize to a real path
|
|
176
|
+
res = files("tradedangerous").joinpath("templates", "Added.csv")
|
|
177
|
+
with as_file(res) as csv_path:
|
|
178
|
+
if not csv_path.exists():
|
|
179
|
+
# Graceful failure so schedulers can retry
|
|
180
|
+
raise CleanExit(f"Packaged Added.csv not found: {csv_path}")
|
|
290
181
|
try:
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
self.
|
|
182
|
+
processImportFile(
|
|
183
|
+
tdenv=self.tdenv,
|
|
184
|
+
session=session,
|
|
185
|
+
importPath=csv_path,
|
|
186
|
+
tableName="Added",
|
|
187
|
+
)
|
|
188
|
+
except Exception as e:
|
|
189
|
+
# Keep diagnostics, but avoid hard process exit
|
|
190
|
+
self._warn("Seeding 'Added' from templates failed; continuing without it.")
|
|
191
|
+
self._warn(f"{type(e).__name__}: {e}")
|
|
192
|
+
traceback.print_exc()
|
|
193
|
+
raise CleanExit("Failed to seed 'Added' table from templates.")
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
# --------------------------------------
|
|
197
|
+
# EDCD Import Functions
|
|
198
|
+
# --------------------------------------
|
|
199
|
+
|
|
200
|
+
# ---------- Download from EDCD ----------
|
|
201
|
+
def _acquire_edcd_files(self) -> Dict[str, Optional[Path]]:
|
|
202
|
+
"""
|
|
203
|
+
Download (or resolve) EDCD CSVs to tmp/ with conditional caching.
|
|
204
|
+
Honors -O no_edcd=1 and per-file overrides:
|
|
205
|
+
- edcd_commodity, edcd_outfitting, edcd_shipyard, edcd_rares
|
|
206
|
+
Each override may be a local path or an http(s) URL.
|
|
207
|
+
Returns dict: {commodity,outfitting,shipyard,rares} -> Path or None.
|
|
208
|
+
"""
|
|
209
|
+
def _resolve_one(opt_key: str, default_url: str, basename: str) -> Optional[Path]:
|
|
210
|
+
override = self.getOption(opt_key)
|
|
211
|
+
target = self.tmp_dir / f"edcd_{basename}.csv"
|
|
212
|
+
label = f"EDCD {basename}.csv"
|
|
213
|
+
|
|
214
|
+
# Explicit disable via empty override
|
|
215
|
+
if override is not None and str(override).strip() == "":
|
|
216
|
+
return None
|
|
217
|
+
|
|
218
|
+
# Local path override
|
|
219
|
+
if override and ("://" not in override):
|
|
220
|
+
p = Path(override)
|
|
221
|
+
if not p.exists():
|
|
222
|
+
cwd = getattr(self.tdenv, "cwDir", None)
|
|
223
|
+
if cwd:
|
|
224
|
+
p = Path(cwd, override)
|
|
225
|
+
if p.exists() and p.is_file():
|
|
226
|
+
return p.resolve()
|
|
227
|
+
override = None # fall back to URL
|
|
228
|
+
|
|
229
|
+
# URL (override or default)
|
|
230
|
+
url = override or default_url
|
|
231
|
+
try:
|
|
232
|
+
return self._download_with_cache(url, target, label=label)
|
|
233
|
+
except CleanExit:
|
|
234
|
+
return target if target.exists() else None
|
|
235
|
+
except Exception:
|
|
236
|
+
return target if target.exists() else None
|
|
237
|
+
|
|
238
|
+
if self.getOption("no_edcd"):
|
|
239
|
+
return {"commodity": None, "outfitting": None, "shipyard": None, "rares": None}
|
|
240
|
+
|
|
241
|
+
return {
|
|
242
|
+
"commodity": _resolve_one("edcd_commodity", self.EDCD_URLS["commodity"], "commodity"),
|
|
243
|
+
"outfitting": _resolve_one("edcd_outfitting", self.EDCD_URLS["outfitting"], "outfitting"),
|
|
244
|
+
"shipyard": _resolve_one("edcd_shipyard", self.EDCD_URLS["shipyard"], "shipyard"),
|
|
245
|
+
"rares": _resolve_one("edcd_rares", self.EDCD_URLS["rares"], "rare_commodity"),
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# ---------- EDCD: Categories (add-only) ----------
|
|
250
|
+
def _edcd_import_categories_add_only(
|
|
251
|
+
self,
|
|
252
|
+
session: Session,
|
|
253
|
+
tables: Dict[str, Table],
|
|
254
|
+
commodity_csv: Path,
|
|
255
|
+
) -> int:
|
|
256
|
+
"""
|
|
257
|
+
Read EDCD commodity.csv, extract distinct category names, and add any
|
|
258
|
+
missing Category rows. No updates, no deletes. Cross-dialect safe.
|
|
259
|
+
|
|
260
|
+
Returns: number of rows inserted.
|
|
261
|
+
"""
|
|
262
|
+
t_cat = tables["Category"]
|
|
263
|
+
|
|
264
|
+
# Load existing category names (case-insensitive) to avoid duplicates.
|
|
265
|
+
existing_lc = {
|
|
266
|
+
(str(n) or "").strip().lower()
|
|
267
|
+
for (n,) in session.execute(select(t_cat.c.name)).all()
|
|
268
|
+
if n is not None
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
# Parse the CSV and collect unique category names.
|
|
272
|
+
with open(commodity_csv, "r", encoding="utf-8", newline="") as fh:
|
|
273
|
+
reader = csv.DictReader(fh)
|
|
274
|
+
|
|
275
|
+
# Find the 'category' column (case-insensitive).
|
|
276
|
+
cat_col = None
|
|
277
|
+
for h in (reader.fieldnames or []):
|
|
278
|
+
if h and str(h).strip().lower() == "category":
|
|
279
|
+
cat_col = h
|
|
280
|
+
break
|
|
281
|
+
if cat_col is None:
|
|
282
|
+
raise CleanExit(f"EDCD commodity.csv missing 'category' column: {commodity_csv}")
|
|
283
|
+
|
|
284
|
+
seen_lc: set[str] = set()
|
|
285
|
+
to_add: list[dict] = []
|
|
286
|
+
|
|
287
|
+
for row in reader:
|
|
288
|
+
raw = row.get(cat_col)
|
|
289
|
+
if not raw:
|
|
290
|
+
continue
|
|
291
|
+
name = str(raw).strip()
|
|
292
|
+
if not name:
|
|
293
|
+
continue
|
|
294
|
+
|
|
295
|
+
lk = name.lower()
|
|
296
|
+
if lk in existing_lc or lk in seen_lc:
|
|
297
|
+
continue
|
|
298
|
+
|
|
299
|
+
seen_lc.add(lk)
|
|
300
|
+
to_add.append({"name": name})
|
|
301
|
+
|
|
302
|
+
if not to_add:
|
|
303
|
+
return 0
|
|
304
|
+
|
|
305
|
+
# Cross-dialect safe "add-only": bulk insert the missing names.
|
|
306
|
+
session.execute(insert(t_cat), to_add)
|
|
307
|
+
return len(to_add)
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
# ---------- EDCD: FDev tables (direct load) ----------
|
|
313
|
+
def _edcd_import_table_direct(self, session: Session, table: Table, csv_path: Path) -> int:
|
|
314
|
+
"""
|
|
315
|
+
Upsert CSV rows into a table whose columns match CSV headers.
|
|
316
|
+
Prefers the table's primary key; if absent, falls back to a single-column
|
|
317
|
+
UNIQUE key (e.g. 'id' in FDev tables). Returns approx rows written.
|
|
318
|
+
"""
|
|
319
|
+
# --- choose key columns for upsert ---
|
|
320
|
+
pk_cols = tuple(c.name for c in table.primary_key.columns)
|
|
321
|
+
key_cols: tuple[str, ...] = pk_cols
|
|
322
|
+
|
|
323
|
+
if not key_cols:
|
|
324
|
+
# Common case for EDCD FDev tables: UNIQUE(id) but no PK
|
|
325
|
+
if "id" in table.c:
|
|
326
|
+
key_cols = ("id",)
|
|
327
|
+
else:
|
|
328
|
+
# Try to discover a single-column UNIQUE constraint via reflection
|
|
301
329
|
try:
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
|
|
330
|
+
uniq_single = []
|
|
331
|
+
for cons in getattr(table, "constraints", set()):
|
|
332
|
+
if isinstance(cons, UniqueConstraint):
|
|
333
|
+
cols = tuple(col.name for col in cons.columns)
|
|
334
|
+
if len(cols) == 1:
|
|
335
|
+
uniq_single.append(cols[0])
|
|
336
|
+
if uniq_single:
|
|
337
|
+
key_cols = (uniq_single[0],)
|
|
338
|
+
except Exception:
|
|
339
|
+
pass
|
|
340
|
+
|
|
341
|
+
if not key_cols:
|
|
342
|
+
raise CleanExit(f"Table {table.name} has neither a primary key nor a single-column UNIQUE key; cannot upsert from EDCD")
|
|
343
|
+
|
|
344
|
+
# --- read CSV ---
|
|
345
|
+
with open(csv_path, "r", encoding="utf-8", newline="") as fh:
|
|
346
|
+
reader = csv.DictReader(fh)
|
|
347
|
+
cols = [c for c in (reader.fieldnames or []) if c in table.c]
|
|
348
|
+
if not cols:
|
|
349
|
+
return 0
|
|
350
|
+
rows = [{k: row.get(k) for k in cols} for row in reader]
|
|
351
|
+
|
|
352
|
+
if not rows:
|
|
353
|
+
return 0
|
|
354
|
+
|
|
355
|
+
# --- table-specific sanitation (fixes ck_fdo_mount / ck_fdo_guidance) ---
|
|
356
|
+
if table.name == "FDevOutfitting":
|
|
357
|
+
allowed_mount = {"Fixed", "Gimballed", "Turreted"}
|
|
358
|
+
allowed_guid = {"Dumbfire", "Seeker", "Swarm"}
|
|
359
|
+
|
|
360
|
+
def _norm(val, allowed):
|
|
361
|
+
if val is None:
|
|
362
|
+
return None
|
|
363
|
+
s = str(val).strip()
|
|
364
|
+
if not s or s not in allowed:
|
|
365
|
+
return None
|
|
366
|
+
return s
|
|
367
|
+
|
|
368
|
+
for r in rows:
|
|
369
|
+
if "mount" in r:
|
|
370
|
+
r["mount"] = _norm(r["mount"], allowed_mount)
|
|
371
|
+
if "guidance" in r:
|
|
372
|
+
r["guidance"] = _norm(r["guidance"], allowed_guid)
|
|
373
|
+
|
|
374
|
+
# --- perform upsert using chosen key columns ---
|
|
375
|
+
upd_cols = tuple(c for c in cols if c not in key_cols)
|
|
376
|
+
|
|
377
|
+
if db_utils.is_sqlite(session):
|
|
378
|
+
db_utils.sqlite_upsert_simple(session, table, rows=rows, key_cols=key_cols, update_cols=upd_cols)
|
|
379
|
+
return len(rows)
|
|
380
|
+
|
|
381
|
+
if db_utils.is_mysql(session):
|
|
382
|
+
db_utils.mysql_upsert_simple(session, table, rows=rows, key_cols=key_cols, update_cols=upd_cols)
|
|
383
|
+
return len(rows)
|
|
384
|
+
|
|
385
|
+
# Generic backend (read-then-insert/update)
|
|
386
|
+
for r in rows:
|
|
387
|
+
cond = and_(*[getattr(table.c, k) == r[k] for k in key_cols])
|
|
388
|
+
ext = session.execute(select(*[getattr(table.c, k) for k in key_cols]).where(cond)).first()
|
|
389
|
+
if ext is None:
|
|
390
|
+
session.execute(insert(table).values(**r))
|
|
391
|
+
elif upd_cols:
|
|
392
|
+
session.execute(update(table).where(cond).values(**{k: r[k] for k in upd_cols}))
|
|
393
|
+
return len(rows)
|
|
310
394
|
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
395
|
+
def _edcd_import_fdev_catalogs(self, session: Session, tables: Dict[str, Table], *, outfitting_csv: Path, shipyard_csv: Path) -> Tuple[int, int]:
|
|
396
|
+
u = self._edcd_import_table_direct(session, tables["FDevOutfitting"], outfitting_csv)
|
|
397
|
+
s = self._edcd_import_table_direct(session, tables["FDevShipyard"], shipyard_csv)
|
|
398
|
+
return (u, s)
|
|
399
|
+
|
|
400
|
+
# --------------------------------------
|
|
401
|
+
# Comparison Helpers
|
|
402
|
+
# --------------------------------------
|
|
403
|
+
|
|
404
|
+
def _apply_vendor_block_per_rules(
|
|
405
|
+
self,
|
|
406
|
+
t_vendor: Table,
|
|
407
|
+
station_id: int,
|
|
408
|
+
ids: Iterable[int],
|
|
409
|
+
ts_sp: datetime,
|
|
410
|
+
*,
|
|
411
|
+
id_col: str,
|
|
412
|
+
) -> Tuple[int, int, int]:
|
|
413
|
+
"""
|
|
414
|
+
Per-row rule for ShipVendor / UpgradeVendor:
|
|
415
|
+
- If db.modified > ts_sp: leave row.
|
|
416
|
+
- If db.modified == ts_sp: no-op.
|
|
417
|
+
- If db.modified < ts_sp: set modified = ts_sp.
|
|
418
|
+
Deletions:
|
|
419
|
+
- Remove rows missing in JSON if (db.modified <= ts_sp).
|
|
420
|
+
Returns (insert_count, update_count, delete_count).
|
|
421
|
+
"""
|
|
422
|
+
keep_ids = {int(x) for x in ids if x is not None}
|
|
423
|
+
inserts = updates = deletes = 0
|
|
424
|
+
|
|
425
|
+
# --- INSERT missing (batch) ---
|
|
426
|
+
if keep_ids:
|
|
427
|
+
# Find which of keep_ids are missing
|
|
428
|
+
existing_ids = {
|
|
429
|
+
int(r[0]) for r in self.session.execute(
|
|
430
|
+
select(getattr(t_vendor.c, id_col)).where(
|
|
431
|
+
and_(t_vendor.c.station_id == station_id,
|
|
432
|
+
getattr(t_vendor.c, id_col).in_(keep_ids))
|
|
433
|
+
)
|
|
434
|
+
).all()
|
|
435
|
+
}
|
|
436
|
+
to_insert = keep_ids - existing_ids
|
|
437
|
+
if to_insert:
|
|
438
|
+
self.session.execute(
|
|
439
|
+
insert(t_vendor),
|
|
440
|
+
[{id_col: vid, "station_id": station_id, "modified": ts_sp} for vid in to_insert]
|
|
441
|
+
)
|
|
442
|
+
inserts = len(to_insert)
|
|
443
|
+
|
|
444
|
+
# --- UPDATE only those with modified < ts_sp (batch) ---
|
|
445
|
+
if keep_ids:
|
|
446
|
+
res = self.session.execute(
|
|
447
|
+
update(t_vendor)
|
|
448
|
+
.where(
|
|
449
|
+
and_(
|
|
450
|
+
t_vendor.c.station_id == station_id,
|
|
451
|
+
getattr(t_vendor.c, id_col).in_(keep_ids),
|
|
452
|
+
or_(t_vendor.c.modified == None, t_vendor.c.modified < ts_sp),
|
|
453
|
+
)
|
|
454
|
+
)
|
|
455
|
+
.values(modified=ts_sp)
|
|
456
|
+
)
|
|
457
|
+
# rowcount includes both existing rows (not inserts) whose modified was < ts_sp
|
|
458
|
+
updates = int(res.rowcount or 0)
|
|
459
|
+
|
|
460
|
+
# --- DELETE rows NOT in keep_ids, but only if <= ts_sp (single statement) ---
|
|
461
|
+
res = self.session.execute(
|
|
462
|
+
t_vendor.delete().where(
|
|
463
|
+
and_(
|
|
464
|
+
t_vendor.c.station_id == station_id,
|
|
465
|
+
~getattr(t_vendor.c, id_col).in_(keep_ids) if keep_ids else True,
|
|
466
|
+
or_(t_vendor.c.modified == None, t_vendor.c.modified <= ts_sp),
|
|
467
|
+
)
|
|
468
|
+
)
|
|
469
|
+
)
|
|
470
|
+
deletes = int(res.rowcount or 0)
|
|
471
|
+
|
|
472
|
+
return inserts, updates, deletes
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
def _sync_vendor_block_fast(
|
|
476
|
+
self,
|
|
477
|
+
tables: Dict[str, Table],
|
|
478
|
+
*,
|
|
479
|
+
station_id: int,
|
|
480
|
+
entries: List[Dict[str, Any]],
|
|
481
|
+
ts_sp: Optional[datetime],
|
|
482
|
+
kind: str, # "ship" or "module"
|
|
483
|
+
) -> Tuple[int, int]:
|
|
484
|
+
"""
|
|
485
|
+
Fast, set-based vendor sync for a single station and one service (shipyard/outfitting).
|
|
486
|
+
|
|
487
|
+
Returns: (number_of_inserts_or_updates_on_vendor_links, deletions_count).
|
|
488
|
+
"""
|
|
489
|
+
# Ensure we never write NULL into NOT NULL 'modified' columns.
|
|
490
|
+
ts_eff = (ts_sp or datetime.utcnow().replace(microsecond=0))
|
|
491
|
+
|
|
492
|
+
if kind == "ship":
|
|
493
|
+
t_master = tables["Ship"]
|
|
494
|
+
t_vendor = tables["ShipVendor"]
|
|
495
|
+
id_key = "shipId"
|
|
496
|
+
id_col = "ship_id"
|
|
497
|
+
master_rows = []
|
|
498
|
+
keep_ids: set[int] = set()
|
|
499
|
+
for e in entries:
|
|
500
|
+
if not isinstance(e, dict):
|
|
501
|
+
continue
|
|
502
|
+
ship_id = e.get(id_key)
|
|
503
|
+
name = e.get("name")
|
|
504
|
+
if ship_id is None or name is None:
|
|
505
|
+
continue
|
|
506
|
+
keep_ids.add(int(ship_id))
|
|
507
|
+
master_rows.append({"ship_id": ship_id, "name": name})
|
|
508
|
+
|
|
509
|
+
elif kind == "module":
|
|
510
|
+
t_master = tables["Upgrade"]
|
|
511
|
+
t_vendor = tables["UpgradeVendor"]
|
|
512
|
+
id_key = "moduleId"
|
|
513
|
+
id_col = "upgrade_id"
|
|
514
|
+
master_rows = []
|
|
515
|
+
keep_ids = set()
|
|
516
|
+
for e in entries:
|
|
517
|
+
if not isinstance(e, dict):
|
|
518
|
+
continue
|
|
519
|
+
up_id = e.get(id_key)
|
|
520
|
+
name = e.get("name")
|
|
521
|
+
if up_id is None or name is None:
|
|
522
|
+
continue
|
|
523
|
+
keep_ids.add(int(up_id))
|
|
524
|
+
master_rows.append({
|
|
525
|
+
"upgrade_id": up_id,
|
|
526
|
+
"name": name,
|
|
527
|
+
"class": e.get("class"),
|
|
528
|
+
"rating": e.get("rating"),
|
|
529
|
+
"ship": e.get("ship"),
|
|
530
|
+
})
|
|
531
|
+
else:
|
|
532
|
+
raise CleanExit(f"_sync_vendor_block_fast: unknown kind={kind!r}")
|
|
533
|
+
|
|
534
|
+
# 1) Ensure master rows exist (simple upsert, no timestamp guards).
|
|
535
|
+
if master_rows:
|
|
536
|
+
key_name = list(master_rows[0].keys())[0]
|
|
537
|
+
update_cols = tuple(k for k in master_rows[0].keys() if k != key_name)
|
|
538
|
+
if db_utils.is_sqlite(self.session):
|
|
539
|
+
db_utils.sqlite_upsert_simple(
|
|
540
|
+
self.session, t_master, rows=master_rows,
|
|
541
|
+
key_cols=(key_name,),
|
|
542
|
+
update_cols=update_cols,
|
|
543
|
+
)
|
|
544
|
+
elif db_utils.is_mysql(self.session):
|
|
545
|
+
db_utils.mysql_upsert_simple(
|
|
546
|
+
self.session, t_master, rows=master_rows,
|
|
547
|
+
key_cols=(key_name,),
|
|
548
|
+
update_cols=update_cols,
|
|
549
|
+
)
|
|
550
|
+
else:
|
|
551
|
+
for r in master_rows:
|
|
552
|
+
cond = (getattr(t_master.c, key_name) == r[key_name])
|
|
553
|
+
exists = self.session.execute(select(getattr(t_master.c, key_name)).where(cond)).first()
|
|
554
|
+
if exists is None:
|
|
555
|
+
self.session.execute(insert(t_master).values(**r))
|
|
556
|
+
else:
|
|
557
|
+
upd = {k: v for k, v in r.items() if k != key_name}
|
|
558
|
+
if upd:
|
|
559
|
+
self.session.execute(update(t_master).where(cond).values(**upd))
|
|
560
|
+
|
|
561
|
+
# 2) Link rows with timestamp guard for vendor tables.
|
|
562
|
+
wrote = 0
|
|
563
|
+
delc = 0
|
|
564
|
+
if keep_ids:
|
|
565
|
+
existing = {
|
|
566
|
+
int(r[0]): (r[1] or None)
|
|
567
|
+
for r in self.session.execute(
|
|
568
|
+
select(getattr(t_vendor.c, id_col), t_vendor.c.modified).where(
|
|
569
|
+
and_(t_vendor.c.station_id == station_id, getattr(t_vendor.c, id_col).in_(keep_ids))
|
|
570
|
+
)
|
|
571
|
+
).all()
|
|
572
|
+
}
|
|
573
|
+
to_insert = keep_ids - set(existing.keys())
|
|
574
|
+
to_update = {
|
|
575
|
+
vid for vid, mod in existing.items()
|
|
576
|
+
if (mod is None) or (ts_eff > mod)
|
|
577
|
+
}
|
|
578
|
+
wrote = len(to_insert) + len(to_update)
|
|
579
|
+
|
|
580
|
+
vendor_rows = [{id_col: vid, "station_id": station_id, "modified": ts_eff} for vid in keep_ids]
|
|
581
|
+
if db_utils.is_sqlite(self.session):
|
|
582
|
+
db_utils.sqlite_upsert_modified(
|
|
583
|
+
self.session, t_vendor, rows=vendor_rows,
|
|
584
|
+
key_cols=(id_col, "station_id"),
|
|
585
|
+
modified_col="modified",
|
|
586
|
+
update_cols=(),
|
|
587
|
+
)
|
|
588
|
+
elif db_utils.is_mysql(self.session):
|
|
589
|
+
db_utils.mysql_upsert_modified(
|
|
590
|
+
self.session, t_vendor, rows=vendor_rows,
|
|
591
|
+
key_cols=(id_col, "station_id"),
|
|
592
|
+
modified_col="modified",
|
|
593
|
+
update_cols=(),
|
|
594
|
+
)
|
|
595
|
+
else:
|
|
596
|
+
for r in vendor_rows:
|
|
597
|
+
cond = and_(getattr(t_vendor.c, id_col) == r[id_col], t_vendor.c.station_id == station_id)
|
|
598
|
+
cur = self.session.execute(select(t_vendor.c.modified).where(cond)).first()
|
|
599
|
+
if cur is None:
|
|
600
|
+
self.session.execute(insert(t_vendor).values(**r))
|
|
601
|
+
else:
|
|
602
|
+
mod = cur[0]
|
|
603
|
+
if (mod is None) or (ts_eff > mod):
|
|
604
|
+
self.session.execute(update(t_vendor).where(cond).values(modified=ts_eff))
|
|
605
|
+
|
|
606
|
+
return wrote, delc
|
|
321
607
|
|
|
322
|
-
|
|
323
|
-
|
|
608
|
+
def _cleanup_absent_stations(self, tables: Dict[str, Table], present_station_ids: set[int], json_ts: datetime) -> Tuple[int, int, int]:
|
|
609
|
+
"""
|
|
610
|
+
After streaming, delete baseline rows for stations absent from the JSON
|
|
611
|
+
if the JSON timestamp is >= row.modified. Never delete newer-than-JSON rows.
|
|
612
|
+
Returns (market_del, outfit_del, ship_del) counts.
|
|
613
|
+
"""
|
|
614
|
+
t_si, t_uv, t_sv, t_st = tables["StationItem"], tables["UpgradeVendor"], tables["ShipVendor"], tables["Station"]
|
|
615
|
+
|
|
616
|
+
# All station ids in DB
|
|
617
|
+
all_sids = [int(r[0]) for r in self.session.execute(select(t_st.c.station_id)).all()]
|
|
618
|
+
absent = [sid for sid in all_sids if sid not in present_station_ids]
|
|
619
|
+
if not absent:
|
|
620
|
+
return (0, 0, 0)
|
|
621
|
+
|
|
622
|
+
# Markets: delete baseline rows (from_live=0) with modified <= json_ts
|
|
623
|
+
del_m = self.session.execute(
|
|
624
|
+
t_si.delete().where(
|
|
625
|
+
and_(
|
|
626
|
+
t_si.c.station_id.in_(absent),
|
|
627
|
+
t_si.c.from_live == 0,
|
|
628
|
+
or_(t_si.c.modified == None, t_si.c.modified <= json_ts),
|
|
629
|
+
)
|
|
630
|
+
)
|
|
631
|
+
).rowcount or 0
|
|
632
|
+
|
|
633
|
+
# Vendors: delete rows with modified <= json_ts
|
|
634
|
+
del_u = self.session.execute(
|
|
635
|
+
tables["UpgradeVendor"].delete().where(
|
|
636
|
+
and_(t_uv.c.station_id.in_(absent), or_(t_uv.c.modified == None, t_uv.c.modified <= json_ts))
|
|
637
|
+
)
|
|
638
|
+
).rowcount or 0
|
|
639
|
+
del_s = self.session.execute(
|
|
640
|
+
tables["ShipVendor"].delete().where(
|
|
641
|
+
and_(t_sv.c.station_id.in_(absent), or_(t_sv.c.modified == None, t_sv.c.modified <= json_ts))
|
|
642
|
+
)
|
|
643
|
+
).rowcount or 0
|
|
644
|
+
|
|
645
|
+
return (int(del_m), int(del_u), int(del_s))
|
|
324
646
|
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
if modified and module.modified <= modified:
|
|
412
|
-
# All modules in a station will have the same modified time,
|
|
413
|
-
# so no need to check the rest if the fist is older.
|
|
414
|
-
if self.tdenv.detail > 2:
|
|
415
|
-
self.print(f' | {fq_station_name:50s} | Skipping older outfitting data')
|
|
416
|
-
break
|
|
417
|
-
|
|
418
|
-
module_entries.append((module.id, station.id, module.modified))
|
|
419
|
-
if module_entries:
|
|
420
|
-
self.executemany("""INSERT OR REPLACE INTO UpgradeVendor (
|
|
421
|
-
upgrade_id, station_id, modified
|
|
422
|
-
) VALUES (
|
|
423
|
-
?, ?, IFNULL(?, CURRENT_TIMESTAMP)
|
|
424
|
-
)""", module_entries, commitable=True)
|
|
425
|
-
module_count += len(module_entries)
|
|
426
|
-
|
|
427
|
-
# Items
|
|
428
|
-
commodity_entries = []
|
|
429
|
-
db_commodity_times = dict(self.execute("SELECT item_id, modified FROM StationItem WHERE station_id = ?", station.id))
|
|
430
|
-
|
|
431
|
-
for commodity in commodities:
|
|
432
|
-
if commodity.id not in self.known_commodities:
|
|
433
|
-
commodity = self.ensure_commodity(commodity)
|
|
434
|
-
|
|
435
|
-
# We're concerned with the market age, not the station age,
|
|
436
|
-
# as they each have their own 'modified' times.
|
|
437
|
-
if age_cutoff and (now - commodity.modified) > age_cutoff:
|
|
438
|
-
if self.tdenv.detail:
|
|
439
|
-
self.print(f' | {fq_station_name:50s} | Skipping market due to age: {now - station.modified}, ts: {station.modified}')
|
|
440
|
-
break
|
|
441
|
-
|
|
442
|
-
db_modified = db_commodity_times.get(commodity.id)
|
|
443
|
-
modified = parse_ts(db_modified) if db_modified else None
|
|
444
|
-
if modified and commodity.modified <= modified:
|
|
445
|
-
# All commodities in a station will have the same modified time,
|
|
446
|
-
# so no need to check the rest if the fist is older.
|
|
447
|
-
if self.tdenv.detail > 2:
|
|
448
|
-
self.print(f' | {fq_station_name:50s} | Skipping older market data')
|
|
449
|
-
break
|
|
450
|
-
commodity_entries.append((station.id, commodity.id, commodity.modified,
|
|
451
|
-
commodity.sell, commodity.demand, -1,
|
|
452
|
-
commodity.buy, commodity.supply, -1, 0))
|
|
453
|
-
if commodity_entries:
|
|
454
|
-
self.executemany("""INSERT OR REPLACE INTO StationItem (
|
|
455
|
-
station_id, item_id, modified,
|
|
456
|
-
demand_price, demand_units, demand_level,
|
|
457
|
-
supply_price, supply_units, supply_level, from_live
|
|
458
|
-
) VALUES (
|
|
459
|
-
?, ?, IFNULL(?, CURRENT_TIMESTAMP),
|
|
460
|
-
?, ?, ?,
|
|
461
|
-
?, ?, ?, ?
|
|
462
|
-
)""", commodity_entries, commitable=True)
|
|
463
|
-
commodity_count += len(commodity_entries)
|
|
464
|
-
# Good time to save data and try to keep the transaction small
|
|
465
|
-
self.commit()
|
|
466
|
-
|
|
467
|
-
if commodity_count or ship_count or module_count:
|
|
468
|
-
station_count += 1
|
|
469
|
-
progress.bump(sta_task)
|
|
470
|
-
|
|
471
|
-
system_count += 1
|
|
472
|
-
if station_count:
|
|
473
|
-
total_station_count += station_count
|
|
474
|
-
total_ship_count += ship_count
|
|
475
|
-
total_module_count += module_count
|
|
476
|
-
total_commodity_count += commodity_count
|
|
477
|
-
if self.tdenv.detail:
|
|
478
|
-
self.print(
|
|
479
|
-
f'{system_count:6d} | {upper_sys:50s} | '
|
|
480
|
-
f'{station_count:3d} st {commodity_count:5d} co '
|
|
481
|
-
f'{ship_count:4d} sh {module_count:4d} mo'
|
|
647
|
+
def _sync_market_block_fast(
|
|
648
|
+
self,
|
|
649
|
+
tables: Dict[str, Table],
|
|
650
|
+
categories: Dict[str, int],
|
|
651
|
+
*,
|
|
652
|
+
station_id: int,
|
|
653
|
+
commodities: List[Dict[str, Any]],
|
|
654
|
+
ts_sp: datetime,
|
|
655
|
+
) -> Tuple[int, int]:
|
|
656
|
+
"""
|
|
657
|
+
Fast, set-based market sync for one station.
|
|
658
|
+
|
|
659
|
+
Returns: (number_of_inserts_or_updates_on_StationItem, deletions_count).
|
|
660
|
+
"""
|
|
661
|
+
t_item, t_si = tables["Item"], tables["StationItem"]
|
|
662
|
+
|
|
663
|
+
item_rows: List[Dict[str, Any]] = []
|
|
664
|
+
link_rows: List[Dict[str, Any]] = []
|
|
665
|
+
keep_ids: set[int] = set()
|
|
666
|
+
|
|
667
|
+
for co in commodities:
|
|
668
|
+
if not isinstance(co, dict):
|
|
669
|
+
continue
|
|
670
|
+
fdev_id = co.get("commodityId")
|
|
671
|
+
name = co.get("name")
|
|
672
|
+
cat_name = co.get("category")
|
|
673
|
+
if fdev_id is None or name is None or cat_name is None:
|
|
674
|
+
continue
|
|
675
|
+
|
|
676
|
+
cat_id = categories.get(str(cat_name).lower())
|
|
677
|
+
if cat_id is None:
|
|
678
|
+
raise CleanExit(f'Unknown commodity category "{cat_name}"')
|
|
679
|
+
|
|
680
|
+
keep_ids.add(int(fdev_id))
|
|
681
|
+
item_rows.append({
|
|
682
|
+
"item_id": fdev_id,
|
|
683
|
+
"name": name,
|
|
684
|
+
"category_id": cat_id,
|
|
685
|
+
"fdev_id": fdev_id,
|
|
686
|
+
"ui_order": 0,
|
|
687
|
+
})
|
|
688
|
+
|
|
689
|
+
demand = co.get("demand")
|
|
690
|
+
supply = co.get("supply")
|
|
691
|
+
buy = co.get("buyPrice")
|
|
692
|
+
sell = co.get("sellPrice")
|
|
693
|
+
|
|
694
|
+
link_rows.append({
|
|
695
|
+
"station_id": station_id,
|
|
696
|
+
"item_id": fdev_id,
|
|
697
|
+
"demand_price": sell,
|
|
698
|
+
"demand_units": demand,
|
|
699
|
+
"demand_level": -1,
|
|
700
|
+
"supply_price": buy,
|
|
701
|
+
"supply_units": supply,
|
|
702
|
+
"supply_level": -1,
|
|
703
|
+
"from_live": 0,
|
|
704
|
+
"modified": ts_sp,
|
|
705
|
+
})
|
|
706
|
+
|
|
707
|
+
# 1) Upsert Items (simple)
|
|
708
|
+
if item_rows:
|
|
709
|
+
if db_utils.is_sqlite(self.session):
|
|
710
|
+
db_utils.sqlite_upsert_simple(
|
|
711
|
+
self.session, t_item, rows=item_rows,
|
|
712
|
+
key_cols=("item_id",),
|
|
713
|
+
update_cols=("name", "category_id", "fdev_id", "ui_order"),
|
|
714
|
+
)
|
|
715
|
+
elif db_utils.is_mysql(self.session):
|
|
716
|
+
db_utils.mysql_upsert_simple(
|
|
717
|
+
self.session, t_item, rows=item_rows,
|
|
718
|
+
key_cols=("item_id",),
|
|
719
|
+
update_cols=("name", "category_id", "fdev_id", "ui_order"),
|
|
720
|
+
)
|
|
721
|
+
else:
|
|
722
|
+
for r in item_rows:
|
|
723
|
+
exists = self.session.execute(
|
|
724
|
+
select(t_item.c.item_id).where(t_item.c.item_id == r["item_id"])
|
|
725
|
+
).first()
|
|
726
|
+
if exists is None:
|
|
727
|
+
self.session.execute(insert(t_item).values(**r))
|
|
728
|
+
else:
|
|
729
|
+
self.session.execute(
|
|
730
|
+
update(t_item).where(t_item.c.item_id == r["item_id"]).values(
|
|
731
|
+
name=r["name"], category_id=r["category_id"], fdev_id=r["fdev_id"], ui_order=r["ui_order"]
|
|
732
|
+
)
|
|
482
733
|
)
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
734
|
+
|
|
735
|
+
# 2) Compute effective inserts/updates for StationItem (pre-check modified), then upsert
|
|
736
|
+
wrote = 0
|
|
737
|
+
if link_rows:
|
|
738
|
+
existing = {
|
|
739
|
+
(int(r[0]), int(r[1])): (r[2] or None)
|
|
740
|
+
for r in self.session.execute(
|
|
741
|
+
select(t_si.c.station_id, t_si.c.item_id, t_si.c.modified).where(
|
|
742
|
+
and_(t_si.c.station_id == station_id, t_si.c.item_id.in_(keep_ids))
|
|
743
|
+
)
|
|
744
|
+
).all()
|
|
745
|
+
}
|
|
746
|
+
to_insert = {
|
|
747
|
+
(station_id, rid) for rid in keep_ids
|
|
748
|
+
if (station_id, rid) not in existing
|
|
749
|
+
}
|
|
750
|
+
to_update = {
|
|
751
|
+
(station_id, rid) for rid, mod in ((rid, existing.get((station_id, rid))) for rid in keep_ids)
|
|
752
|
+
if (mod is None) or (ts_sp is not None and ts_sp > mod)
|
|
753
|
+
}
|
|
754
|
+
wrote = len(to_insert) + len(to_update)
|
|
755
|
+
|
|
756
|
+
if db_utils.is_sqlite(self.session):
|
|
757
|
+
db_utils.sqlite_upsert_modified(
|
|
758
|
+
self.session, t_si, rows=link_rows,
|
|
759
|
+
key_cols=("station_id", "item_id"),
|
|
760
|
+
modified_col="modified",
|
|
761
|
+
update_cols=("demand_price", "demand_units", "demand_level",
|
|
762
|
+
"supply_price", "supply_units", "supply_level", "from_live"),
|
|
763
|
+
)
|
|
764
|
+
elif db_utils.is_mysql(self.session):
|
|
765
|
+
db_utils.mysql_upsert_modified(
|
|
766
|
+
self.session, t_si, rows=link_rows,
|
|
767
|
+
key_cols=("station_id", "item_id"),
|
|
768
|
+
modified_col="modified",
|
|
769
|
+
update_cols=("demand_price", "demand_units", "demand_level",
|
|
770
|
+
"supply_price", "supply_units", "supply_level", "from_live"),
|
|
771
|
+
)
|
|
772
|
+
else:
|
|
773
|
+
for r in link_rows:
|
|
774
|
+
row = self.session.execute(
|
|
775
|
+
select(t_si.c.modified).where(and_(
|
|
776
|
+
t_si.c.station_id == r["station_id"],
|
|
777
|
+
t_si.c.item_id == r["item_id"],
|
|
778
|
+
))
|
|
779
|
+
).first()
|
|
780
|
+
if row is None:
|
|
781
|
+
self.session.execute(insert(t_si).values(**r))
|
|
782
|
+
else:
|
|
783
|
+
dbm = row[0]
|
|
784
|
+
if dbm is None or r["modified"] > dbm:
|
|
785
|
+
self.session.execute(
|
|
786
|
+
update(t_si)
|
|
787
|
+
.where(and_(t_si.c.station_id == r["station_id"], t_si.c.item_id == r["item_id"]))
|
|
788
|
+
.values(**r)
|
|
789
|
+
)
|
|
790
|
+
|
|
791
|
+
# 3) Delete baseline rows missing from JSON, not newer than ts_sp
|
|
792
|
+
delc = 0
|
|
793
|
+
base_where = and_(
|
|
794
|
+
t_si.c.station_id == station_id,
|
|
795
|
+
t_si.c.from_live == 0,
|
|
796
|
+
or_(t_si.c.modified == None, t_si.c.modified <= ts_sp),
|
|
797
|
+
)
|
|
798
|
+
if keep_ids:
|
|
799
|
+
delete_stmt = t_si.delete().where(and_(base_where, ~t_si.c.item_id.in_(keep_ids)))
|
|
800
|
+
else:
|
|
801
|
+
delete_stmt = t_si.delete().where(base_where)
|
|
802
|
+
|
|
803
|
+
res = self.session.execute(delete_stmt)
|
|
804
|
+
try:
|
|
805
|
+
delc = int(res.rowcount or 0)
|
|
806
|
+
except Exception:
|
|
807
|
+
delc = 0
|
|
808
|
+
|
|
809
|
+
return wrote, delc
|
|
810
|
+
|
|
811
|
+
|
|
812
|
+
# ------------------------------
|
|
813
|
+
# Lifecycle hooks
|
|
814
|
+
# ------------------------------
|
|
815
|
+
|
|
816
|
+
def run(self) -> bool:
|
|
817
|
+
"""
|
|
818
|
+
Full orchestrator: acquisition → bootstrap → EDCD preload → import → rares → export.
|
|
819
|
+
Returns False to keep default flow suppressed.
|
|
820
|
+
"""
|
|
821
|
+
started = time.time()
|
|
822
|
+
|
|
823
|
+
if self.getOption("pricesonly"):
|
|
824
|
+
try:
|
|
825
|
+
self._print("Regenerating TradeDangerous.prices …")
|
|
826
|
+
cache.regeneratePricesFile(self.tdb, self.tdenv)
|
|
827
|
+
self._print("Prices file generated.")
|
|
828
|
+
except Exception as e:
|
|
829
|
+
self._error(f"Prices regeneration failed: {e!r}")
|
|
830
|
+
return False
|
|
831
|
+
return False
|
|
832
|
+
|
|
833
|
+
# Acquire Spansh JSON
|
|
834
|
+
try:
|
|
835
|
+
source_path = self._acquire_source()
|
|
836
|
+
except CleanExit as ce:
|
|
837
|
+
self._warn(str(ce)); return False
|
|
838
|
+
except Exception as e:
|
|
839
|
+
self._error(f"Acquisition failed: {e!r}"); return False
|
|
840
|
+
|
|
841
|
+
# -------- Bootstrap DB (no cache rebuild here) --------
|
|
842
|
+
try:
|
|
843
|
+
backend = self.tdb.engine.dialect.name.lower()
|
|
844
|
+
data_dir = Path(getattr(self.tdenv, "dataDir", getattr(self.tdb, "dataDir", "data")))
|
|
845
|
+
metadata = getattr(self.tdb, "metadata", None)
|
|
846
|
+
|
|
847
|
+
summary = ensure_fresh_db(
|
|
848
|
+
backend=backend,
|
|
849
|
+
engine=self.tdb.engine,
|
|
850
|
+
data_dir=data_dir,
|
|
851
|
+
metadata=metadata,
|
|
852
|
+
mode="auto",
|
|
853
|
+
tdb=self.tdb,
|
|
854
|
+
tdenv=self.tdenv,
|
|
855
|
+
rebuild=False, # do not run buildCache here
|
|
856
|
+
)
|
|
857
|
+
self._print(
|
|
858
|
+
f"DB bootstrap: action={summary.get('action','kept')} "
|
|
859
|
+
f"reason={summary.get('reason','ok')} backend={summary.get('backend')}"
|
|
860
|
+
)
|
|
861
|
+
|
|
862
|
+
# No valid DB? Create full schema now (SQLite from canonical SQL; MariaDB via ORM)
|
|
863
|
+
if summary.get("action") == "needs_rebuild":
|
|
864
|
+
from tradedangerous.db.lifecycle import reset_db
|
|
865
|
+
db_path = Path(self.tdb.engine.url.database or (data_dir / "TradeDangerous.db")) # SQLite only
|
|
866
|
+
self._print("No valid DB detected — creating full schema…")
|
|
867
|
+
reset_db(self.tdb.engine, db_path=db_path)
|
|
868
|
+
|
|
869
|
+
# Seed 'Added' once on a fresh schema
|
|
870
|
+
self.session = self._open_session()
|
|
871
|
+
self._seed_added_from_templates(self.session)
|
|
872
|
+
self.session.commit()
|
|
873
|
+
self._safe_close_session()
|
|
874
|
+
|
|
875
|
+
except Exception as e:
|
|
876
|
+
self._error(f"Database bootstrap failed: {e!r}")
|
|
877
|
+
return False
|
|
878
|
+
|
|
879
|
+
# -------- Session + batch + reflection --------
|
|
880
|
+
try:
|
|
881
|
+
self.session = self._open_session()
|
|
882
|
+
self.batch_size = self._resolve_batch_size()
|
|
883
|
+
tables = self._reflect_tables(self.session.get_bind())
|
|
884
|
+
except Exception as e:
|
|
885
|
+
self._error(f"Failed to open/reflect DB session: {e!r}")
|
|
886
|
+
return False
|
|
887
|
+
|
|
888
|
+
# -------- EDCD preloads (hardcoded URLs; can be disabled) --------
|
|
889
|
+
edcd = self._acquire_edcd_files()
|
|
890
|
+
|
|
891
|
+
# Categories (add-only) — COMMIT immediately so they persist even if later phases fail.
|
|
892
|
+
try:
|
|
893
|
+
if edcd.get("commodity"):
|
|
894
|
+
added = self._edcd_import_categories_add_only(self.session, tables, edcd["commodity"])
|
|
895
|
+
if added:
|
|
896
|
+
self._print(f"EDCD categories: added {added} new categories")
|
|
897
|
+
self.session.commit()
|
|
898
|
+
except CleanExit as ce:
|
|
899
|
+
self._warn(str(ce)); return False
|
|
900
|
+
except Exception as e:
|
|
901
|
+
self._warn(f"EDCD categories skipped due to error: {e!r}")
|
|
902
|
+
|
|
903
|
+
# FDev catalogs (outfitting, shipyard) — COMMIT immediately as well.
|
|
904
|
+
try:
|
|
905
|
+
if edcd.get("outfitting") and edcd.get("shipyard"):
|
|
906
|
+
u, s = self._edcd_import_fdev_catalogs(
|
|
907
|
+
self.session, tables,
|
|
908
|
+
outfitting_csv=edcd["outfitting"],
|
|
909
|
+
shipyard_csv=edcd["shipyard"],
|
|
910
|
+
)
|
|
911
|
+
if (u + s) > 0:
|
|
912
|
+
self._print(f"EDCD FDev: Outfitting upserts={u:,} Shipyard upserts={s:,}")
|
|
913
|
+
self.session.commit()
|
|
914
|
+
except Exception as e:
|
|
915
|
+
self._warn(f"EDCD FDev catalogs skipped due to error: {e!r}")
|
|
916
|
+
|
|
917
|
+
# Load categories (may have grown) before Spansh import
|
|
918
|
+
try:
|
|
919
|
+
categories = self._load_categories(self.session, tables)
|
|
920
|
+
except Exception as e:
|
|
921
|
+
self._error(f"Failed to load categories: {e!r}")
|
|
922
|
+
return False
|
|
923
|
+
|
|
924
|
+
# -------- Import Spansh JSON --------
|
|
925
|
+
try:
|
|
926
|
+
if self._debug_level < 1:
|
|
927
|
+
self._print("This will take at least several minutes.")
|
|
928
|
+
self._print("You can increase verbosity (-v) to get a sense of progress")
|
|
929
|
+
self._print("Importing spansh data")
|
|
930
|
+
stats = self._import_stream(source_path, categories, tables)
|
|
931
|
+
self._end_live_status()
|
|
932
|
+
|
|
933
|
+
mk_e = stats.get("market_writes", 0) + stats.get("market_stations", 0)
|
|
934
|
+
of_e = stats.get("outfit_writes", 0) + stats.get("outfit_stations", 0)
|
|
935
|
+
sh_e = stats.get("ship_writes", 0) + stats.get("ship_stations", 0)
|
|
936
|
+
self._print(
|
|
937
|
+
f"Import complete — systems: {stats.get('systems',0):,} "
|
|
938
|
+
f"stations: {stats.get('stations',0):,} "
|
|
939
|
+
f"evaluated: markets≈{mk_e:,} outfitters≈{of_e:,} shipyards≈{sh_e:,} "
|
|
940
|
+
f"kept: markets≈{stats.get('market_stations',0):,} outfitters≈{stats.get('outfit_stations',0):,} shipyards≈{stats.get('ship_stations',0):,}"
|
|
495
941
|
)
|
|
942
|
+
except CleanExit as ce:
|
|
943
|
+
self._warn(str(ce)); self._safe_close_session(); return False
|
|
944
|
+
except Exception as e:
|
|
945
|
+
self._error(f"Import failed: {e!r}"); self._safe_close_session(); return False
|
|
946
|
+
|
|
947
|
+
# Enforce Item.ui_order
|
|
948
|
+
try:
|
|
949
|
+
t0 = time.time()
|
|
950
|
+
self._enforce_ui_order(self.session, tables)
|
|
951
|
+
self._print(f"ui_order enforced in {time.time()-t0:.2f}s")
|
|
952
|
+
except Exception as e:
|
|
953
|
+
self._error(f"ui_order enforcement failed: {e!r}")
|
|
954
|
+
self._safe_close_session(); return False
|
|
955
|
+
|
|
956
|
+
# Final commit for import phase
|
|
957
|
+
try:
|
|
958
|
+
self.session.commit()
|
|
959
|
+
except Exception as e:
|
|
960
|
+
self._warn(f"Commit failed at end of import; rolling back. Cause: {e!r}")
|
|
961
|
+
self.session.rollback(); self._safe_close_session(); return False
|
|
962
|
+
|
|
963
|
+
self._safe_close_session()
|
|
964
|
+
|
|
965
|
+
# -------- Rares (prefer EDCD; fallback to template) --------
|
|
966
|
+
try:
|
|
967
|
+
t0 = time.time()
|
|
968
|
+
if edcd.get("rares"):
|
|
969
|
+
self._import_rareitems_edcd(edcd["rares"])
|
|
970
|
+
else:
|
|
971
|
+
self._import_rareitems()
|
|
972
|
+
self._print(f"Rares imported in {time.time()-t0:.2f}s")
|
|
973
|
+
except CleanExit as ce:
|
|
974
|
+
self._warn(str(ce)); return False
|
|
975
|
+
except Exception as e:
|
|
976
|
+
self._error(f"RareItem import failed: {e!r}"); return False
|
|
977
|
+
|
|
978
|
+
# -------- Export (uses your parallel exporter already present) --------
|
|
979
|
+
try:
|
|
980
|
+
self._export_and_mirror() # timing + final print handled inside
|
|
981
|
+
except Exception as e:
|
|
982
|
+
self._error(f"Export failed: {e!r}"); return False
|
|
983
|
+
|
|
984
|
+
elapsed = self._format_hms(time.time() - started)
|
|
985
|
+
self._print(f"{elapsed} Done")
|
|
986
|
+
return False
|
|
987
|
+
|
|
988
|
+
|
|
989
|
+
|
|
990
|
+
def finish(self) -> bool:
|
|
991
|
+
"""No-op: handled in run(); finish() won’t be called."""
|
|
992
|
+
return True
|
|
496
993
|
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
994
|
+
# ------------------------------
|
|
995
|
+
# Acquisition (url/file/stdin)
|
|
996
|
+
# ------------------------------
|
|
997
|
+
|
|
998
|
+
def _acquire_source(self) -> Path:
|
|
999
|
+
"""Return a readable filesystem path to the JSON source (tmp/)."""
|
|
1000
|
+
url = self.getOption("url")
|
|
1001
|
+
file_ = self.getOption("file")
|
|
1002
|
+
cache_path = self.tmp_dir / "galaxy_stations.json"
|
|
1003
|
+
|
|
1004
|
+
if file_:
|
|
1005
|
+
if file_ == "-":
|
|
1006
|
+
self._print("Reading Spansh dump from stdin …")
|
|
1007
|
+
self._write_stream_to_file(sys.stdin.buffer, cache_path)
|
|
1008
|
+
return cache_path
|
|
1009
|
+
src = Path(file_)
|
|
1010
|
+
if not src.exists() or not src.is_file():
|
|
1011
|
+
raise CleanExit(f"Local file not found: {src}")
|
|
1012
|
+
return src.resolve()
|
|
1013
|
+
|
|
1014
|
+
if not url:
|
|
1015
|
+
url = DEFAULT_URL
|
|
1016
|
+
|
|
1017
|
+
# Pass a friendly label so progress says “Spansh dump”
|
|
1018
|
+
return self._download_with_cache(url, cache_path, label="Spansh dump")
|
|
1019
|
+
|
|
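# --- Illustrative sketch (not part of the diff) ----------------------------
# _acquire_source() gives "file" precedence over "url", treats file="-" as
# stdin, and falls back to DEFAULT_URL when neither option is set.  A minimal
# standalone sketch of that precedence; the function name pick_source and its
# return shape are illustrative, not part of the plugin.
from pathlib import Path
from typing import Optional, Tuple

def pick_source(file_: Optional[str], url: Optional[str], default_url: str) -> Tuple[str, str]:
    """Return ("stdin" | "file" | "url", value), mirroring the precedence above."""
    if file_:
        if file_ == "-":
            return ("stdin", "-")            # stream the dump from standard input
        p = Path(file_)
        if not (p.exists() and p.is_file()):
            raise FileNotFoundError(file_)    # the plugin raises CleanExit instead
        return ("file", str(p.resolve()))
    return ("url", url or default_url)        # url only consulted when no file given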
1020
|
+
def _download_with_cache(self, url: str, cache_path: Path, *, label: str = "download") -> Path:
|
|
1021
|
+
"""Conditional download with HEAD Last-Modified and atomic .part."""
|
|
1022
|
+
import urllib.request
|
|
1023
|
+
from email.utils import parsedate_to_datetime
|
|
1024
|
+
|
|
1025
|
+
remote_lm: Optional[datetime] = None
|
|
1026
|
+
try:
|
|
1027
|
+
req = urllib.request.Request(url, method="HEAD")
|
|
1028
|
+
with urllib.request.urlopen(req, timeout=30) as resp:
|
|
1029
|
+
lm_header = resp.headers.get("Last-Modified")
|
|
1030
|
+
if lm_header:
|
|
1031
|
+
try:
|
|
1032
|
+
remote_lm = parsedate_to_datetime(lm_header).astimezone(timezone.utc).replace(tzinfo=None)
|
|
1033
|
+
except Exception:
|
|
1034
|
+
remote_lm = None
|
|
1035
|
+
except Exception:
|
|
1036
|
+
pass
|
|
1037
|
+
|
|
1038
|
+
if cache_path.exists() and remote_lm:
|
|
1039
|
+
local_mtime = datetime.fromtimestamp(cache_path.stat().st_mtime, tz=timezone.utc).replace(tzinfo=None)
|
|
1040
|
+
if local_mtime >= remote_lm:
|
|
1041
|
+
self._print(f"Remote not newer; using cached {label}")
|
|
1042
|
+
return cache_path
|
|
1043
|
+
|
|
1044
|
+
self._print(f"Downloading {label} from {url} …")
|
|
1045
|
+
part = cache_path.with_suffix(cache_path.suffix + ".part")
|
|
1046
|
+
if part.exists():
|
|
1047
|
+
try:
|
|
1048
|
+
part.unlink()
|
|
1049
|
+
except Exception:
|
|
1050
|
+
pass
|
|
1051
|
+
|
|
1052
|
+
req = urllib.request.Request(url, method="GET")
|
|
1053
|
+
connect_timeout = 30
|
|
1054
|
+
chunk = 8 * 1024 * 1024 # 8 MiB
|
|
1055
|
+
|
|
1056
|
+
try:
|
|
1057
|
+
with urllib.request.urlopen(req, timeout=connect_timeout) as resp, open(part, "wb") as fh:
|
|
1058
|
+
total_hdr = resp.headers.get("Content-Length")
|
|
1059
|
+
total = int(total_hdr) if total_hdr and total_hdr.isdigit() else None
|
|
1060
|
+
downloaded = 0
|
|
1061
|
+
start = time.time()
|
|
1062
|
+
|
|
1063
|
+
while True:
|
|
1064
|
+
data = resp.read(chunk)
|
|
1065
|
+
if not data:
|
|
1066
|
+
break
|
|
1067
|
+
fh.write(data)
|
|
1068
|
+
downloaded += len(data)
|
|
1069
|
+
self._download_progress(downloaded, total, start, label=label)
|
|
1070
|
+
|
|
1071
|
+
part.replace(cache_path)
|
|
1072
|
+
|
|
1073
|
+
# Set mtime to Last-Modified if present on GET
|
|
1074
|
+
lm_header = None
|
|
1075
|
+
try:
|
|
1076
|
+
with urllib.request.urlopen(urllib.request.Request(url, method="HEAD"), timeout=10) as head2:
|
|
1077
|
+
lm_header = head2.headers.get("Last-Modified")
|
|
1078
|
+
except Exception:
|
|
1079
|
+
pass
|
|
1080
|
+
if lm_header:
|
|
1081
|
+
try:
|
|
1082
|
+
got_lm = parsedate_to_datetime(lm_header).astimezone(timezone.utc).replace(tzinfo=None)
|
|
1083
|
+
ts = got_lm.replace(tzinfo=timezone.utc).timestamp()
|
|
1084
|
+
os.utime(cache_path, (ts, ts))
|
|
1085
|
+
except Exception:
|
|
1086
|
+
pass
|
|
1087
|
+
|
|
1088
|
+
except Exception as e:
|
|
1089
|
+
try:
|
|
1090
|
+
if part.exists():
|
|
1091
|
+
part.unlink()
|
|
1092
|
+
except Exception:
|
|
1093
|
+
pass
|
|
1094
|
+
raise CleanExit(f"Download failed or timed out for {label}; skipping run ({e!r})")
|
|
1095
|
+
|
|
1096
|
+
self._print(f'Download complete: {label} → "{cache_path}"')
|
|
1097
|
+
return cache_path
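# --- Illustrative sketch (not part of the diff) ----------------------------
# The freshness check in _download_with_cache() compares the server's
# Last-Modified (from a HEAD request) with the cached file's mtime and skips
# the download when the cache is at least as new.  A compact version of that
# idea; cache_is_fresh is an illustrative name, and the real method adds
# progress reporting plus the atomic ".part" rename.
import urllib.request
from datetime import datetime, timezone
from email.utils import parsedate_to_datetime
from pathlib import Path

def cache_is_fresh(url: str, cache_path: Path) -> bool:
    """True if cache_path exists and is not older than the remote Last-Modified."""
    if not cache_path.exists():
        return False
    req = urllib.request.Request(url, method="HEAD")
    with urllib.request.urlopen(req, timeout=30) as resp:
        lm = resp.headers.get("Last-Modified")
    if not lm:
        return False  # no header: assume the remote may be newer
    remote = parsedate_to_datetime(lm).astimezone(timezone.utc)
    local = datetime.fromtimestamp(cache_path.stat().st_mtime, tz=timezone.utc)
    return local >= remote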
1099
|
+
def _download_progress(self, downloaded: int, total: Optional[int], start_ts: float, *, label: str = "download") -> None:
|
|
1100
|
+
now = time.time()
|
|
1101
|
+
if now - self._last_progress_time < 0.5 and self._debug_level < 1:
|
|
1102
|
+
return
|
|
1103
|
+
self._last_progress_time = now
|
|
1104
|
+
|
|
1105
|
+
rate = downloaded / max(now - start_ts, 1e-9)
|
|
1106
|
+
if total:
|
|
1107
|
+
pct = (downloaded / total) * 100.0
|
|
1108
|
+
msg = f"{label}: {self._fmt_bytes(downloaded)} / {self._fmt_bytes(total)} ({pct:5.1f}%) {self._fmt_bytes(rate)}/s"
|
|
1109
|
+
else:
|
|
1110
|
+
msg = f"{label}: {self._fmt_bytes(downloaded)} read {self._fmt_bytes(rate)}/s"
|
|
1111
|
+
self._live_status(msg)
|
|
1112
|
+
|
|
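# --- Illustrative sketch (not part of the diff) ----------------------------
# _download_progress() throttles status output to roughly twice per second so
# single-line progress stays cheap on multi-gigabyte downloads.  The same idea,
# stripped of the plugin's _live_status plumbing (class name is illustrative):
import sys
import time

class ThrottledStatus:
    """Print a status line at most once per `interval` seconds."""
    def __init__(self, interval: float = 0.5) -> None:
        self.interval = interval
        self.last = 0.0
    def show(self, msg: str) -> None:
        now = time.time()
        if now - self.last >= self.interval:
            self.last = now
            sys.stdout.write("\r" + msg)
            sys.stdout.flush()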
1113
|
+
def _write_stream_to_file(self, stream: io.BufferedReader, dest: Path) -> None:
|
|
1114
|
+
part = dest.with_suffix(dest.suffix + ".part")
|
|
1115
|
+
if part.exists():
|
|
526
1116
|
try:
1117
|
+
part.unlink()
|
|
1118
|
+
except Exception:
|
|
1119
|
+
pass
|
|
1120
|
+
written = 0
|
|
1121
|
+
start = time.time()
|
|
1122
|
+
try:
|
|
1123
|
+
with open(part, "wb") as fh:
|
|
1124
|
+
while True:
|
|
1125
|
+
buf = stream.read(8 * 1024 * 1024)
|
|
1126
|
+
if not buf:
|
|
1127
|
+
break
|
|
1128
|
+
fh.write(buf)
|
|
1129
|
+
written += len(buf)
|
|
1130
|
+
self._download_progress(written, None, start)
|
|
1131
|
+
part.replace(dest)
|
|
1132
|
+
except Exception as e:
|
|
545
1133
|
try:
1134
|
+
if part.exists():
|
|
1135
|
+
part.unlink()
|
|
1136
|
+
except Exception:
|
|
1137
|
+
pass
|
|
1138
|
+
raise CleanExit(f"Failed to read stdin into tmp file: {e!r})")
|
|
1139
|
+
|
|
1140
|
+
# ------------------------------
|
|
1141
|
+
# DB session / reflection
|
|
1142
|
+
# ------------------------------
|
|
1143
|
+
def _open_session(self) -> Session:
|
|
1144
|
+
"""
|
|
1145
|
+
Create a DB session and apply per-connection bulk settings.
|
|
1146
|
+
"""
|
|
1147
|
+
if hasattr(self.tdb, "Session") and callable(self.tdb.Session):
|
|
1148
|
+
sess = self.tdb.Session()
|
|
1149
|
+
elif hasattr(db_utils, "get_session"):
|
|
1150
|
+
sess = db_utils.get_session(self.tdb.engine)
|
|
1151
|
+
else:
|
|
1152
|
+
raise RuntimeError("No Session factory available")
|
|
1153
|
+
|
|
1154
|
+
# SQLite pragmas (non-fatal)
|
|
1155
|
+
try:
|
|
1156
|
+
if db_utils.is_sqlite(sess):
|
|
1157
|
+
db_utils.sqlite_set_bulk_pragmas(sess)
|
|
1158
|
+
except Exception:
|
|
1159
|
+
pass
|
|
1160
|
+
|
|
1161
|
+
# MySQL/MariaDB session tuning (non-fatal)
|
|
1162
|
+
try:
|
|
1163
|
+
if db_utils.is_mysql(sess):
|
|
1164
|
+
db_utils.mysql_set_bulk_session(sess)
|
|
1165
|
+
except Exception:
|
|
1166
|
+
pass
|
|
1167
|
+
|
|
1168
|
+
return sess
|
|
1169
|
+
|
|
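# --- Illustrative sketch (not part of the diff) ----------------------------
# db_utils.sqlite_set_bulk_pragmas() lives in tradedangerous/db/utils.py and
# its exact settings are not shown in this hunk.  Bulk-load tuning of this kind
# typically looks like the sketch below; the pragma values are assumptions
# about the style of tuning, not a copy of the helper.
from sqlalchemy import text
from sqlalchemy.orm import Session

def set_bulk_pragmas(session: Session) -> None:
    """Example per-connection settings often used for large SQLite imports."""
    for pragma in (
        "PRAGMA journal_mode=WAL",     # allow readers while the import writes
        "PRAGMA synchronous=NORMAL",   # fewer fsyncs during bulk writes
        "PRAGMA temp_store=MEMORY",
        "PRAGMA cache_size=-262144",   # roughly 256 MiB of page cache
    ):
        session.execute(text(pragma))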
1170
|
+
def _reflect_tables(self, engine: Engine) -> Dict[str, Table]:
|
|
1171
|
+
meta = MetaData()
|
|
1172
|
+
names = [
|
|
1173
|
+
"System", "Station", "Item", "Category", "StationItem",
|
|
1174
|
+
"Ship", "ShipVendor", "Upgrade", "UpgradeVendor",
|
|
1175
|
+
"FDevOutfitting", "FDevShipyard", "RareItem",
|
|
1176
|
+
]
|
|
1177
|
+
return {n: Table(n, meta, autoload_with=engine) for n in names}
|
|
1178
|
+
|
|
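# --- Illustrative sketch (not part of the diff) ----------------------------
# Reflection yields Core Table objects whose columns are addressed via ".c",
# which is how every upsert below builds its statements.  A tiny usage sketch
# of the same autoload_with call; the database path is a placeholder.
from sqlalchemy import MetaData, Table, create_engine, select

engine = create_engine("sqlite:///TradeDangerous.db")   # illustrative path
meta = MetaData()
t_system = Table("System", meta, autoload_with=engine)  # same call the method uses

with engine.connect() as conn:
    row = conn.execute(
        select(t_system.c.system_id, t_system.c.name).limit(1)
    ).first()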
1179
|
+
# ------------------------------
|
|
1180
|
+
# Import (streaming JSON → upserts)
|
|
1181
|
+
# ------------------------------
|
|
1182
|
+
def _import_stream(self, source_path: Path, categories: Dict[str, int], tables: Dict[str, Table]) -> Dict[str, int]:
|
|
1183
|
+
"""
|
|
1184
|
+
Streaming importer with service-level maxage gating (FK-safe), using per-row rules.
|
|
1185
|
+
|
|
1186
|
+
FIXES:
|
|
1187
|
+
- Batch commits now honor utils.get_import_batch_size() across *all* parent/child ops.
|
|
1188
|
+
- System/Station increments are counted in stats and batch_ops.
|
|
1189
|
+
- Commit checks occur before each station is processed (outside advisory lock scope),
|
|
1190
|
+
reducing long transactions and making Ctrl-C loss less likely.
|
|
1191
|
+
"""
|
|
1192
|
+
batch_ops = 0
|
|
1193
|
+
stats = {
|
|
1194
|
+
"systems": 0, "stations": 0,
|
|
1195
|
+
"market_stations": 0, "outfit_stations": 0, "ship_stations": 0,
|
|
1196
|
+
"market_writes": 0, "outfit_writes": 0, "ship_writes": 0,
|
|
1197
|
+
"commodities": 0,
|
|
1198
|
+
}
|
|
1199
|
+
|
|
1200
|
+
# NEW: initialize parse metrics for _progress_line(); iterator keeps these updated
|
|
1201
|
+
self._parse_bytes = 0
|
|
1202
|
+
self._parse_rate = 0.0
|
|
1203
|
+
|
|
1204
|
+
maxage_days = float(self.getOption("maxage")) if self.getOption("maxage") else None
|
|
1205
|
+
maxage_td = timedelta(days=maxage_days) if maxage_days is not None else None
|
|
1206
|
+
now_utc = datetime.utcnow()
|
|
1207
|
+
|
|
1208
|
+
try:
|
|
1209
|
+
json_ts = datetime.fromtimestamp(os.path.getmtime(source_path), tz=timezone.utc).replace(tzinfo=None)
|
|
1210
|
+
except Exception:
|
|
1211
|
+
json_ts = datetime.utcfromtimestamp(0)
|
|
1212
|
+
|
|
1213
|
+
seen_station_ids: set[int] = set()
|
|
1214
|
+
force_baseline = bool(self.getOption("force_baseline"))
|
|
1215
|
+
|
|
1216
|
+
def recent(ts: Optional[datetime]) -> bool:
|
|
1217
|
+
if ts is None:
|
|
1218
|
+
return False if maxage_td is not None else True
|
|
1219
|
+
if maxage_td is None:
|
|
1220
|
+
return True
|
|
1221
|
+
return (now_utc - ts) <= maxage_td
|
|
1222
|
+
|
|
1223
|
+
def svc_ts(st: Dict[str, Any], key: str) -> Optional[datetime]:
|
|
1224
|
+
obj = st.get(key) or {}
|
|
1225
|
+
if not isinstance(obj, dict):
|
|
1226
|
+
return None
|
|
1227
|
+
return self._parse_ts(obj.get("updateTime"))
|
|
1228
|
+
|
|
1229
|
+
with open(source_path, "rb") as fh:
|
|
1230
|
+
for sys_idx, system_obj in enumerate(self._iter_top_level_json_array(fh), 1):
|
|
1231
|
+
sys_id64 = system_obj.get("id64")
|
|
1232
|
+
sys_name = system_obj.get("name")
|
|
1233
|
+
coords = system_obj.get("coords") or {}
|
|
1234
|
+
if sys_id64 is None or sys_name is None or not isinstance(coords, dict):
|
|
1235
|
+
if self._debug_level >= 3:
|
|
1236
|
+
self._warn(f"Skipping malformed system object at index {sys_idx}")
|
|
1237
|
+
continue
|
|
1238
|
+
|
|
1239
|
+
self._trace(phase="system", decision="consider", name=sys_name, id64=sys_id64)
|
|
1240
|
+
|
|
1241
|
+
# Collect stations (top-level + body-embedded)
|
|
1242
|
+
stations: List[Dict[str, Any]] = []
|
|
1243
|
+
if isinstance(system_obj.get("stations"), list):
|
|
1244
|
+
stations.extend(system_obj["stations"])
|
|
1245
|
+
bodies = system_obj.get("bodies") or []
|
|
1246
|
+
if isinstance(bodies, list):
|
|
1247
|
+
for b in bodies:
|
|
1248
|
+
if isinstance(b, dict):
|
|
1249
|
+
stl = b.get("stations")
|
|
1250
|
+
if isinstance(stl, list):
|
|
1251
|
+
stations.extend(stl)
|
|
1252
|
+
|
|
1253
|
+
# --- System upsert ---
|
|
1254
|
+
t_system = tables["System"]
|
|
1255
|
+
x = coords.get("x"); y = coords.get("y"); z = coords.get("z")
|
|
1256
|
+
sys_modified = self._parse_ts(system_obj.get("updateTime"))
|
|
1257
|
+
self._upsert_system(t_system, int(sys_id64), str(sys_name), x, y, z, sys_modified)
|
|
1258
|
+
|
|
1259
|
+
# Count system progress and participate in batching
|
|
1260
|
+
stats["systems"] += 1
|
|
1261
|
+
batch_ops += 1
|
|
1262
|
+
|
|
1263
|
+
imported_station_modifieds: list[datetime] = []
|
|
1264
|
+
|
|
1265
|
+
for st in stations:
|
|
1266
|
+
# Periodic commit BEFORE processing the next station (outside any advisory locks)
|
|
1267
|
+
if (self.batch_size is not None) and (batch_ops >= self.batch_size):
|
|
1268
|
+
try:
|
|
1269
|
+
self.session.commit()
|
|
1270
|
+
batch_ops = 0
|
|
1271
|
+
except Exception as e:
|
|
1272
|
+
self._warn(f"Batch commit failed; rolling back. Cause: {e!r}")
|
|
1273
|
+
self.session.rollback()
|
|
1274
|
+
|
|
1275
|
+
name = st.get("name")
|
|
1276
|
+
sid = st.get("id")
|
|
1277
|
+
if not isinstance(name, str) or sid is None:
|
|
1278
|
+
continue
|
|
1279
|
+
station_id = int(sid)
|
|
1280
|
+
seen_station_ids.add(station_id)
|
|
1281
|
+
stats["stations"] += 1
|
|
1282
|
+
# Count at least one op per station so batching still progresses even if no vendor writes occur
|
|
1283
|
+
batch_ops += 1
|
|
1284
|
+
|
|
1285
|
+
# NEW: drive live progress from here (throttled inside _progress_line)
|
|
1286
|
+
self._progress_line(stats)
|
|
1287
|
+
|
|
1288
|
+
# Flags/timestamps
|
|
1289
|
+
has_market = bool(st.get("hasMarket") or ("market" in st))
|
|
1290
|
+
has_outfit = bool(st.get("hasOutfitting") or ("outfitting" in st))
|
|
1291
|
+
has_ship = bool(st.get("hasShipyard") or ("shipyard" in st))
|
|
1292
|
+
mkt_ts = svc_ts(st, "market")
|
|
1293
|
+
outf_ts = svc_ts(st, "outfitting")
|
|
1294
|
+
ship_ts = svc_ts(st, "shipyard")
|
|
1295
|
+
mkt_fresh = recent(mkt_ts)
|
|
1296
|
+
outf_fresh = recent(outf_ts)
|
|
1297
|
+
ship_fresh = recent(ship_ts)
|
|
1298
|
+
|
|
1299
|
+
# Station upsert (idempotent)
|
|
1300
|
+
t_station = tables["Station"]
|
|
1301
|
+
type_id, planetary = self._map_station_type(st.get("type"))
|
|
1302
|
+
pads = st.get("landingPads") or {}
|
|
1303
|
+
max_pad = self._derive_pad_size(pads)
|
|
1304
|
+
sflags = {
|
|
1305
|
+
"market": "Y" if has_market else "N",
|
|
1306
|
+
"blackmarket": "?" if st.get("hasBlackmarket") is None else ("Y" if st.get("hasBlackmarket") else "N"),
|
|
1307
|
+
"shipyard": "Y" if has_ship else "N",
|
|
1308
|
+
"outfitting": "Y" if has_outfit else "N",
|
|
1309
|
+
"rearm": "?" if st.get("hasRearm") is None else ("Y" if st.get("hasRearm") else "N"),
|
|
1310
|
+
"refuel": "?" if st.get("hasRefuel") is None else ("Y" if st.get("hasRefuel") else "N"),
|
|
1311
|
+
"repair": "?" if st.get("hasRepair") is None else ("Y" if st.get("hasRepair") else "N"),
|
|
1312
|
+
}
|
|
1313
|
+
st_modified = self._parse_ts(st.get("updateTime"))
|
|
1314
|
+
if st_modified:
|
|
1315
|
+
imported_station_modifieds.append(st_modified)
|
|
1316
|
+
|
|
1317
|
+
ls_from_star_val = st.get("distanceToArrival", 0)
|
|
1318
|
+
try:
|
|
1319
|
+
if ls_from_star_val is None:
|
|
1320
|
+
ls_from_star_val = 0
|
|
1321
|
+
else:
|
|
1322
|
+
ls_from_star_val = int(float(ls_from_star_val))
|
|
1323
|
+
if ls_from_star_val < 0:
|
|
1324
|
+
ls_from_star_val = 0
|
|
1325
|
+
except Exception:
|
|
1326
|
+
ls_from_star_val = 0
|
|
1327
|
+
|
|
1328
|
+
self._upsert_station(
|
|
1329
|
+
t_station, station_id=int(station_id), system_id=int(sys_id64), name=name,
|
|
1330
|
+
ls_from_star=ls_from_star_val, max_pad=max_pad,
|
|
1331
|
+
type_id=int(type_id), planetary=planetary, sflags=sflags, modified=st_modified
|
|
1332
|
+
)
|
|
1333
|
+
|
|
1334
|
+
# ----------------------------
|
|
1335
|
+
# Ship vendor
|
|
1336
|
+
# ----------------------------
|
|
1337
|
+
if has_ship and ship_fresh:
|
|
1338
|
+
ships = (st.get("shipyard") or {}).get("ships") or []
|
|
1339
|
+
if isinstance(ships, list) and ships:
|
|
1340
|
+
if force_baseline:
|
|
1341
|
+
wrote, _, delc = self._apply_vendor_block_per_rules(
|
|
1342
|
+
tables["ShipVendor"], station_id, (s.get("shipId") for s in ships if isinstance(s, dict)),
|
|
1343
|
+
ship_ts, id_col="ship_id",
|
|
1344
|
+
)
|
|
1345
|
+
if wrote or delc:
|
|
1346
|
+
stats["ship_writes"] += 1
|
|
1347
|
+
batch_ops += (wrote + delc)
|
|
1348
|
+
stats["ship_stations"] += 1
|
|
1349
|
+
else:
|
|
1350
|
+
wrote, delc = self._sync_vendor_block_fast(
|
|
1351
|
+
tables, station_id=station_id, entries=ships, ts_sp=ship_ts, kind="ship"
|
|
1352
|
+
)
|
|
1353
|
+
if wrote or delc:
|
|
1354
|
+
stats["ship_writes"] += 1
|
|
1355
|
+
batch_ops += (wrote + delc)
|
|
1356
|
+
stats["ship_stations"] += 1
|
|
1357
|
+
else:
|
|
1358
|
+
stats["ship_stations"] += 1
|
|
1359
|
+
|
|
1360
|
+
# ----------------------------
|
|
1361
|
+
# Outfitting vendor
|
|
1362
|
+
# ----------------------------
|
|
1363
|
+
if has_outfit and outf_fresh:
|
|
1364
|
+
modules = (st.get("outfitting") or {}).get("modules") or []
|
|
1365
|
+
if isinstance(modules, list) and modules:
|
|
1366
|
+
if force_baseline:
|
|
1367
|
+
wrote = self._upsert_outfitting(tables, station_id, modules, outf_ts)
|
|
1368
|
+
_, _, delc = self._apply_vendor_block_per_rules(
|
|
1369
|
+
tables["UpgradeVendor"], station_id,
|
|
1370
|
+
(m.get("moduleId") for m in modules if isinstance(m, dict)),
|
|
1371
|
+
outf_ts, id_col="upgrade_id",
|
|
1372
|
+
)
|
|
1373
|
+
if wrote or delc:
|
|
1374
|
+
stats["outfit_writes"] += 1
|
|
1375
|
+
batch_ops += (wrote + delc)
|
|
1376
|
+
stats["outfit_stations"] += 1
|
|
1377
|
+
else:
|
|
1378
|
+
wrote, delc = self._sync_vendor_block_fast(
|
|
1379
|
+
tables, station_id=station_id, entries=modules, ts_sp=outf_ts, kind="module"
|
|
1380
|
+
)
|
|
1381
|
+
if wrote or delc:
|
|
1382
|
+
stats["outfit_writes"] += 1
|
|
1383
|
+
batch_ops += (wrote + delc)
|
|
1384
|
+
stats["outfit_stations"] += 1
|
|
1385
|
+
else:
|
|
1386
|
+
stats["outfit_stations"] += 1
|
|
1387
|
+
|
|
1388
|
+
# ----------------------------
|
|
1389
|
+
# Market (commit check already happened before this station)
|
|
1390
|
+
# ----------------------------
|
|
1391
|
+
if has_market and mkt_fresh:
|
|
1392
|
+
commodities = (st.get("market") or {}).get("commodities") or []
|
|
1393
|
+
if isinstance(commodities, list) and commodities:
|
|
1394
|
+
from ..db.locks import station_advisory_lock
|
|
1395
|
+
# The advisory lock context pins lock + DML to the same connection/txn.
|
|
1396
|
+
with station_advisory_lock(self.session, station_id, timeout_seconds=0.2, max_retries=4) as got:
|
|
1397
|
+
if not got:
|
|
1398
|
+
# Could not acquire; try this station on a later pass
|
|
1399
|
+
continue
|
|
1400
|
+
|
|
1401
|
+
self._trace(phase="market", decision="process",
|
|
1402
|
+
station_id=station_id, commodities=len(commodities))
|
|
1403
|
+
|
|
1404
|
+
if force_baseline:
|
|
1405
|
+
wrote_i, wrote_si = self._upsert_market(
|
|
1406
|
+
tables, categories, station_id, commodities, mkt_ts
|
|
1407
|
+
)
|
|
1408
|
+
# Remove any extras unconditionally (baseline reset)
|
|
1409
|
+
t_si = tables["StationItem"]
|
|
1410
|
+
keep_ids = {
|
|
1411
|
+
int(co.get("commodityId"))
|
|
1412
|
+
for co in commodities
|
|
1413
|
+
if isinstance(co, dict) and co.get("commodityId") is not None
|
|
1414
|
+
}
|
|
1415
|
+
if keep_ids:
|
|
1416
|
+
self.session.execute(
|
|
1417
|
+
t_si.delete().where(
|
|
1418
|
+
and_(t_si.c.station_id == station_id, ~t_si.c.item_id.in_(keep_ids))
|
|
1419
|
+
)
|
|
1420
|
+
)
|
|
1421
|
+
stats["commodities"] += wrote_si
|
|
1422
|
+
if wrote_si or wrote_i:
|
|
1423
|
+
stats["market_writes"] += 1
|
|
1424
|
+
batch_ops += (wrote_i + wrote_si)
|
|
1425
|
+
stats["market_stations"] += 1
|
|
1426
|
+
else:
|
|
1427
|
+
wrote_links, delc = self._sync_market_block_fast(
|
|
1428
|
+
tables, categories,
|
|
1429
|
+
station_id=station_id,
|
|
1430
|
+
commodities=commodities,
|
|
1431
|
+
ts_sp=mkt_ts,
|
|
1432
|
+
)
|
|
1433
|
+
if wrote_links or delc:
|
|
1434
|
+
stats["market_writes"] += 1
|
|
1435
|
+
batch_ops += (wrote_links + delc)
|
|
1436
|
+
stats["market_stations"] += 1
|
|
1437
|
+
else:
|
|
1438
|
+
stats["market_stations"] += 1
|
|
1439
|
+
|
|
1440
|
+
# Baseline absent-station cleanup (global, after full stream)
|
|
1441
|
+
# We only remove baseline content (from_live=0 for markets; vendor links)
|
|
1442
|
+
# and only where modified <= json_ts, so anything newer (e.g. live/ZMQ) is preserved.
|
|
1443
|
+
try:
|
|
1444
|
+
if force_baseline and seen_station_ids:
|
|
1445
|
+
m_del, u_del, s_del = self._cleanup_absent_stations(
|
|
1446
|
+
tables,
|
|
1447
|
+
present_station_ids=seen_station_ids,
|
|
1448
|
+
json_ts=json_ts,
|
|
1449
|
+
)
|
|
1450
|
+
if (m_del + u_del + s_del) > 0 and self._debug_level >= 1:
|
|
1451
|
+
self._print(
|
|
1452
|
+
f"Baseline cleanup: markets={m_del:,} upgrades={u_del:,} ships={s_del:,}"
|
|
1453
|
+
)
|
|
1454
|
+
except Exception as e:
|
|
1455
|
+
self._warn(f"Absent-station cleanup skipped due to error: {e!r}")
|
|
1456
|
+
|
|
1457
|
+
return stats
|
|
1458
|
+
|
|
1459
|
+
|
|
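# --- Illustrative sketch (not part of the diff) ----------------------------
# _import_stream() walks the dump one system object at a time via
# _iter_top_level_json_array(), which is defined elsewhere in this module and
# not shown in this hunk.  A minimal sketch of that streaming technique over a
# huge top-level JSON array, assuming a text-mode file handle; names and
# details here are illustrative, not the plugin's implementation.
import json
from typing import Any, Iterator, TextIO

def iter_top_level_json_array(fh: TextIO, chunk_size: int = 1 << 20) -> Iterator[Any]:
    """Yield elements of a top-level JSON array without loading the whole file."""
    decoder = json.JSONDecoder()
    buf = ""
    started = False
    while True:
        chunk = fh.read(chunk_size)
        if not chunk:
            return
        buf += chunk
        if not started:
            i = buf.find("[")
            if i < 0:
                continue                 # still before the opening bracket
            buf = buf[i + 1:]
            started = True
        while True:
            buf = buf.lstrip(" \t\r\n,")
            if not buf or buf[0] == "]":
                break                    # array finished or need more data
            try:
                obj, end = decoder.raw_decode(buf)
            except json.JSONDecodeError:
                break                    # element incomplete: read another chunk
            yield obj
            buf = buf[end:]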
1460
|
+
# ------------------------------
|
|
1461
|
+
# Upsert helpers
|
|
1462
|
+
# ------------------------------
|
|
1463
|
+
def _upsert_system(
|
|
1464
|
+
self, t_system: Table, system_id: int, name: str,
|
|
1465
|
+
x: Optional[float], y: Optional[float], z: Optional[float],
|
|
1466
|
+
modified: Optional[datetime],
|
|
1467
|
+
) -> None:
|
|
1468
|
+
"""
|
|
1469
|
+
Upsert System with timestamp guard.
|
|
1470
|
+
'added' policy (when column exists):
|
|
1471
|
+
- INSERT: set added=20 (EDSM).
|
|
1472
|
+
- UPDATE: do not overwrite, unless existing added IS NULL → set to 20.
|
|
1473
|
+
"""
|
|
1474
|
+
if modified is None:
|
|
1475
|
+
modified = datetime.utcfromtimestamp(0)
|
|
1476
|
+
|
|
1477
|
+
has_added_col = hasattr(t_system.c, "added")
|
|
1478
|
+
|
|
1479
|
+
row = {
|
|
1480
|
+
"system_id": system_id,
|
|
1481
|
+
"name": name,
|
|
1482
|
+
"pos_x": x, "pos_y": y, "pos_z": z,
|
|
1483
|
+
"modified": modified,
|
|
1484
|
+
}
|
|
1485
|
+
if has_added_col:
|
|
1486
|
+
row["added"] = 20 # EDSM on INSERT
|
|
1487
|
+
|
|
1488
|
+
if db_utils.is_sqlite(self.session):
|
|
1489
|
+
db_utils.sqlite_upsert_modified(
|
|
1490
|
+
self.session, t_system,
|
|
1491
|
+
rows=[row],
|
|
1492
|
+
key_cols=("system_id",),
|
|
1493
|
+
modified_col="modified",
|
|
1494
|
+
update_cols=("name", "pos_x", "pos_y", "pos_z"),
|
|
627
1495
|
)
1496
|
+
if has_added_col:
|
|
1497
|
+
self.session.execute(
|
|
1498
|
+
update(t_system)
|
|
1499
|
+
.where((t_system.c.system_id == system_id) & (t_system.c.added.is_(None)))
|
|
1500
|
+
.values(added=20)
|
|
1501
|
+
)
|
|
1502
|
+
return
|
|
1503
|
+
|
|
1504
|
+
if db_utils.is_mysql(self.session):
|
|
1505
|
+
db_utils.mysql_upsert_modified(
|
|
1506
|
+
self.session, t_system,
|
|
1507
|
+
rows=[row],
|
|
1508
|
+
key_cols=("system_id",),
|
|
1509
|
+
modified_col="modified",
|
|
1510
|
+
update_cols=("name", "pos_x", "pos_y", "pos_z"),
|
|
636
1511
|
)
""" Adds a record for a ship, and registers the ship in the known_ships dict. """
|
|
665
|
-
self.execute(
|
|
666
|
-
'''
|
|
667
|
-
INSERT INTO Ship (ship_id, name) VALUES (?, ?)
|
|
668
|
-
''',
|
|
669
|
-
ship.id, ship.name,
|
|
670
|
-
commitable=True,
|
|
671
|
-
)
|
|
672
|
-
self.known_ships[ship.id] = ship.name
|
|
673
|
-
|
|
674
|
-
return ship
|
|
675
|
-
|
|
676
|
-
def ensure_module(self, module: Module):
|
|
677
|
-
""" Adds a record for a module, and registers the module in the known_modules dict. """
|
|
678
|
-
self.execute(
|
|
679
|
-
'''
|
|
680
|
-
INSERT INTO Upgrade (upgrade_id, name, class, rating, ship) VALUES (?, ?, ?, ?, ?)
|
|
681
|
-
''',
|
|
682
|
-
module.id, module.name, module.cls, module.rating, module.ship,
|
|
683
|
-
commitable=True,
|
|
684
|
-
)
|
|
685
|
-
self.known_modules[module.id] = module.name
|
|
686
|
-
|
|
687
|
-
return module
|
|
688
|
-
|
|
689
|
-
def ensure_commodity(self, commodity: Commodity):
|
|
690
|
-
""" Adds a record for a commodity and registers the commodity in the known_commodities dict. """
|
|
691
|
-
self.execute(
|
|
692
|
-
'''
|
|
693
|
-
INSERT INTO Item (item_id, category_id, name, fdev_id)
|
|
694
|
-
VALUES (?, (SELECT category_id FROM Category WHERE upper(name) = ?), ?, ?)
|
|
695
|
-
''',
|
|
696
|
-
commodity.id,
|
|
697
|
-
commodity.category.upper(),
|
|
698
|
-
corrections.correctItem(commodity.name),
|
|
699
|
-
commodity.id,
|
|
700
|
-
commitable=True,
|
|
701
|
-
)
|
|
702
|
-
|
|
703
|
-
# Need to update ui_order
|
|
704
|
-
temp = self.execute("""SELECT name, category_id, fdev_id, ui_order
|
|
705
|
-
FROM Item
|
|
706
|
-
ORDER BY category_id, name
|
|
707
|
-
""")
|
|
708
|
-
cat_id = 0
|
|
709
|
-
ui_order = 1
|
|
710
|
-
self.tdenv.DEBUG0("Updating ui_order data for items.")
|
|
711
|
-
changes = []
|
|
712
|
-
for name, db_cat, fdev_id, db_order in temp:
|
|
713
|
-
if db_cat != cat_id:
|
|
714
|
-
ui_order = 1
|
|
715
|
-
cat_id = db_cat
|
|
716
|
-
else:
|
|
717
|
-
ui_order += 1
|
|
718
|
-
if ui_order != db_order:
|
|
719
|
-
self.tdenv.DEBUG0(f"UI order for {name} ({fdev_id}) needs correction.")
|
|
720
|
-
changes += [(ui_order, fdev_id)]
|
|
721
|
-
|
|
722
|
-
if changes:
|
|
723
|
-
self.executemany(
|
|
724
|
-
"UPDATE Item SET ui_order = ? WHERE fdev_id = ?",
|
|
725
|
-
changes,
|
|
726
|
-
commitable=True
|
|
1512
|
+
if has_added_col:
|
|
1513
|
+
self.session.execute(
|
|
1514
|
+
update(t_system)
|
|
1515
|
+
.where((t_system.c.system_id == system_id) & (t_system.c.added.is_(None)))
|
|
1516
|
+
.values(added=20)
|
|
1517
|
+
)
|
|
1518
|
+
return
|
|
1519
|
+
|
|
1520
|
+
# Generic fallback
|
|
1521
|
+
sel_cols = [t_system.c.modified]
|
|
1522
|
+
if has_added_col:
|
|
1523
|
+
sel_cols.append(t_system.c.added)
|
|
1524
|
+
existing = self.session.execute(
|
|
1525
|
+
select(*sel_cols).where(t_system.c.system_id == system_id)
|
|
1526
|
+
).first()
|
|
1527
|
+
|
|
1528
|
+
if existing is None:
|
|
1529
|
+
self.session.execute(insert(t_system).values(**row))
|
|
1530
|
+
else:
|
|
1531
|
+
db_modified = existing[0]
|
|
1532
|
+
values = {"name": name, "pos_x": x, "pos_y": y, "pos_z": z}
|
|
1533
|
+
if db_modified is None or modified > db_modified:
|
|
1534
|
+
values["modified"] = modified
|
|
1535
|
+
self.session.execute(
|
|
1536
|
+
update(t_system)
|
|
1537
|
+
.where(t_system.c.system_id == system_id)
|
|
1538
|
+
.values(**values)
|
|
727
1539
|
)
1540
|
+
if has_added_col:
|
|
1541
|
+
db_added = existing[1] if len(existing) > 1 else None
|
|
1542
|
+
if db_added is None:
|
|
1543
|
+
self.session.execute(
|
|
1544
|
+
update(t_system)
|
|
1545
|
+
.where((t_system.c.system_id == system_id) & (t_system.c.added.is_(None)))
|
|
1546
|
+
.values(added=20)
|
|
1547
|
+
)
|
|
1548
|
+
|
|
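# --- Illustrative sketch (not part of the diff) ----------------------------
# sqlite_upsert_modified()/mysql_upsert_modified() come from
# tradedangerous/db/utils.py and are not shown in this hunk.  The guard they
# provide is assumed here to be the usual "only overwrite when the incoming
# modified timestamp is newer" upsert; in raw SQLite that idea looks roughly
# like the statement below (column list abbreviated, semantics assumed).
from sqlalchemy import text

UPSERT_SYSTEM_SQL = text("""
    INSERT INTO System (system_id, name, pos_x, pos_y, pos_z, modified)
    VALUES (:system_id, :name, :pos_x, :pos_y, :pos_z, :modified)
    ON CONFLICT (system_id) DO UPDATE SET
        name = excluded.name,
        pos_x = excluded.pos_x,
        pos_y = excluded.pos_y,
        pos_z = excluded.pos_z,
        modified = excluded.modified
    WHERE excluded.modified > System.modified
""")
# session.execute(UPSERT_SYSTEM_SQL, row) inserts new systems and only updates
# existing rows whose stored 'modified' is older than the incoming value.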
1549
|
+
def _upsert_station(
|
|
1550
|
+
self, t_station: Table, station_id: int, system_id: int, name: str,
|
|
1551
|
+
ls_from_star: Optional[float], max_pad: str, type_id: int, planetary: str,
|
|
1552
|
+
sflags: Dict[str, str], modified: Optional[datetime],
|
|
1553
|
+
) -> None:
|
|
1554
|
+
"""
|
|
1555
|
+
Upsert Station with timestamp guard.
|
|
1556
|
+
"""
|
|
1557
|
+
if modified is None:
|
|
1558
|
+
modified = datetime.utcfromtimestamp(0)
|
|
1559
|
+
|
|
1560
|
+
if db_utils.is_sqlite(self.session):
|
|
1561
|
+
db_utils.sqlite_upsert_modified(
|
|
1562
|
+
self.session, t_station,
|
|
1563
|
+
rows=[{
|
|
1564
|
+
"station_id": station_id,
|
|
1565
|
+
"system_id": system_id,
|
|
1566
|
+
"name": name,
|
|
1567
|
+
"ls_from_star": ls_from_star,
|
|
1568
|
+
"max_pad_size": max_pad,
|
|
1569
|
+
"type_id": type_id,
|
|
1570
|
+
"planetary": planetary,
|
|
1571
|
+
"market": sflags["market"],
|
|
1572
|
+
"blackmarket": sflags["blackmarket"],
|
|
1573
|
+
"shipyard": sflags["shipyard"],
|
|
1574
|
+
"outfitting": sflags["outfitting"],
|
|
1575
|
+
"rearm": sflags["rearm"],
|
|
1576
|
+
"refuel": sflags["refuel"],
|
|
1577
|
+
"repair": sflags["repair"],
|
|
1578
|
+
"modified": modified,
|
|
1579
|
+
}],
|
|
1580
|
+
key_cols=("station_id",),
|
|
1581
|
+
modified_col="modified",
|
|
1582
|
+
update_cols=(
|
|
1583
|
+
"system_id", "name", "ls_from_star", "max_pad_size", "type_id", "planetary",
|
|
1584
|
+
"market", "blackmarket", "shipyard", "outfitting", "rearm", "refuel", "repair",
|
|
1585
|
+
),
|
|
1586
|
+
)
|
|
1587
|
+
return
|
|
1588
|
+
|
|
1589
|
+
if db_utils.is_mysql(self.session):
|
|
1590
|
+
db_utils.mysql_upsert_modified(
|
|
1591
|
+
self.session, t_station,
|
|
1592
|
+
rows=[{
|
|
1593
|
+
"station_id": station_id,
|
|
1594
|
+
"system_id": system_id,
|
|
1595
|
+
"name": name,
|
|
1596
|
+
"ls_from_star": ls_from_star,
|
|
1597
|
+
"max_pad_size": max_pad,
|
|
1598
|
+
"type_id": type_id,
|
|
1599
|
+
"planetary": planetary,
|
|
1600
|
+
"market": sflags["market"],
|
|
1601
|
+
"blackmarket": sflags["blackmarket"],
|
|
1602
|
+
"shipyard": sflags["shipyard"],
|
|
1603
|
+
"outfitting": sflags["outfitting"],
|
|
1604
|
+
"rearm": sflags["rearm"],
|
|
1605
|
+
"refuel": sflags["refuel"],
|
|
1606
|
+
"repair": sflags["repair"],
|
|
1607
|
+
"modified": modified,
|
|
1608
|
+
}],
|
|
1609
|
+
key_cols=("station_id",),
|
|
1610
|
+
modified_col="modified",
|
|
1611
|
+
update_cols=(
|
|
1612
|
+
"system_id", "name", "ls_from_star", "max_pad_size", "type_id", "planetary",
|
|
1613
|
+
"market", "blackmarket", "shipyard", "outfitting", "rearm", "refuel", "repair",
|
|
755
1614
|
),
|
|
756
|
-
|
|
1615
|
+
)
|
|
1616
|
+
return
|
|
1617
|
+
|
|
1618
|
+
# Generic fallback
|
|
1619
|
+
row = self.session.execute(
|
|
1620
|
+
select(t_station.c.system_id, t_station.c.modified)
|
|
1621
|
+
.where(t_station.c.station_id == station_id)
|
|
1622
|
+
).first()
|
|
1623
|
+
|
|
1624
|
+
if row is None:
|
|
1625
|
+
self.session.execute(
|
|
1626
|
+
insert(t_station).values(
|
|
1627
|
+
station_id=station_id,
|
|
1628
|
+
system_id=system_id,
|
|
1629
|
+
name=name,
|
|
1630
|
+
ls_from_star=ls_from_star,
|
|
1631
|
+
max_pad_size=max_pad,
|
|
1632
|
+
type_id=type_id,
|
|
1633
|
+
planetary=planetary,
|
|
1634
|
+
market=sflags["market"],
|
|
1635
|
+
blackmarket=sflags["blackmarket"],
|
|
1636
|
+
shipyard=sflags["shipyard"],
|
|
1637
|
+
outfitting=sflags["outfitting"],
|
|
1638
|
+
rearm=sflags["rearm"],
|
|
1639
|
+
refuel=sflags["refuel"],
|
|
1640
|
+
repair=sflags["repair"],
|
|
1641
|
+
modified=modified,
|
|
1642
|
+
)
|
|
1643
|
+
)
|
|
1644
|
+
else:
|
|
1645
|
+
db_system_id, db_modified = row
|
|
1646
|
+
values = {
|
|
1647
|
+
"name": name,
|
|
1648
|
+
"ls_from_star": ls_from_star,
|
|
1649
|
+
"max_pad_size": max_pad,
|
|
1650
|
+
"type_id": type_id,
|
|
1651
|
+
"planetary": planetary,
|
|
1652
|
+
"market": sflags["market"],
|
|
1653
|
+
"blackmarket": sflags["blackmarket"],
|
|
1654
|
+
"shipyard": sflags["shipyard"],
|
|
1655
|
+
"outfitting": sflags["outfitting"],
|
|
1656
|
+
"rearm": sflags["rearm"],
|
|
1657
|
+
"refuel": sflags["refuel"],
|
|
1658
|
+
"repair": sflags["repair"],
|
|
1659
|
+
}
|
|
1660
|
+
if db_system_id != system_id:
|
|
1661
|
+
values["system_id"] = system_id
|
|
1662
|
+
if db_modified is None or modified > db_modified:
|
|
1663
|
+
values["modified"] = modified
|
|
1664
|
+
|
|
1665
|
+
self.session.execute(
|
|
1666
|
+
update(t_station)
|
|
1667
|
+
.where(t_station.c.station_id == station_id)
|
|
1668
|
+
.values(**values)
|
|
757
1669
|
)
|
|
758
1670
|
|
|
1671
|
+
def _upsert_shipyard(self, tables: Dict[str, Table], station_id: int, ships: List[Dict[str, Any]], ts: datetime) -> int:
|
|
1672
|
+
t_ship, t_vendor = tables["Ship"], tables["ShipVendor"]
|
|
1673
|
+
ship_rows, vendor_rows = [], []
|
|
759
1674
|
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
-        for target in targets:
-            for station_data in target.get('stations', ()):
-                services = set(station_data.get('services', ()))
-                shipyard = None
-                if 'Shipyard' in services:
-                    shipyard = station_data.get('shipyard', {})
-                outfitting = None
-                if 'Outfitting' in services:
-                    outfitting = station_data.get('outfitting', {})
-                market = None
-                if 'Market' in services:
-                    market = station_data.get('market', {})
-                if not shipyard and not outfitting and not market:
1675
|
+
for sh in ships:
|
|
1676
|
+
ship_id = sh.get("shipId")
|
|
1677
|
+
name = sh.get("name")
|
|
1678
|
+
if ship_id is None or name is None:
|
|
777
1679
|
continue
|
|
1680
|
+
ship_rows.append({"ship_id": ship_id, "name": name})
|
|
1681
|
+
vendor_rows.append({"ship_id": ship_id, "station_id": station_id, "modified": ts})
|
|
1682
|
+
|
|
1683
|
+
if ship_rows:
|
|
1684
|
+
if db_utils.is_sqlite(self.session):
|
|
1685
|
+
db_utils.sqlite_upsert_simple(self.session, t_ship, rows=ship_rows, key_cols=("ship_id",), update_cols=("name",))
|
|
1686
|
+
elif db_utils.is_mysql(self.session):
|
|
1687
|
+
db_utils.mysql_upsert_simple(self.session, t_ship, rows=ship_rows, key_cols=("ship_id",), update_cols=("name",))
|
|
1688
|
+
else:
|
|
1689
|
+
for r in ship_rows:
|
|
1690
|
+
exists = self.session.execute(select(t_ship.c.name).where(t_ship.c.ship_id == r["ship_id"])).first()
|
|
1691
|
+
if exists is None:
|
|
1692
|
+
self.session.execute(insert(t_ship).values(**r))
|
|
1693
|
+
elif exists[0] != r["name"]:
|
|
1694
|
+
self.session.execute(update(t_ship).where(t_ship.c.ship_id == r["ship_id"]).values(name=r["name"]))
|
|
1695
|
+
|
|
1696
|
+
wrote = 0
|
|
1697
|
+
if vendor_rows:
|
|
1698
|
+
if db_utils.is_sqlite(self.session):
|
|
1699
|
+
db_utils.sqlite_upsert_modified(self.session, t_vendor, rows=vendor_rows,
|
|
1700
|
+
key_cols=("ship_id", "station_id"), modified_col="modified", update_cols=())
|
|
1701
|
+
wrote = len(vendor_rows)
|
|
1702
|
+
elif db_utils.is_mysql(self.session):
|
|
1703
|
+
db_utils.mysql_upsert_modified(self.session, t_vendor, rows=vendor_rows,
|
|
1704
|
+
key_cols=("ship_id", "station_id"), modified_col="modified", update_cols=())
|
|
1705
|
+
wrote = len(vendor_rows)
|
|
1706
|
+
else:
|
|
1707
|
+
for r in vendor_rows:
|
|
1708
|
+
ven = self.session.execute(
|
|
1709
|
+
select(t_vendor.c.modified).where(and_(t_vendor.c.ship_id == r["ship_id"], t_vendor.c.station_id == r["station_id"]))
|
|
1710
|
+
).first()
|
|
1711
|
+
if ven is None:
|
|
1712
|
+
self.session.execute(insert(t_vendor).values(**r)); wrote += 1
|
|
1713
|
+
else:
|
|
1714
|
+
dbm = ven[0]
|
|
1715
|
+
if dbm is None or r["modified"] > dbm:
|
|
1716
|
+
self.session.execute(
|
|
1717
|
+
update(t_vendor)
|
|
1718
|
+
.where(and_(t_vendor.c.ship_id == r["ship_id"], t_vendor.c.station_id == r["station_id"]))
|
|
1719
|
+
.values(modified=r["modified"])
|
|
1720
|
+
)
|
|
1721
|
+
wrote += 1
|
|
1722
|
+
return wrote
|
|
1723
|
+
|
|
1724
|
+
def _upsert_outfitting(self, tables: Dict[str, Table], station_id: int, modules: List[Dict[str, Any]], ts: datetime) -> int:
|
|
1725
|
+
t_up, t_vendor = tables["Upgrade"], tables["UpgradeVendor"]
|
|
1726
|
+
up_rows, vendor_rows = [], []
|
|
1727
|
+
|
|
1728
|
+
for mo in modules:
|
|
1729
|
+
up_id = mo.get("moduleId")
|
|
1730
|
+
name = mo.get("name")
|
|
1731
|
+
cls = mo.get("class")
|
|
1732
|
+
rating = mo.get("rating")
|
|
1733
|
+
ship = mo.get("ship")
|
|
1734
|
+
if up_id is None or name is None:
|
|
1735
|
+
continue
|
|
1736
|
+
|
|
1737
|
+
up_rows.append({"upgrade_id": up_id, "name": name, "class": cls, "rating": rating, "ship": ship})
|
|
1738
|
+
vendor_rows.append({"upgrade_id": up_id, "station_id": station_id, "modified": ts})
|
|
1739
|
+
|
|
1740
|
+
if up_rows:
|
|
1741
|
+
if db_utils.is_sqlite(self.session):
|
|
1742
|
+
db_utils.sqlite_upsert_simple(self.session, t_up, rows=up_rows, key_cols=("upgrade_id",),
|
|
1743
|
+
update_cols=("name", "class", "rating", "ship"))
|
|
1744
|
+
elif db_utils.is_mysql(self.session):
|
|
1745
|
+
db_utils.mysql_upsert_simple(self.session, t_up, rows=up_rows, key_cols=("upgrade_id",),
|
|
1746
|
+
update_cols=("name", "class", "rating", "ship"))
|
|
1747
|
+
else:
|
|
1748
|
+
for r in up_rows:
|
|
1749
|
+
exists = self.session.execute(select(t_up.c.upgrade_id).where(t_up.c.upgrade_id == r["upgrade_id"])).first()
|
|
1750
|
+
if exists is None:
|
|
1751
|
+
self.session.execute(insert(t_up).values(**r))
|
|
1752
|
+
else:
|
|
1753
|
+
self.session.execute(
|
|
1754
|
+
update(t_up).where(t_up.c.upgrade_id == r["upgrade_id"]).values(
|
|
1755
|
+
name=r["name"], **{"class": r["class"]}, rating=r["rating"], ship=r["ship"]
|
|
1756
|
+
)
|
|
1757
|
+
)
|
|
1758
|
+
|
|
1759
|
+
wrote = 0
|
|
1760
|
+
if vendor_rows:
|
|
1761
|
+
if db_utils.is_sqlite(self.session):
|
|
1762
|
+
db_utils.sqlite_upsert_modified(self.session, t_vendor, rows=vendor_rows,
|
|
1763
|
+
key_cols=("upgrade_id", "station_id"), modified_col="modified", update_cols=())
|
|
1764
|
+
wrote = len(vendor_rows)
|
|
1765
|
+
elif db_utils.is_mysql(self.session):
|
|
1766
|
+
db_utils.mysql_upsert_modified(self.session, t_vendor, rows=vendor_rows,
|
|
1767
|
+
key_cols=("upgrade_id", "station_id"), modified_col="modified", update_cols=())
|
|
1768
|
+
wrote = len(vendor_rows)
|
|
1769
|
+
else:
|
|
1770
|
+
for r in vendor_rows:
|
|
1771
|
+
ven = self.session.execute(
|
|
1772
|
+
select(t_vendor.c.modified).where(and_(t_vendor.c.upgrade_id == r["upgrade_id"], t_vendor.c.station_id == r["station_id"]))
|
|
1773
|
+
).first()
|
|
1774
|
+
if ven is None:
|
|
1775
|
+
self.session.execute(insert(t_vendor).values(**r)); wrote += 1
|
|
1776
|
+
else:
|
|
1777
|
+
dbm = ven[0]
|
|
1778
|
+
if dbm is None or r["modified"] > dbm:
|
|
1779
|
+
self.session.execute(
|
|
1780
|
+
update(t_vendor)
|
|
1781
|
+
.where(and_(t_vendor.c.upgrade_id == r["upgrade_id"], t_vendor.c.station_id == r["station_id"]))
|
|
1782
|
+
.values(modified=r["modified"])
|
|
1783
|
+
)
|
|
1784
|
+
wrote += 1
|
|
1785
|
+
return wrote
|
|
1786
|
+
|
|
1787
|
+
def _upsert_market(
|
|
1788
|
+
self,
|
|
1789
|
+
tables: Dict[str, Table],
|
|
1790
|
+
categories: Dict[str, int],
|
|
1791
|
+
station_id: int,
|
|
1792
|
+
commodities: List[Dict[str, Any]],
|
|
1793
|
+
ts: datetime,
|
|
1794
|
+
) -> Tuple[int, int]:
|
|
1795
|
+
t_item, t_si = tables["Item"], tables["StationItem"]
|
|
1796
|
+
item_rows, link_rows = [], []
|
|
1797
|
+
wrote_items = 0
|
|
1798
|
+
|
|
1799
|
+
for co in commodities:
|
|
1800
|
+
fdev_id = co.get("commodityId")
|
|
1801
|
+
name = co.get("name")
|
|
1802
|
+
cat_name = co.get("category")
|
|
1803
|
+
if fdev_id is None or name is None or cat_name is None:
|
|
1804
|
+
continue
|
|
1805
|
+
|
|
1806
|
+
cat_id = categories.get(str(cat_name).lower())
|
|
1807
|
+
if cat_id is None:
|
|
1808
|
+
raise CleanExit(f'Unknown commodity category "{cat_name}"')
|
|
1809
|
+
|
|
1810
|
+
item_rows.append({"item_id": fdev_id, "name": name, "category_id": cat_id, "fdev_id": fdev_id, "ui_order": 0})
|
|
1811
|
+
|
|
1812
|
+
demand = co.get("demand")
|
|
1813
|
+
supply = co.get("supply")
|
|
1814
|
+
buy = co.get("buyPrice")
|
|
1815
|
+
sell = co.get("sellPrice")
|
|
1816
|
+
|
|
1817
|
+
link_rows.append(dict(
|
|
1818
|
+
station_id=station_id,
|
|
1819
|
+
item_id=fdev_id,
|
|
1820
|
+
demand_price=sell,
|
|
1821
|
+
demand_units=demand,
|
|
1822
|
+
demand_level=-1,
|
|
1823
|
+
supply_price=buy,
|
|
1824
|
+
supply_units=supply,
|
|
1825
|
+
supply_level=-1,
|
|
1826
|
+
from_live=0,
|
|
1827
|
+
modified=ts,
|
|
1828
|
+
))
|
|
1829
|
+
|
|
1830
|
+
if item_rows:
|
|
1831
|
+
if db_utils.is_sqlite(self.session):
|
|
1832
|
+
db_utils.sqlite_upsert_simple(self.session, t_item, rows=item_rows, key_cols=("item_id",),
|
|
1833
|
+
update_cols=("name", "category_id", "fdev_id", "ui_order"))
|
|
1834
|
+
elif db_utils.is_mysql(self.session):
|
|
1835
|
+
db_utils.mysql_upsert_simple(self.session, t_item, rows=item_rows, key_cols=("item_id",),
|
|
1836
|
+
update_cols=("name", "category_id", "fdev_id", "ui_order"))
|
|
1837
|
+
else:
|
|
1838
|
+
for r in item_rows:
|
|
1839
|
+
exists = self.session.execute(
|
|
1840
|
+
select(t_item.c.item_id, t_item.c.name, t_item.c.category_id).where(t_item.c.item_id == r["item_id"])
|
|
1841
|
+
).first()
|
|
1842
|
+
if exists is None:
|
|
1843
|
+
self.session.execute(insert(t_item).values(**r))
|
|
1844
|
+
wrote_items += 1
|
|
1845
|
+
else:
|
|
1846
|
+
_, db_name, db_cat = exists
|
|
1847
|
+
if (db_name != r["name"]) or (db_cat != r["category_id"]):
|
|
1848
|
+
self.session.execute(
|
|
1849
|
+
update(t_item).where(t_item.c.item_id == r["item_id"]).values(
|
|
1850
|
+
name=r["name"], category_id=r["category_id"]
|
|
1851
|
+
)
|
|
1852
|
+
)
|
|
1853
|
+
|
|
1854
|
+
wrote_links = 0
|
|
1855
|
+
if link_rows:
|
|
1856
|
+
if db_utils.is_sqlite(self.session):
|
|
1857
|
+
db_utils.sqlite_upsert_modified(self.session, t_si, rows=link_rows,
|
|
1858
|
+
key_cols=("station_id", "item_id"), modified_col="modified",
|
|
1859
|
+
update_cols=("demand_price", "demand_units", "demand_level",
|
|
1860
|
+
"supply_price", "supply_units", "supply_level", "from_live"))
|
|
1861
|
+
wrote_links = len(link_rows)
|
|
1862
|
+
elif db_utils.is_mysql(self.session):
|
|
1863
|
+
db_utils.mysql_upsert_modified(self.session, t_si, rows=link_rows,
|
|
1864
|
+
key_cols=("station_id", "item_id"), modified_col="modified",
|
|
1865
|
+
update_cols=("demand_price", "demand_units", "demand_level",
|
|
1866
|
+
"supply_price", "supply_units", "supply_level", "from_live"))
|
|
1867
|
+
wrote_links = len(link_rows)
|
|
1868
|
+
else:
|
|
1869
|
+
for r in link_rows:
|
|
1870
|
+
si = self.session.execute(
|
|
1871
|
+
select(t_si.c.modified).where(and_(t_si.c.station_id == r["station_id"], t_si.c.item_id == r["item_id"]))
|
|
1872
|
+
).first()
|
|
1873
|
+
if si is None:
|
|
1874
|
+
self.session.execute(insert(t_si).values(**r)); wrote_links += 1
|
|
1875
|
+
else:
|
|
1876
|
+
dbm = si[0]
|
|
1877
|
+
if dbm is None or r["modified"] > dbm:
|
|
1878
|
+
self.session.execute(
|
|
1879
|
+
update(t_si)
|
|
1880
|
+
.where(and_(t_si.c.station_id == r["station_id"], t_si.c.item_id == r["item_id"]))
|
|
1881
|
+
.values(**r)
|
|
1882
|
+
)
|
|
1883
|
+
wrote_links += 1
|
|
1884
|
+
|
|
1885
|
+
return (wrote_items, wrote_links)
|
|
1886
|
+
|
|
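# --- Illustrative sketch (not part of the diff) ----------------------------
# One Spansh market entry maps onto a StationItem row as follows: buyPrice and
# supply feed the supply_* columns, sellPrice and demand the demand_* columns,
# levels are stored as -1 (unknown), and from_live=0 marks the row as baseline
# dump data.  All field values below are invented for illustration.
commodity = {
    "commodityId": 128049202, "name": "Hydrogen Fuel", "category": "Chemicals",
    "buyPrice": 84, "sellPrice": 80, "supply": 120_000, "demand": 0,
}

station_item_row = dict(
    station_id=3228342528,            # invented station id
    item_id=commodity["commodityId"],
    demand_price=commodity["sellPrice"],
    demand_units=commodity["demand"],
    demand_level=-1,
    supply_price=commodity["buyPrice"],
    supply_units=commodity["supply"],
    supply_level=-1,
    from_live=0,                      # baseline (dump) data, not live
    modified=None,                    # the importer uses the market updateTime here
)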
1887
|
+
# ------------------------------
|
|
1888
|
+
# UI ordering
|
|
1889
|
+
# ------------------------------
|
|
1890
|
+
def _enforce_ui_order(self, session: Session, tables: Dict[str, Table]) -> None:
|
|
1891
|
+
t_item, t_cat = tables["Item"], tables["Category"]
|
|
1892
|
+
cats = session.execute(select(t_cat.c.category_id)).all()
|
|
1893
|
+
for (cat_id,) in cats:
|
|
1894
|
+
rows = session.execute(
|
|
1895
|
+
select(t_item.c.item_id, t_item.c.name, t_item.c.ui_order)
|
|
1896
|
+
.where(t_item.c.category_id == cat_id)
|
|
1897
|
+
.order_by(func.lower(t_item.c.name).asc(), t_item.c.name.asc(), t_item.c.item_id.asc())
|
|
1898
|
+
).all()
|
|
1899
|
+
expected = 1
|
|
1900
|
+
for item_id, _name, ui_order in rows:
|
|
1901
|
+
if ui_order != expected:
|
|
1902
|
+
session.execute(update(t_item).where(t_item.c.item_id == item_id).values(ui_order=expected))
|
|
1903
|
+
expected += 1
|
|
1904
|
+
|
|
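# --- Illustrative sketch (not part of the diff) ----------------------------
# _enforce_ui_order() simply renumbers each category densely 1..N, ordered
# case-insensitively by item name.  A self-contained illustration of the same
# renumbering rule; the item tuples are invented.
items = [  # (item_id, name, ui_order) within one category
    (10, "Beer", 3),
    (11, "Liquor", 3),
    (12, "Wine", 9),
]

expected = 1
fixes = []
for item_id, name, ui_order in sorted(items, key=lambda r: (r[1].lower(), r[1], r[0])):
    if ui_order != expected:
        fixes.append((item_id, expected))   # would become UPDATE Item SET ui_order=...
    expected += 1

print(fixes)   # [(10, 1), (11, 2), (12, 3)]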
1905
|
+
# ------------------------------
|
|
1906
|
+
# Rares import (via cache.processImportFile)
|
|
1907
|
+
# ------------------------------
|
|
1908
|
+
def _import_rareitems_edcd(self, rares_csv: Path, commodity_csv: Optional[Path] = None) -> None:
|
|
1909
|
+
"""
|
|
1910
|
+
EDCD rares → TD.RareItem
|
|
1911
|
+
|
|
1912
|
+
Supports CSV shapes:
|
|
1913
|
+
A) name, system, station
|
|
1914
|
+
B) id, symbol, market_id, category, name (FDevIDs canonical)
|
|
1915
|
+
|
|
1916
|
+
Shape B maps: station_id = int(market_id), category by name.
|
|
1917
|
+
Clears RareItem then upserts by UNIQUE(name). Writes a CSV of skipped rows to tmp/.
|
|
1918
|
+
"""
|
|
1919
|
+
|
|
1920
|
+
def _norm(s: Optional[str]) -> str:
|
|
1921
|
+
if s is None: return ""
|
|
1922
|
+
s = s.strip().strip("'").strip('"')
|
|
1923
|
+
s = s.replace("’", "'").replace("‘", "'")
|
|
1924
|
+
s = s.replace("–", "-").replace("—", "-")
|
|
1925
|
+
s = " ".join(s.split())
|
|
1926
|
+
return s.casefold()
|
|
1927
|
+
|
|
1928
|
+
def _kwant(fieldnames, *aliases) -> Optional[str]:
|
|
1929
|
+
if not fieldnames: return None
|
|
1930
|
+
canon = {}
|
|
1931
|
+
for h in fieldnames or []:
|
|
1932
|
+
if not h: continue
|
|
1933
|
+
k = h.strip().lower().replace("_", "").replace(" ", "")
|
|
1934
|
+
canon[k] = h
|
|
1935
|
+
for a in aliases:
|
|
1936
|
+
k = a.strip().lower().replace("_", "").replace(" ", "")
|
|
1937
|
+
if k in canon: return canon[k]
|
|
1938
|
+
return None
|
|
1939
|
+
|
|
1940
|
+
sess = None
|
|
1941
|
+
try:
|
|
1942
|
+
sess = self._open_session()
|
|
1943
|
+
tables = self._reflect_tables(sess.get_bind())
|
|
1944
|
+
t_sys, t_stn, t_cat, t_rare = tables["System"], tables["Station"], tables["Category"], tables["RareItem"]
|
|
1945
|
+
|
|
1946
|
+
# Build lookups for Shape A
|
|
1947
|
+
stn_by_names: Dict[tuple[str, str], int] = {}
|
|
1948
|
+
for sid, sys_name, stn_name in sess.execute(
|
|
1949
|
+
select(t_stn.c.station_id, t_sys.c.name, t_stn.c.name).where(t_stn.c.system_id == t_sys.c.system_id)
|
|
1950
|
+
).all():
|
|
1951
|
+
if sys_name and stn_name:
|
|
1952
|
+
stn_by_names[(_norm(sys_name), _norm(stn_name))] = int(sid)
|
|
1953
|
+
|
|
1954
|
+
# Category name -> id (from DB)
|
|
1955
|
+
cat_id_by_name = {
|
|
1956
|
+
_norm(n): int(cid)
|
|
1957
|
+
for cid, n in sess.execute(select(t_cat.c.category_id, t_cat.c.name)).all()
|
|
1958
|
+
if n is not None
|
|
1959
|
+
}
|
|
1960
|
+
|
|
1961
|
+
kept = skipped = 0
|
|
1962
|
+
skipped_no_station = 0
|
|
1963
|
+
skipped_no_category = 0
|
|
1964
|
+
out_rows: list[dict] = []
|
|
1965
|
+
skipped_rows: list[dict] = [] # <-- record details
|
|
1966
|
+
|
|
1967
|
+
with open(rares_csv, "r", encoding="utf-8", newline="") as fh:
|
|
1968
|
+
reader = csv.DictReader(fh)
|
|
1969
|
+
hdr = [h for h in (reader.fieldnames or []) if h]
|
|
1970
|
+
hdr_canon = [h.lower().replace("_", "").replace(" ", "") for h in hdr]
|
|
1971
|
+
|
|
1972
|
+
has_market_shape = all(x in hdr_canon for x in ["id", "symbol", "marketid", "category", "name"])
|
|
1973
|
+
has_name_shape = all(x in hdr_canon for x in ["name", "system", "station"])
|
|
1974
|
+
|
|
1975
|
+
if not (has_market_shape or has_name_shape):
|
|
1976
|
+
raise CleanExit(
|
|
1977
|
+
"rare_commodity.csv headers not recognized. "
|
|
1978
|
+
f"Seen headers: {', '.join(reader.fieldnames or [])}. File: {rares_csv}"
|
|
1979
|
+
)
|
|
1980
|
+
|
|
1981
|
+
if has_market_shape:
|
|
1982
|
+
# FDevIDs: station_id = int(market_id)
|
|
1983
|
+
k_name = _kwant(reader.fieldnames, "name")
|
|
1984
|
+
k_market = _kwant(reader.fieldnames, "market_id", "marketid")
|
|
1985
|
+
k_cat = _kwant(reader.fieldnames, "category", "categoryname")
|
|
1986
|
+
|
|
1987
|
+
for row in reader:
|
|
1988
|
+
rn_raw = row.get(k_name)
|
|
1989
|
+
mk_raw = row.get(k_market)
|
|
1990
|
+
cat_raw= row.get(k_cat)
|
|
1991
|
+
|
|
1992
|
+
try:
|
|
1993
|
+
station_id = int(mk_raw) if mk_raw is not None else None
|
|
1994
|
+
except (TypeError, ValueError):
|
|
1995
|
+
station_id = None
|
|
1996
|
+
|
|
1997
|
+
# validate station exists
|
|
1998
|
+
if station_id is None or sess.execute(
|
|
1999
|
+
select(t_stn.c.station_id).where(t_stn.c.station_id == station_id)
|
|
2000
|
+
).first() is None:
|
|
2001
|
+
skipped += 1; skipped_no_station += 1
|
|
2002
|
+
skipped_rows.append({"reason":"no_station","name":rn_raw,"market_id":mk_raw,"category":cat_raw})
|
|
2003
|
+
continue
|
|
2004
|
+
|
|
2005
|
+
cid = cat_id_by_name.get(_norm(cat_raw))
|
|
2006
|
+
if cid is None:
|
|
2007
|
+
skipped += 1; skipped_no_category += 1
|
|
2008
|
+
skipped_rows.append({"reason":"no_category","name":rn_raw,"market_id":mk_raw,"category":cat_raw})
|
|
2009
|
+
continue
|
|
2010
|
+
|
|
2011
|
+
out_rows.append({
|
|
2012
|
+
"name": rn_raw,
|
|
2013
|
+
"station_id": station_id,
|
|
2014
|
+
"category_id": cid,
|
|
2015
|
+
"cost": None,
|
|
2016
|
+
"max_allocation": None,
|
|
2017
|
+
})
|
|
2018
|
+
kept += 1
|
|
2019
|
+
|
|
2020
|
+
else:
|
|
2021
|
+
# Legacy/community: need commodity.csv to map product -> category
|
|
2022
|
+
name_to_catid: Dict[str, int] = {}
|
|
2023
|
+
if commodity_csv is None:
|
|
2024
|
+
files = self._acquire_edcd_files()
|
|
2025
|
+
commodity_csv = files.get("commodity")
|
|
2026
|
+
if commodity_csv and Path(commodity_csv).exists():
|
|
2027
|
+
with open(commodity_csv, "r", encoding="utf-8", newline="") as fh2:
|
|
2028
|
+
rd2 = csv.DictReader(fh2)
|
|
2029
|
+
k2_name = _kwant(rd2.fieldnames, "name","commodity","commodityname","product")
|
|
2030
|
+
k2_cat = _kwant(rd2.fieldnames, "category","categoryname")
|
|
2031
|
+
if k2_name and k2_cat:
|
|
2032
|
+
for r2 in rd2:
|
|
2033
|
+
n = _norm(r2.get(k2_name)); c = _norm(r2.get(k2_cat))
|
|
2034
|
+
if n and c:
|
|
2035
|
+
cid = cat_id_by_name.get(c)
|
|
2036
|
+
if cid is not None:
|
|
2037
|
+
name_to_catid[n] = cid
|
|
2038
|
+
|
|
2039
|
+
k_name = _kwant(reader.fieldnames, "name","commodity","commodityname","product")
|
|
2040
|
+
k_system = _kwant(reader.fieldnames, "system","systemname")
|
|
2041
|
+
k_station = _kwant(reader.fieldnames, "station","stationname")
|
|
2042
|
+
|
|
2043
|
+
for row in reader:
|
|
2044
|
+
rn_raw = row.get(k_name)
|
|
2045
|
+
sys_raw = row.get(k_system)
|
|
2046
|
+
stn_raw = row.get(k_station)
|
|
2047
|
+
rn = _norm(rn_raw); sysn = _norm(sys_raw); stnn = _norm(stn_raw)
|
|
2048
|
+
|
|
2049
|
+
if not rn or not sysn or not stnn:
|
|
2050
|
+
skipped += 1
|
|
2051
|
+
skipped_rows.append({"reason":"missing_fields","name":rn_raw,"system":sys_raw,"station":stn_raw})
|
|
2052
|
+
continue
|
|
2053
|
+
|
|
2054
|
+
station_id = stn_by_names.get((sysn, stnn))
|
|
2055
|
+
if station_id is None:
|
|
2056
|
+
skipped += 1; skipped_no_station += 1
|
|
2057
|
+
skipped_rows.append({"reason":"no_station","name":rn_raw,"system":sys_raw,"station":stn_raw})
|
|
2058
|
+
continue
|
|
2059
|
+
|
|
2060
|
+
cid = name_to_catid.get(rn)
|
|
2061
|
+
if cid is None:
|
|
2062
|
+
skipped += 1; skipped_no_category += 1
|
|
2063
|
+
skipped_rows.append({"reason":"no_category","name":rn_raw,"system":sys_raw,"station":stn_raw})
|
|
2064
|
+
continue
|
|
2065
|
+
|
|
2066
|
+
out_rows.append({
|
|
2067
|
+
"name": rn_raw,
|
|
2068
|
+
"station_id": station_id,
|
|
2069
|
+
"category_id": cid,
|
|
2070
|
+
"cost": None,
|
|
2071
|
+
"max_allocation": None,
|
|
2072
|
+
})
|
|
2073
|
+
kept += 1
|
|
2074
|
+
|
|
2075
|
+
# Clear → upsert
|
|
2076
|
+
try:
|
|
2077
|
+
sess.execute(text('DELETE FROM "RareItem"'))
|
|
2078
|
+
except Exception:
|
|
2079
|
+
sess.execute(text("DELETE FROM RareItem"))
|
|
2080
|
+
|
|
2081
|
+
if out_rows:
|
|
2082
|
+
if db_utils.is_sqlite(sess):
|
|
2083
|
+
db_utils.sqlite_upsert_simple(
|
|
2084
|
+
sess, t_rare, rows=out_rows, key_cols=("name",),
|
|
2085
|
+
update_cols=tuple(k for k in out_rows[0].keys() if k != "name")
|
|
2086
|
+
)
|
|
2087
|
+
elif db_utils.is_mysql(sess):
|
|
2088
|
+
db_utils.mysql_upsert_simple(
|
|
2089
|
+
sess, t_rare, rows=out_rows, key_cols=("name",),
|
|
2090
|
+
update_cols=tuple(k for k in out_rows[0].keys() if k != "name")
|
|
2091
|
+
)
|
|
2092
|
+
else:
|
|
2093
|
+
for r in out_rows:
|
|
2094
|
+
ex = sess.execute(select(t_rare.c.name).where(t_rare.c.name == r["name"])).first()
|
|
2095
|
+
if ex is None:
|
|
2096
|
+
sess.execute(insert(t_rare).values(**r))
|
|
2097
|
+
else:
|
|
2098
|
+
sess.execute(
|
|
2099
|
+
update(t_rare).where(t_rare.c.name == r["name"])
|
|
2100
|
+
.values({k: r[k] for k in r.keys() if k != "name"})
|
|
2101
|
+
)
|
|
2102
|
+
sess.commit()
|
|
2103
|
+
|
|
2104
|
+
# Write a CSV with skipped details
|
|
2105
|
+
if skipped_rows:
|
|
2106
|
+
outp = self.tmp_dir / "edcd_rares_skipped.csv"
|
|
2107
|
+
keys = sorted({k for r in skipped_rows for k in r.keys()})
|
|
2108
|
+
with open(outp, "w", encoding="utf-8", newline="") as fh:
|
|
2109
|
+
w = csv.DictWriter(fh, fieldnames=keys)
|
|
2110
|
+
w.writeheader(); w.writerows(skipped_rows)
|
|
2111
|
+
self._print(f"EDCD Rares: imported={kept:,} skipped={skipped:,} "
|
|
2112
|
+
f"(no_station={skipped_no_station:,}, no_category={skipped_no_category:,}) "
|
|
2113
|
+
f"→ details: {outp}")
|
|
2114
|
+
else:
|
|
2115
|
+
self._print(f"EDCD Rares: imported={kept:,} skipped={skipped:,} "
|
|
2116
|
+
f"(no_station={skipped_no_station:,}, no_category={skipped_no_category:,})")
|
|
2117
|
+
|
|
2118
|
+
except Exception as e:
|
|
2119
|
+
if sess is not None:
|
|
2120
|
+
try: sess.rollback()
|
|
2121
|
+
except Exception: pass
|
|
2122
|
+
raise CleanExit(f"RareItem import failed: {e!r}")
|
|
2123
|
+
finally:
|
|
2124
|
+
if sess is not None:
|
|
2125
|
+
try: sess.close()
|
|
2126
|
+
except Exception: pass
|
|
2127
|
+
|
|
2128
|
+
|
|
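# --- Illustrative sketch (not part of the diff) ----------------------------
# The two header shapes accepted above, shown as in-memory samples so the
# mapping is concrete.  Every value here is made up; real files come from
# EDCD / FDevIDs.
import csv
import io

# Shape B, FDevIDs canonical: station resolved directly from market_id.
shape_b = io.StringIO(
    "id,symbol,market_id,category,name\n"
    "1,lavianbrandy,128666762,Legal Drugs,Lavian Brandy\n"
)

# Shape A, legacy/community: station resolved by (system, station) name pair.
shape_a = io.StringIO(
    "name,system,station\n"
    "Lavian Brandy,Lave,Lave Station\n"
)

for sample in (shape_b, shape_a):
    for row in csv.DictReader(sample):
        print(row)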
2129
|
+
# ------------------------------
|
|
2130
|
+
# Export / cache refresh
|
|
2131
|
+
# ------------------------------
|
|
2132
|
+
def _export_cache(self) -> None:
|
|
2133
|
+
"""Export CSVs and regenerate TradeDangerous.prices — concurrently, with optional StationItem gating."""
|
|
2134
|
+
|
|
2135
|
+
# Option/env gate for StationItem export (large file)
|
|
2136
|
+
def _opt_true(val: Optional[str]) -> bool:
|
|
2137
|
+
if val is None:
|
|
2138
|
+
return False
|
|
2139
|
+
if isinstance(val, str):
|
|
2140
|
+
return val.strip().lower() in ("1", "true", "yes", "on", "y")
|
|
2141
|
+
return bool(val)
|
|
2142
|
+
|
|
2143
|
+
skip_stationitems = _opt_true(self.getOption("skip_stationitems")) or _opt_true(os.environ.get("TD_SKIP_STATIONITEM_EXPORT"))
|
|
2144
|
+
|
|
2145
|
+
# Heaviest tables first to maximize overlap
|
|
2146
|
+
tables = [
|
|
2147
|
+
"StationItem",
|
|
2148
|
+
"ShipVendor",
|
|
2149
|
+
"UpgradeVendor",
|
|
2150
|
+
"Station",
|
|
2151
|
+
"System",
|
|
2152
|
+
"Item",
|
|
2153
|
+
"Ship",
|
|
2154
|
+
"Upgrade",
|
|
2155
|
+
"RareItem",
|
|
2156
|
+
"FDevOutfitting",
|
|
2157
|
+
"FDevShipyard",
|
|
2158
|
+
]
|
|
2159
|
+
if skip_stationitems:
|
|
2160
|
+
tables = [t for t in tables if t != "StationItem"]
|
|
2161
|
+
|
|
2162
|
+
# Worker count (env override allowed); +1 slot reserved for prices task
|
|
2163
|
+
try:
|
|
2164
|
+
workers = int(os.environ.get("TD_EXPORT_WORKERS", "4"))
|
|
2165
|
+
except ValueError:
|
|
2166
|
+
workers = 4
|
|
2167
|
+
workers = max(1, workers) + 1 # extra slot for the prices job
|
|
2168
|
+
|
|
2169
|
+
def _export_one(table_name: str) -> str:
|
|
2170
|
+
sess = None
|
|
2171
|
+
try:
|
|
2172
|
+
sess = self._open_session() # fresh session per worker
|
|
2173
|
+
csvexport.exportTableToFile(sess, self.tdenv, table_name)
|
|
2174
|
+
return f"{table_name}.csv"
|
|
2175
|
+
finally:
|
|
2176
|
+
if sess is not None:
|
|
2177
|
+
try:
|
|
2178
|
+
sess.close()
|
|
2179
|
+
except Exception:
|
|
2180
|
+
pass
|
|
2181
|
+
|
|
2182
|
+
def _regen_prices() -> str:
|
|
2183
|
+
cache.regeneratePricesFile(self.tdb, self.tdenv)
|
|
2184
|
+
return "TradeDangerous.prices"
|
|
2185
|
+
|
|
2186
|
+
self._print("Exporting to cache...")
|
|
2187
|
+
for t in tables:
|
|
2188
|
+
self._print(f" - {t}.csv")
|
|
2189
|
+
if skip_stationitems:
|
|
2190
|
+
self._warn("Skipping StationItem.csv export (requested).")
|
|
2191
|
+
self._print("Regenerating TradeDangerous.prices …")
|
|
2192
|
+
|
|
2193
|
+
# Parallel export + prices regen, with conservative fallback
|
|
2194
|
+
try:
|
|
2195
|
+
with ThreadPoolExecutor(max_workers=workers) as ex:
|
|
2196
|
+
futures = {ex.submit(_export_one, t): f"{t}.csv" for t in tables}
|
|
2197
|
+
futures[ex.submit(_regen_prices)] = "TradeDangerous.prices"
|
|
2198
|
+
for fut in as_completed(futures):
|
|
2199
|
+
_ = fut.result() # raise on any worker failure
|
|
2200
|
+
except Exception as e:
|
|
2201
|
+
self._warn(f"Parallel export encountered an error ({e!r}); falling back to serial.")
|
|
2202
|
+
for t in tables:
|
|
2203
|
+
_export_one(t)
|
|
2204
|
+
_regen_prices()
|
|
2205
|
+
|
|
2206
|
+
self._print("Cache export completed.")
|
|
2207
|
+
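
_export_cache fans the per-table CSV exports and the .prices regeneration out over one thread pool, then falls back to serial execution if any worker raises. A stripped-down, self-contained sketch of that submit/as_completed pattern, using placeholder work functions rather than the real exporters:

# Illustrative sketch, not part of the release diff: parallel fan-out with a
# conservative serial fallback. export_table/regen_prices are stand-ins.
from concurrent.futures import ThreadPoolExecutor, as_completed

def export_table(name: str) -> str:
    return f"{name}.csv"            # stand-in for csvexport.exportTableToFile

def regen_prices() -> str:
    return "TradeDangerous.prices"  # stand-in for cache.regeneratePricesFile

tables = ["Station", "System", "Item"]
try:
    with ThreadPoolExecutor(max_workers=4) as pool:
        futures = {pool.submit(export_table, t): t for t in tables}
        futures[pool.submit(regen_prices)] = "prices"
        for fut in as_completed(futures):
            fut.result()            # re-raise any worker failure here
except Exception:
    for t in tables:                # serial fallback if anything went wrong
        export_table(t)
    regen_prices()
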
+    def _mirror_csv_exports(self) -> None:
+        """
+        If TD_CSV is set, mirror all CSVs emitted into tdenv.dataDir to TD_CSV.
+        Avoids running csvexport twice (Spansh already produced the CSVs).
+        """
+        src_dir = Path(self.tdenv.dataDir).resolve()
+        dst_env = os.environ.get("TD_CSV")
+        if not dst_env:
+            return
+        dst_dir = Path(dst_env).expanduser().resolve()
+        try:
+            dst_dir.mkdir(parents=True, exist_ok=True)
+        except Exception as e:
+            self._warn(f"TD_CSV mirror: unable to create destination {dst_dir}: {e!r}")
+            return
+
+        copied = 0
+        for src in src_dir.glob("*.csv"):
+            try:
+                shutil.copy2(src, dst_dir / src.name)
+                copied += 1
+            except Exception as e:
+                self._warn(f"TD_CSV mirror: failed to copy {src.name}: {e!r}")
+
+        self._print(f"TD_CSV mirror: copied {copied} csv file(s) → {dst_dir}")
+
+    def _export_and_mirror(self) -> None:
+        """
+        Run the normal cache/CSV export, then mirror CSVs to TD_CSV if set.
+        Use this in place of a direct _export_cache() call.
+        """
+        import time
+        t0 = time.time()
+        self._export_cache()  # existing exporter (unchanged)
+        self._print(f"Cache export completed in {time.time()-t0:.2f}s")
+        self._mirror_csv_exports()
+
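
Mirroring only happens when the TD_CSV environment variable names a writable directory. A minimal standalone sketch of the same env-gated copy loop (the paths here are examples only):

# Illustrative sketch, not part of the release diff: env-gated CSV mirroring.
import os
import shutil
from pathlib import Path

src_dir = Path("data")                 # where the exporter wrote its CSVs
dst_env = os.environ.get("TD_CSV")     # e.g. TD_CSV=/srv/td-mirror
if dst_env:
    dst_dir = Path(dst_env).expanduser().resolve()
    dst_dir.mkdir(parents=True, exist_ok=True)
    copied = 0
    for src in src_dir.glob("*.csv"):
        shutil.copy2(src, dst_dir / src.name)   # copy2 preserves mtimes
        copied += 1
    print(f"mirrored {copied} csv file(s) -> {dst_dir}")
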
+    # ------------------------------
+    # Categories cache
+    # ------------------------------
+    def _load_categories(self, session: Session, tables: Dict[str, Table]) -> Dict[str, int]:
+        t_cat = tables["Category"]
+        rows = session.execute(select(t_cat.c.category_id, t_cat.c.name)).all()
+        return {str(name).lower(): int(cid) for (cid, name) in rows}
+
+    # ------------------------------
+    # Streaming JSON reader
+    # ------------------------------
+
+    def _ijson_items(self, fh: io.BufferedReader, prefix: str):
+        """
+        Use the fastest available ijson backend with clean fallback.
+        Order: yajl2_cffi → yajl2_c → yajl2 → python.
+        """
+        try:
+            from ijson.backends import yajl2_cffi as ijson_fast
+            return ijson_fast.items(fh, prefix)
+        except Exception:
+            pass
+        try:
+            from ijson.backends import yajl2_c as ijson_fast  # ctypes wrapper
+            return ijson_fast.items(fh, prefix)
+        except Exception:
+            pass
+        try:
+            from ijson.backends import yajl2 as ijson_fast
+            return ijson_fast.items(fh, prefix)
+        except Exception:
+            pass
+        # Fallback to whatever was imported at module top
+        return ijson.items(fh, prefix)
+
+    def _iter_top_level_json_array(self, fh: io.BufferedReader) -> Generator[Dict[str, Any], None, None]:
+        """
+        High-performance streaming reader for a huge top-level JSON array of systems.
+        NOTE: As of 2025-10, we removed _parse_progress(). This iterator now
+        maintains byte/rate metrics only; rendering is handled by _progress_line().
+        """
+        start_ts = time.time()
+        last_tick_systems = 0
+        TICK_EVERY = 256
+
+        it = self._ijson_items(fh, 'item')
+        for idx, obj in enumerate(it, 1):
+            if (idx - last_tick_systems) >= TICK_EVERY:
+                last_tick_systems = idx
+                # Update parse metrics (no printing here)
+                try:
+                    pos = fh.tell()
+                    elapsed = max(time.time() - start_ts, 1e-9)
+                    self._parse_bytes = pos
+                    self._parse_rate = pos / elapsed
+                except Exception:
+                    pass
+            yield obj
+
+        # Final metric update at EOF
+        try:
+            pos = fh.tell()
+            elapsed = max(time.time() - start_ts, 1e-9)
+            self._parse_bytes = pos
+            self._parse_rate = pos / elapsed
+        except Exception:
+            pass
+
+        if self._is_tty:
+            self._live_status("")
+
+
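
The reader pulls one system object at a time out of a single top-level JSON array, so memory use stays flat no matter how large the dump is. A minimal standalone sketch of the same idea, assuming ijson is installed and "galaxy.json" is a hypothetical dump file:

# Illustrative sketch, not part of the release diff: streaming a top-level
# JSON array with ijson so the whole dump never has to fit in memory.
import ijson

count = 0
with open("galaxy.json", "rb") as fh:
    for system in ijson.items(fh, "item"):   # "item" = each element of the array
        count += 1
        if count % 10_000 == 0:
            print(f"parsed {count:,} systems, at byte {fh.tell():,}")
print(f"done: {count:,} systems")
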
+    # ------------------------------
+    # Mapping / derivations / misc
+    # ------------------------------
+    @staticmethod
+    def _build_station_type_map() -> Dict[Optional[str], Tuple[int, bool]]:
+        return {
+            None: (0, False),
+            "None": (0, False),
+            "Outpost": (1, False),
+            "Coriolis Starport": (2, False),
+            "Ocellus Starport": (3, False),
+            "Orbis Starport": (4, False),
+            "Planetary Outpost": (11, True),
+            "Planetary Port": (12, True),
+            "Mega ship": (13, False),
+            "Asteroid base": (14, False),
+            "Drake-Class Carrier": (24, False),
+            "Settlement": (25, True),
+        }
+
+    def _map_station_type(self, type_name: Optional[str]) -> Tuple[int, str]:
+        if isinstance(type_name, str):
+            res = self._station_type_map.get(type_name)
+            if res:
+                type_id, is_planetary = res
+                return type_id, "Y" if is_planetary else "N"
+        return (0, "?")
+
+    @staticmethod
+    def _derive_pad_size(landing: Mapping[str, Any]) -> str:
+        try:
+            if landing.get("large"):
+                return "L"
+            if landing.get("medium"):
+                return "M"
+            if landing.get("small"):
+                return "S"
+        except Exception:
+            pass
+        return "?"
+
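
Both helpers reduce Spansh's free-text station metadata to the compact encoding used above (numeric type id, Y/N planetary flag, single-letter pad size). A tiny standalone version of the same lookups, reproducing only a few map entries:

# Illustrative sketch, not part of the release diff: reducing station metadata
# to the compact encoding. Only a handful of map entries are shown.
from typing import Optional, Tuple

STATION_TYPES = {
    "Outpost": (1, False),
    "Coriolis Starport": (2, False),
    "Planetary Outpost": (11, True),
}

def map_station_type(name: Optional[str]) -> Tuple[int, str]:
    if isinstance(name, str) and name in STATION_TYPES:
        type_id, planetary = STATION_TYPES[name]
        return type_id, "Y" if planetary else "N"
    return 0, "?"

def pad_size(landing: dict) -> str:
    for key, letter in (("large", "L"), ("medium", "M"), ("small", "S")):
        if landing.get(key):
            return letter
    return "?"

print(map_station_type("Planetary Outpost"))   # (11, 'Y')
print(pad_size({"medium": 2, "small": 4}))     # 'M'
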
+    def _resolve_batch_size(self) -> Optional[int]:
+        """
+        Decide commit batch size for *spansh* profile.
+        """
+        if self.session is not None and hasattr(db_utils, "get_import_batch_size"):
+            try:
+                val = db_utils.get_import_batch_size(self.session, profile="spansh")
+                if val is not None:
+                    return val
+            except Exception:
+                pass
+
+        raw = os.environ.get("TD_LISTINGS_BATCH")
+        if raw is not None:
+            try:
+                envv = int(raw)
+                return envv if envv > 0 else None
+            except ValueError:
+                pass
+
+        try:
+            if db_utils.is_sqlite(self.session):
+                return None
+            if db_utils.is_mysql(self.session):
+                return 50_000
+        except Exception:
+            pass
+
+        return 5_000
+
+    # ---- ts/format/logging helpers ----
+    def _parse_ts(self, value: Any) -> Optional[datetime]:
+        try:
+            return db_utils.parse_ts(value)  # UTC-naive, μs=0
+        except Exception:
+            return None
+
+    @staticmethod
+    def _format_hms(seconds: float) -> str:
+        m, s = divmod(int(seconds), 60)
+        h, m = divmod(m, 60)
+        return f"{h}:{m:02d}:{s:02d}"
+
+    def _fmt_bytes(self, n: float) -> str:
+        units = ["B", "KiB", "MiB", "GiB", "TiB"]
+        i = 0
+        while n >= 1024 and i < len(units) - 1:
+            n /= 1024.0
+            i += 1
+        return f"{int(n)} {units[i]}" if i == 0 else f"{n:.1f} {units[i]}"
+
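
Batch size is resolved in priority order: a db_utils hint for the spansh profile, then the TD_LISTINGS_BATCH environment variable, then a per-dialect default (unbatched on SQLite, 50,000 on MySQL, 5,000 otherwise). A small sketch of just the environment-variable layer:

# Illustrative sketch, not part of the release diff: parsing a
# TD_LISTINGS_BATCH override; non-numeric values fall through to the default.
import os
from typing import Optional

def batch_from_env(default: Optional[int] = 5_000) -> Optional[int]:
    raw = os.environ.get("TD_LISTINGS_BATCH")
    if raw is not None:
        try:
            value = int(raw)
            return value if value > 0 else None   # <= 0 means "no batching"
        except ValueError:
            pass                                  # ignore junk, use the default
    return default

os.environ["TD_LISTINGS_BATCH"] = "25000"
print(batch_from_env())   # 25000
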
+    def _progress_line(self, stats: Dict[str, int]) -> None:
+        """
+        Single-line live status while importing.
+
+        Modes:
+        - default (verbose-ish): rich long line
+        - compact: shorter, log-friendly line (enable with -O progress_compact=1 or TD_PROGRESS_COMPACT=1)
+        """
+        now = time.time()
+        if now - self._last_progress_time < (0.5 if self._debug_level < 1 else 0.2):
+            return
+        self._last_progress_time = now
+        self._started_importing = True
+
+        # Determine compact mode (CLI overrides env; default is rich/False)
+        # Truthy whitelist: 1, true, yes, on, y (case-insensitive)
+        _opt = self.getOption("progress_compact")
+        if _opt is not None:
+            _val = str(_opt).strip().lower()
+        else:
+            _env = os.getenv("TD_PROGRESS_COMPACT")
+            _val = "" if _env is None else str(_env).strip().lower()
+        compact = _val in {"1", "true", "yes", "on", "y"}
+
+        parse_bytes = getattr(self, "_parse_bytes", 0)
+        parse_rate = getattr(self, "_parse_rate", 0.0)
+        systems = stats.get("systems", 0)
+        stations = stats.get("stations", 0)
+
+        wm = stats.get("market_writes", 0)
+        wo = stats.get("outfit_writes", 0)
+        ws = stats.get("ship_writes", 0)
+
+        km = stats.get("market_stations", 0)
+        ko = stats.get("outfit_stations", 0)
+        ks = stats.get("ship_stations", 0)
+
+        if compact:
+            # Compact, log-friendly (newline prints)
+            msg = (
+                f"Importing… {parse_bytes/1048576:.1f} MiB read {parse_rate/1048576:.1f} MiB/s "
+                f"systems:{systems:,} stations:{stations:,} "
+                f"checked m/o/s:{km:,}/{ko:,}/{ks:,} written m/o/s:{wm:,}/{wo:,}/{ws:,}"
             )
+            self._print(msg)
+            return
 
-
-
-
-
-
-
-                    id=ship.get('shipId'),
-                    name=ship.get('name'),
-                    modified=parse_ts(shipyard.get('updateTime'))
-                )
+        # Rich/long line (TTY-optimized; truncated only on TTY)
+        msg = (
+            f"Importing… {parse_bytes/1048576:.1f} MiB read {parse_rate/1048576:.1f} MiB/s "
+            f"[Parsed - Systems: {systems:,} Stations: {stations:,}] "
+            f"Checked(stations): mkt={km:,} outf={ko:,} shp={ks:,} "
+            f"Written(stations): mkt={wm:,} outf={wo:,} shp={ws:,}"
 
-    def ingest_outfitting(outfitting):
-        """Ingest station-level market data, yielding commodities."""
-        if not outfitting or not outfitting.get('modules'):
-            return None
-        for module in outfitting['modules']:
-            yield Module(
-                id=module.get('moduleId'),
-                name=module.get('name'),
-                cls=module.get('class'),
-                rating=module.get('rating'),
-                ship=module.get('ship'),
-                modified=parse_ts(outfitting.get('updateTime'))
        )
+        self._live_status(msg)
 
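
_progress_line throttles itself to at most one repaint per half second at low verbosity and chooses between a compact log line and a rich TTY line. A self-contained sketch of the throttle plus a compact-style format string:

# Illustrative sketch, not part of the release diff: a rate-limited progress
# printer in the same spirit, with stats keys mirroring the ones above.
import time

_last = 0.0

def progress(stats: dict, min_interval: float = 0.5) -> None:
    global _last
    now = time.time()
    if now - _last < min_interval:   # throttle repaints
        return
    _last = now
    print(
        f"Importing… systems:{stats.get('systems', 0):,} "
        f"stations:{stats.get('stations', 0):,} "
        f"written m/o/s:{stats.get('market_writes', 0):,}/"
        f"{stats.get('outfit_writes', 0):,}/{stats.get('ship_writes', 0):,}"
    )

for i in range(5):
    progress({"systems": i * 1000, "stations": i * 3000, "market_writes": i})
    time.sleep(0.2)
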
-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def _live_line(self, msg: str) -> None:
+        self._live_status(msg)
+
+    def _live_status(self, msg: str) -> None:
+        """
+        Live status line for TTY; plain prints for non-TTY.
+        IMPORTANT: only truncate when TTY so logs are not cut off.
+        """
+        try:
+            if self._is_tty:
+                import shutil
+                width = shutil.get_terminal_size(fallback=(120, 20)).columns
+                if width and width > 4:
+                    msg = msg[: width - 2]
+                s = f"\x1b[2K\r{msg}"
+                sys.stderr.write(s)
+                sys.stderr.flush()
+            else:
+                # Non-TTY: emit full line, no truncation, no control codes.
+                self._print(msg)
+        except Exception:
+            self._print(msg)
+
+    def _end_live_status(self) -> None:
+        try:
+            if self._is_tty:
+                sys.stderr.write("\x1b[2K\r\n")
+                sys.stderr.flush()
+        except Exception:
+            pass
+
+    # ---- printing/warnings ----
+    def _print(self, *args, **kwargs):
+        printer = getattr(self.tdenv, "print", None)
+        if callable(printer):
+            printer(*args, **kwargs)
+        else:
+            print(*args, **kwargs)
+
+    def _warn(self, msg: str):
+        if self._warn_enabled:
+            self._print(f"WARNING: {msg}")
+
+    def _error(self, msg: str):
+        self._print(f"ERROR: {msg}")
+
+    def _safe_close_session(self):
+        try:
+            if self.session is not None:
+                self.session.close()
+        except Exception:
+            pass
+        self.session = None
 
-
-
-
-
-
-
-            ts += '.0'
-        return datetime.strptime(ts, '%Y-%m-%d %H:%M:%S.%f').replace(microsecond=0)
+
+
+# -----------------------------------------------------------------------------
+# Exceptions
+# -----------------------------------------------------------------------------
+class CleanExit(Exception):
+    """Controlled early exit: log and stop this run so schedulers can retry later."""
+    pass
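
CleanExit is the plugin's controlled-abort signal: raising it stops the current run with a message while leaving schedulers free to retry on the next cycle. A sketch of how a caller might treat it differently from a hard failure (run_import is a hypothetical stand-in for invoking the plugin):

# Illustrative sketch, not part of the release diff: distinguishing a
# controlled CleanExit from an unexpected failure.
import sys

class CleanExit(Exception):
    """Controlled early exit: log and stop this run so schedulers can retry later."""

def run_import() -> None:
    raise CleanExit("source data not ready yet")

try:
    run_import()
except CleanExit as exc:
    print(f"import skipped: {exc}")    # benign: the scheduler retries next cycle
    sys.exit(0)
except Exception as exc:
    print(f"import failed: {exc!r}")   # real error: exit non-zero
    sys.exit(1)
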