Qubx 0.5.7 (cp312-cp312-manylinux_2_39_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Qubx might be problematic.
- qubx/__init__.py +207 -0
- qubx/_nb_magic.py +100 -0
- qubx/backtester/__init__.py +5 -0
- qubx/backtester/account.py +145 -0
- qubx/backtester/broker.py +87 -0
- qubx/backtester/data.py +296 -0
- qubx/backtester/management.py +378 -0
- qubx/backtester/ome.py +296 -0
- qubx/backtester/optimization.py +201 -0
- qubx/backtester/simulated_data.py +558 -0
- qubx/backtester/simulator.py +362 -0
- qubx/backtester/utils.py +780 -0
- qubx/cli/__init__.py +0 -0
- qubx/cli/commands.py +67 -0
- qubx/connectors/ccxt/__init__.py +0 -0
- qubx/connectors/ccxt/account.py +495 -0
- qubx/connectors/ccxt/broker.py +132 -0
- qubx/connectors/ccxt/customizations.py +193 -0
- qubx/connectors/ccxt/data.py +612 -0
- qubx/connectors/ccxt/exceptions.py +17 -0
- qubx/connectors/ccxt/factory.py +93 -0
- qubx/connectors/ccxt/utils.py +307 -0
- qubx/core/__init__.py +0 -0
- qubx/core/account.py +251 -0
- qubx/core/basics.py +850 -0
- qubx/core/context.py +420 -0
- qubx/core/exceptions.py +38 -0
- qubx/core/helpers.py +480 -0
- qubx/core/interfaces.py +1150 -0
- qubx/core/loggers.py +514 -0
- qubx/core/lookups.py +475 -0
- qubx/core/metrics.py +1512 -0
- qubx/core/mixins/__init__.py +13 -0
- qubx/core/mixins/market.py +94 -0
- qubx/core/mixins/processing.py +428 -0
- qubx/core/mixins/subscription.py +203 -0
- qubx/core/mixins/trading.py +88 -0
- qubx/core/mixins/universe.py +270 -0
- qubx/core/series.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/series.pxd +125 -0
- qubx/core/series.pyi +118 -0
- qubx/core/series.pyx +988 -0
- qubx/core/utils.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/utils.pyi +6 -0
- qubx/core/utils.pyx +62 -0
- qubx/data/__init__.py +25 -0
- qubx/data/helpers.py +416 -0
- qubx/data/readers.py +1562 -0
- qubx/data/tardis.py +100 -0
- qubx/gathering/simplest.py +88 -0
- qubx/math/__init__.py +3 -0
- qubx/math/stats.py +129 -0
- qubx/pandaz/__init__.py +23 -0
- qubx/pandaz/ta.py +2757 -0
- qubx/pandaz/utils.py +638 -0
- qubx/resources/instruments/symbols-binance.cm.json +1 -0
- qubx/resources/instruments/symbols-binance.json +1 -0
- qubx/resources/instruments/symbols-binance.um.json +1 -0
- qubx/resources/instruments/symbols-bitfinex.f.json +1 -0
- qubx/resources/instruments/symbols-bitfinex.json +1 -0
- qubx/resources/instruments/symbols-kraken.f.json +1 -0
- qubx/resources/instruments/symbols-kraken.json +1 -0
- qubx/ta/__init__.py +0 -0
- qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/ta/indicators.pxd +149 -0
- qubx/ta/indicators.pyi +41 -0
- qubx/ta/indicators.pyx +787 -0
- qubx/trackers/__init__.py +3 -0
- qubx/trackers/abvanced.py +236 -0
- qubx/trackers/composite.py +146 -0
- qubx/trackers/rebalancers.py +129 -0
- qubx/trackers/riskctrl.py +641 -0
- qubx/trackers/sizers.py +235 -0
- qubx/utils/__init__.py +5 -0
- qubx/utils/_pyxreloader.py +281 -0
- qubx/utils/charting/lookinglass.py +1057 -0
- qubx/utils/charting/mpl_helpers.py +1183 -0
- qubx/utils/marketdata/binance.py +284 -0
- qubx/utils/marketdata/ccxt.py +90 -0
- qubx/utils/marketdata/dukas.py +130 -0
- qubx/utils/misc.py +541 -0
- qubx/utils/ntp.py +63 -0
- qubx/utils/numbers_utils.py +7 -0
- qubx/utils/orderbook.py +491 -0
- qubx/utils/plotting/__init__.py +0 -0
- qubx/utils/plotting/dashboard.py +150 -0
- qubx/utils/plotting/data.py +137 -0
- qubx/utils/plotting/interfaces.py +25 -0
- qubx/utils/plotting/renderers/__init__.py +0 -0
- qubx/utils/plotting/renderers/plotly.py +0 -0
- qubx/utils/runner/__init__.py +1 -0
- qubx/utils/runner/_jupyter_runner.pyt +60 -0
- qubx/utils/runner/accounts.py +88 -0
- qubx/utils/runner/configs.py +65 -0
- qubx/utils/runner/runner.py +470 -0
- qubx/utils/time.py +312 -0
- qubx-0.5.7.dist-info/METADATA +105 -0
- qubx-0.5.7.dist-info/RECORD +100 -0
- qubx-0.5.7.dist-info/WHEEL +4 -0
- qubx-0.5.7.dist-info/entry_points.txt +3 -0
qubx/backtester/utils.py
ADDED
@@ -0,0 +1,780 @@
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, TypeAlias

import numpy as np
import pandas as pd
import stackprinter

from qubx import logger, lookup
from qubx.core.basics import (
    CtrlChannel,
    DataType,
    Instrument,
    ITimeProvider,
    Signal,
    TimestampedDict,
    TriggerEvent,
    dt_64,
)
from qubx.core.exceptions import SimulationConfigError, SimulationError
from qubx.core.helpers import BasicScheduler
from qubx.core.interfaces import IStrategy, IStrategyContext, PositionsTracker
from qubx.core.series import OHLCV, Bar, Quote, Trade
from qubx.core.utils import time_delta_to_str
from qubx.data.helpers import InMemoryCachedReader, TimeGuardedWrapper
from qubx.data.readers import AsDict, DataReader, InMemoryDataFrameReader
from qubx.utils.time import infer_series_frequency, timedelta_to_crontab

SymbolOrInstrument_t: TypeAlias = str | Instrument
ExchangeName_t: TypeAlias = str
SubsType_t: TypeAlias = str | DataType
RawData_t: TypeAlias = pd.DataFrame | OHLCV
DataDecls_t: TypeAlias = DataReader | dict[SubsType_t, DataReader | dict[SymbolOrInstrument_t, RawData_t]]

StrategyOrSignals_t: TypeAlias = IStrategy | pd.DataFrame | pd.Series
DictOfStrats_t: TypeAlias = dict[str, StrategyOrSignals_t]
StrategiesDecls_t: TypeAlias = (
    StrategyOrSignals_t
    | DictOfStrats_t
    | dict[str, DictOfStrats_t]
    | dict[str, StrategyOrSignals_t | list[StrategyOrSignals_t | PositionsTracker]]
    | list[StrategyOrSignals_t | PositionsTracker]
    | tuple[StrategyOrSignals_t | PositionsTracker]
)


class SetupTypes(Enum):
    UKNOWN = "unknown"
    LIST = "list"
    TRACKER = "tracker"
    SIGNAL = "signal"
    STRATEGY = "strategy"
    SIGNAL_AND_TRACKER = "signal_and_tracker"
    STRATEGY_AND_TRACKER = "strategy_and_tracker"


def _type(obj: Any) -> SetupTypes:
    if obj is None:
        t = SetupTypes.UKNOWN
    elif isinstance(obj, (list, tuple)):
        t = SetupTypes.LIST
    elif isinstance(obj, PositionsTracker):
        t = SetupTypes.TRACKER
    elif isinstance(obj, (pd.DataFrame, pd.Series)):
        t = SetupTypes.SIGNAL
    elif isinstance(obj, IStrategy):
        t = SetupTypes.STRATEGY
    else:
        t = SetupTypes.UKNOWN
    return t


@dataclass
class SimulationSetup:
    """
    Configuration of setups in the simulation.
    """

    setup_type: SetupTypes
    name: str
    generator: StrategyOrSignals_t
    tracker: PositionsTracker | None
    instruments: list[Instrument]
    exchange: str
    capital: float
    base_currency: str
    commissions: str
    signal_timeframe: str
    accurate_stop_orders_execution: bool

    def __str__(self) -> str:
        return f"{self.name} {self.setup_type} capital {self.capital} {self.base_currency} for [{','.join(map(lambda x: x.symbol, self.instruments))}] @ {self.exchange}[{self.commissions}]"


# fmt: off
@dataclass
class SimulationDataConfig:
    """
    Configuration of data passed to the simulator.
    """
    default_trigger_schedule: str  # default trigger schedule
    default_base_subscription: str  # the base subscription type
    data_providers: dict[str, DataReader]  # dictionary of available subscription types with DataReaders
    default_warmups: dict[str, str]  # default warmups periods
    open_close_time_indent_secs: int  # open/close ticks shift in seconds
    adjusted_open_close_time_indent_secs: int  # adjusted open/close ticks shift in seconds
    aux_data_provider: InMemoryCachedReader | None = None  # auxiliary data provider

    def get_timeguarded_aux_reader(self, time_provider: ITimeProvider) -> TimeGuardedWrapper | None:
        _aux = None
        if self.aux_data_provider is not None:
            if not isinstance(self.aux_data_provider, InMemoryCachedReader):
                logger.warning("Aux data provider should be an instance of InMemoryCachedReader ! Otherwise it can lead to unnecessary effects !")
            _aux = TimeGuardedWrapper(self.aux_data_provider, time_guard=time_provider)
        return _aux
# fmt: on


class SimulatedLogFormatter:
    def __init__(self, time_provider: ITimeProvider):
        self.time_provider = time_provider

    def formatter(self, record):
        end = record["extra"].get("end", "\n")
        fmt = "<lvl>{message}</lvl>%s" % end
        if record["level"].name in {"WARNING", "SNAKY"}:
            fmt = "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - %s" % fmt

        dt = self.time_provider.time()
        if isinstance(dt, int):
            now = pd.Timestamp(dt).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        else:
            now = self.time_provider.time().astype("datetime64[us]").item().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]

        # prefix = "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> [ <level>%s</level> ] " % record["level"].icon
        prefix = f"<lc>{now}</lc> [<level>{record['level'].icon}</level>] "

        if record["exception"] is not None:
            record["extra"]["stack"] = stackprinter.format(record["exception"], style="darkbg3")
            fmt += "\n{extra[stack]}\n"

        if record["level"].name in {"TEXT"}:
            prefix = ""

        return prefix + fmt


class SimulatedScheduler(BasicScheduler):
    def run(self):
        self._is_started = True
        _has_tasks = False
        _time = self.time_sec()
        for k in self._crons.keys():
            _has_tasks |= self._arm_schedule(k, _time)


class SimulatedCtrlChannel(CtrlChannel):
    """
    Simulated communication channel. Here we don't use queue but it invokes callback directly
    """

    _callback: Callable[[tuple], bool]

    def register(self, callback):
        self._callback = callback

    def send(self, data):
        # - when data is sent, invoke callback
        return self._callback.process_data(*data)

    def receive(self, timeout: int | None = None) -> Any:
        raise SimulationError("Method SimulatedCtrlChannel::receive() should not be called in a simulated environment.")

    def stop(self):
        self.control.clear()

    def start(self):
        self.control.set()


class SimulatedTimeProvider(ITimeProvider):
    _current_time: dt_64

    def __init__(self, initial_time: dt_64 | str):
        self._current_time = np.datetime64(initial_time, "ns") if isinstance(initial_time, str) else initial_time

    def time(self) -> dt_64:
        return self._current_time

    def set_time(self, time: dt_64):
        self._current_time = max(time, self._current_time)


class SignalsProxy(IStrategy):
    """
    Proxy strategy for generated signals.
    """

    timeframe: str = "1m"

    def on_init(self, ctx: IStrategyContext):
        ctx.set_base_subscription(DataType.OHLC[self.timeframe])

    def on_event(self, ctx: IStrategyContext, event: TriggerEvent) -> list[Signal] | None:
        if event.data and event.type == "event":
            signal = event.data.get("order")
            # - TODO: also need to think about how to pass stop/take here
            if signal is not None and event.instrument:
                return [event.instrument.signal(signal)]
        return None


def find_instruments_and_exchanges(
    instruments: list[SymbolOrInstrument_t] | dict[ExchangeName_t, list[SymbolOrInstrument_t]],
    exchange: ExchangeName_t | None,
) -> tuple[list[Instrument], list[ExchangeName_t]]:
    _instrs: list[Instrument] = []
    _exchanges = [] if exchange is None else [exchange.lower()]
    for i in instruments:
        match i:
            case str():
                _e, _s = i.split(":") if ":" in i else (exchange, i)
                assert _e is not None

                if exchange is not None and _e.lower() != exchange.lower():
                    logger.warning("Exchange from symbol's spec ({_e}) is different from requested: {exchange} !")

                if _e is None:
                    logger.warning(
                        "Can't extract exchange name from symbol's spec ({_e}) and exact exchange name is not provided - skip this symbol !"
                    )

                if (ix := lookup.find_symbol(_e, _s)) is not None:
                    _exchanges.append(_e.lower())
                    _instrs.append(ix)
                else:
                    logger.warning(f"Can't find instrument for specified symbol ({i}) - ignoring !")

            case Instrument():
                _exchanges.append(i.exchange)
                _instrs.append(i)

            case _:
                raise SimulationConfigError(f"Unsupported type for {i} only str or Instrument instances are allowed!")

    return _instrs, list(set(_exchanges))


class _StructureSniffer:
    _probe_size: int

    def __init__(self, _probe_size: int = 50) -> None:
        self._probe_size = _probe_size

    def _is_strategy(self, obj) -> bool:
        return _type(obj) == SetupTypes.STRATEGY

    def _is_tracker(self, obj) -> bool:
        return _type(obj) == SetupTypes.TRACKER

    def _is_signal(self, obj) -> bool:
        return _type(obj) == SetupTypes.SIGNAL

    def _is_signal_or_strategy(self, obj) -> bool:
        return self._is_signal(obj) or self._is_strategy(obj)

    def _possible_instruments_ids(self, i: Instrument) -> set[str]:
        return set((i.symbol, str(i), f"{i.exchange}:{i.symbol}"))

    def _pick_instruments(self, instruments: list[Instrument], s: pd.Series | pd.DataFrame) -> list[Instrument]:
        if isinstance(s, pd.Series):
            _instrs = [i for i in instruments if s.name in self._possible_instruments_ids(i)]

        elif isinstance(s, pd.DataFrame):
            _s_cols = set(s.columns)
            _instrs = [i for i in instruments if self._possible_instruments_ids(i) & _s_cols]

        else:
            raise SimulationConfigError("Invalid signals or strategy configuration")

        return list(set(_instrs))

    def _name_in_instruments(self, n, instrs: list[Instrument]) -> bool:
        return any([n in self._possible_instruments_ids(i) for i in instrs])

    def _check_signals_structure(
        self, instruments: list[Instrument], s: pd.Series | pd.DataFrame
    ) -> pd.Series | pd.DataFrame:
        if isinstance(s, pd.Series):
            # - it's possible to put anything to series name, so we convert it to string
            s.name = str(s.name)
            if not self._name_in_instruments(s.name, instruments):
                raise SimulationConfigError(f"Can't find instrument for signal's name: '{s.name}'")

        if isinstance(s, pd.DataFrame):
            s.columns = s.columns.map(lambda x: str(x))
            for col in s.columns:
                if not self._name_in_instruments(col, instruments):
                    raise SimulationConfigError(f"Can't find instrument for signal's name: '{col}'")
        return s

    def _has_columns(self, v: pd.DataFrame, columns: list[str]) -> bool:
        return all([c in v.columns for c in columns])

    def _has_keys(self, v: dict[str, Any], keys: list[str]) -> bool:
        return all([c in v.keys() for c in keys])

    def _sniff_list(self, v: list[Any]) -> str:
        match v[0]:
            case Bar():
                _tf = time_delta_to_str(infer_series_frequency([x.time for x in v[: self._probe_size]]).item())
                return DataType.OHLC[_tf]

            case dict():
                return self._sniff_dicts(v)

            case Quote():
                return DataType.QUOTE

            case TimestampedDict():
                _t = self._sniff_dicts(v[0].data)
                if _t in [DataType.OHLC, DataType.OHLC_TRADES, DataType.OHLC_QUOTES]:
                    _tf = time_delta_to_str(infer_series_frequency([x.time for x in v[: self._probe_size]]).item())
                    return DataType(_t)[_tf]
                return _t

            case Trade():
                return DataType.TRADE

        return DataType.RECORD

    def _sniff_dicts(self, v: dict[str, Any] | list[dict[str, Any]]) -> str:
        v, vs = (v[0], v) if isinstance(v, list) else (v, None)

        if self._has_keys(v, ["open", "high", "low", "close"]):
            if vs:
                _tf = time_delta_to_str(infer_series_frequency([x.get("time") for x in vs[: self._probe_size]]).item())
                return DataType.OHLC[_tf]
            return DataType.OHLC

        if self._has_keys(v, ["bid", "ask"]):
            return DataType.QUOTE

        if self._has_keys(v, ["price", "size"]):
            return DataType.TRADE

        return DataType.RECORD

    def _sniff_pandas(self, v: pd.DataFrame) -> str:
        if self._has_columns(v, ["open", "high", "low", "close"]):
            _tf = time_delta_to_str(infer_series_frequency(v[: self._probe_size]).item())
            return DataType.OHLC[_tf]

        if self._has_columns(v, ["bid", "ask"]):
            return DataType.QUOTE

        if self._has_columns(v, ["price", "size"]):
            return DataType.TRADE

        return DataType.RECORD

    def _pre_read(self, symbol: str, reader: DataReader, time: str, data_type: str) -> list[Any]:
        for dt in ["2h", "12h", "2d", "28d", "60d", "720d"]:
            try:
                _it = reader.read(
                    symbol,
                    transform=AsDict(),
                    start=time,
                    stop=pd.Timestamp(time) + pd.Timedelta(dt),  # type: ignore
                    timeframe=None,
                    chunksize=self._probe_size,
                    data_type=data_type,
                )
                if len(data := next(_it)) >= 2:  # type: ignore
                    return data
            except Exception:
                pass
        return []

    def _sniff_reader(self, symbol: str, reader: DataReader, preferred_data_type: str | None) -> str:
        _probing_types = [DataType.OHLC, DataType.QUOTE, DataType.TRADE]
        _probing_types = ([preferred_data_type] + _probing_types) if preferred_data_type is not None else _probing_types
        _found_type = None
        for _type in _probing_types:
            _t1, _t2 = reader.get_time_ranges(symbol, str(_type))
            if _t1 is not None:
                time = str(_t1 + (_t2 - _t1) / 2)
                _found_type = _type
                break
        else:
            logger.warning(f"Failed to find data start time and supported type for symbol: {symbol}")
            return DataType.NONE

        if _found_type is None:
            logger.warning(f"Failed to detect data type for symbol: {symbol}")
            return DataType.NONE

        data = self._pre_read(symbol, reader, time, _found_type)
        if data:
            return self._sniff_list(data)

        logger.warning(f"Failed to read probe data for symbol: {symbol}")
        return DataType.NONE


def recognize_simulation_configuration(
    name: str,
    configs: StrategiesDecls_t,
    instruments: list[Instrument],
    exchange: str,
    capital: float,
    basic_currency: str,
    commissions: str,
    signal_timeframe: str,
    accurate_stop_orders_execution: bool,
) -> list[SimulationSetup]:
    """
    Recognize and create setups based on the provided simulation configuration.

    This function processes the given configuration and creates a list of SimulationSetup
    objects that represent different simulation scenarios. It handles various types of
    configurations including dictionaries, lists, signals, and strategies.

    Parameters:
    - name (str): The name of the simulation setup.
    - configs (VariableStrategyConfig): The configuration for the simulation. Can be a
      strategy, signals, or a nested structure of these.
    - instruments (list[Instrument]): List of available instruments for the simulation.
    - exchange (str): The name of the exchange to be used.
    - capital (float): The initial capital for the simulation.
    - basic_currency (str): The base currency for the simulation.
    - commissions (str): The commission structure to be applied.
    - signal_timeframe (str): Timeframe for generated signals.
    - accurate_stop_orders_execution (bool): If True, enables more accurate stop order execution simulation.

    Returns:
    - list[SimulationSetup]: A list of SimulationSetup objects, each representing a
      distinct simulation configuration based on the input parameters.

    Raises:
    - SimulationConfigError: If the signal structure is invalid or if an instrument cannot be found
      for a given signal.
    """

    r = list()
    _sniffer = _StructureSniffer()

    # fmt: off
    if isinstance(configs, dict):
        for n, v in configs.items():
            _n = (name + "/") if name else ""
            r.extend(
                recognize_simulation_configuration(
                    _n + n, v, instruments, exchange, capital, basic_currency, commissions,
                    signal_timeframe, accurate_stop_orders_execution
                )
            )

    elif isinstance(configs, (list, tuple)):
        if len(configs) == 2 and _sniffer._is_signal_or_strategy(configs[0]) and _sniffer._is_tracker(configs[1]):
            c0, c1 = configs[0], configs[1]
            _s = _sniffer._check_signals_structure(instruments, c0)  # type: ignore

            if _sniffer._is_signal(c0):
                _t = SetupTypes.SIGNAL_AND_TRACKER

            if _sniffer._is_strategy(c0):
                _t = SetupTypes.STRATEGY_AND_TRACKER

            # - extract actual symbols that have signals
            r.append(
                SimulationSetup(
                    _t, name, _s, c1,  # type: ignore
                    _sniffer._pick_instruments(instruments, _s) if _sniffer._is_signal(c0) else instruments,
                    exchange, capital, basic_currency, commissions,
                    signal_timeframe, accurate_stop_orders_execution
                )
            )
        else:
            for j, s in enumerate(configs):
                r.extend(
                    recognize_simulation_configuration(
                        # name + "/" + str(j), s, instruments, exchange, capital, basic_currency, commissions
                        name, s, instruments, exchange, capital, basic_currency, commissions,  # type: ignore
                        signal_timeframe, accurate_stop_orders_execution
                    )
                )

    elif _sniffer._is_strategy(configs):
        r.append(
            SimulationSetup(
                SetupTypes.STRATEGY,
                name, configs, None, instruments,
                exchange, capital, basic_currency, commissions,
                signal_timeframe, accurate_stop_orders_execution
            )
        )

    elif _sniffer._is_signal(configs):
        # - check structure of signals
        c1 = _sniffer._check_signals_structure(instruments, configs)  # type: ignore
        r.append(
            SimulationSetup(
                SetupTypes.SIGNAL,
                name, c1, None, _sniffer._pick_instruments(instruments, c1),
                exchange, capital, basic_currency, commissions,
                signal_timeframe, accurate_stop_orders_execution
            )
        )

    # fmt: on
    return r


def _get_default_warmup_period(base_subscription: str, in_timeframe: pd.Timedelta | None) -> pd.Timedelta:
    if in_timeframe is None or base_subscription in [DataType.QUOTE, DataType.TRADE, DataType.ORDERBOOK]:
        return pd.Timedelta("1Min")

    if in_timeframe < pd.Timedelta("1h"):
        return 5 * in_timeframe

    return 2 * in_timeframe


def _detect_defaults_from_subscriptions(
    requests: dict[str, tuple[str, DataReader]], open_close_time_indent_secs: int
) -> SimulationDataConfig:
    def _tf(x):
        _p = DataType.from_str(x)[1]
        return pd.Timedelta(_p["timeframe"]) if "timeframe" in _p else None

    _base_subscr = None
    _t_readers = {}
    _in_base_tf = None
    _out_tf = None

    _has_in_qts = False
    _has_in_trd = False
    _has_in_ohlc = False
    _has_out_trd = False
    _has_out_qts = False

    for _t, (_src, _r) in requests.items():
        _has_in_ohlc |= _src == DataType.OHLC
        _has_in_qts |= _src == DataType.QUOTE
        _has_in_trd |= _src == DataType.TRADE
        _has_out_trd |= _t == DataType.TRADE
        _has_out_qts |= _t == DataType.QUOTE

        match _t, _src:
            case (DataType.OHLC, DataType.OHLC):
                _t_readers[DataType.OHLC] = _r
                _out_tf = _tf(_t)
                _in_base_tf = _tf(_src)

                if not _in_base_tf:
                    SimulationConfigError(f"ohlc data specified for {_src} but it's timeframe was not detected")

                if not _out_tf:
                    _out_tf = _in_base_tf

                assert _out_tf and _in_base_tf
                if _in_base_tf > _out_tf:
                    logger.warning(
                        f"Can't produce OHLC {_out_tf} data from provided {_in_base_tf} timeframe, reduce to {_in_base_tf}"
                    )
                    _out_tf = _in_base_tf

                _base_subscr = _src

            case (DataType.OHLC, DataType.QUOTE) | (DataType.OHLC, DataType.TRADE):
                _t_readers[DataType.OHLC] = _r
                _out_tf = _tf(_t)
                _base_subscr = _src
                if _out_tf is None:
                    raise SimulationConfigError(f"ohlc output data timeframe is not specified for {_t}")

            case (DataType.QUOTE, DataType.OHLC):
                _t_readers[DataType.OHLC_QUOTES] = _r
                _in_base_tf = _tf(_src)

            case (DataType.TRADE, DataType.OHLC):
                _t_readers[DataType.OHLC_TRADES] = _r
                _in_base_tf = _tf(_src)

            case (_, _):
                _t_readers[_t] = _r

    if not _base_subscr:
        if _has_in_qts:  # it has input quotes - so base subscription is quotes
            _base_subscr = DataType.QUOTE

        elif _has_in_trd:  # it has input trades - so base subscription is trades
            _base_subscr = DataType.TRADE

        elif _has_in_ohlc:  # it has input ohlc - let's generate quotes from this ohlc
            _out_tf = _in_base_tf

            if _has_out_trd:
                _base_subscr = DataType.OHLC_TRADES

            if _has_out_qts:
                _base_subscr = DataType.OHLC_QUOTES

    if not _base_subscr:
        raise SimulationConfigError("Can't detect base subscription in provided data specification")

    _default_trigger_schedule = ""  # default trigger on every event
    adj_open_close_time_indent_secs = open_close_time_indent_secs
    if _out_tf:
        _default_trigger_schedule = timedelta_to_crontab(_out_tf_tdelta := pd.Timedelta(_out_tf))

        # - if strategy doesn't set it's own schedule then this default trigger schedule would be used for triggering on_event() method.
        # - In this case we want that last price update was arrived before this trigger's time to have
        # - most recent market data
        adj_open_close_time_indent_secs = _adjust_open_close_time_indent_secs(
            _out_tf_tdelta, open_close_time_indent_secs
        )

    # - default warmups
    _warmups = {str(_base_subscr): time_delta_to_str(_get_default_warmup_period(_base_subscr, _in_base_tf).asm8.item())}

    return SimulationDataConfig(
        _default_trigger_schedule,
        _base_subscr,
        _t_readers,
        _warmups,
        open_close_time_indent_secs,
        adj_open_close_time_indent_secs,
    )


def _adjust_open_close_time_indent_secs(timeframe: pd.Timedelta, original_indent_secs: int) -> int:
    # - if it triggers at daily+ bar let's assume this bar is 'closed' 5 min before exact closing time
    if timeframe >= pd.Timedelta("1d"):
        return max(original_indent_secs, 5 * 60)

    # - if it triggers at 1Min+ bar let's assume this bar is 'closed' 5 sec before exact closing time
    if timeframe >= pd.Timedelta("1min"):
        return max(original_indent_secs, 5)

    # - for all sub-minute timeframes just use 1 sec shift
    if timeframe > pd.Timedelta("1s"):
        return max(original_indent_secs, 1)

    # - for rest just keep original indent
    return original_indent_secs


def _is_transformable(_dest: str, _src: str) -> bool:
    match _dest:
        case DataType.OHLC:
            return _src in [DataType.OHLC, DataType.QUOTE, DataType.TRADE]

        case DataType.QUOTE:
            return _src in [DataType.OHLC, DataType.QUOTE]

        case DataType.TRADE:
            return _src in [DataType.OHLC, DataType.TRADE]

    return True


def recognize_simulation_data_config(
    decls: DataDecls_t,
    instruments: list[Instrument],
    exchange: str,
    open_close_time_indent_secs: int = 1,
    aux_data: DataReader | None = None,
) -> SimulationDataConfig:
    """
    Recognizes and configures simulation data based on the provided declarations.

    This function processes the given data declarations and determines the appropriate
    data readers and configurations for simulation. It supports various data types and
    structures, including DataReaders, pandas DataFrames, and dictionaries.

    Parameters:
    - decls (DataDecls_t): The data declarations for the simulation. Can be a DataReader,
      pandas DataFrame, or a dictionary of these.
    - instruments (list[Instrument]): List of available instruments for the simulation.
    - exchange (str): The name of the exchange to be used.

    Returns:
    - tuple[str, str, dict[str, DataReader]]: A tuple containing the default trigger schedule,
      the base subscription type, and a dictionary of available subscription types with
      their corresponding DataReaders.

    Raises:
    - SimulationConfigError: If the data provider type is unsupported or if a requested data type
      cannot be produced from the supported data type.
    """
    sniffer = _StructureSniffer()
    _requested_types = []
    _requests = {}
    exchange = exchange.upper()

    match decls:
        case DataReader():
            _supported_data_type = sniffer._sniff_reader(f"{exchange}:{instruments[0].symbol}", decls, None)
            _available_symbols = decls.get_symbols(exchange, DataType.from_str(_supported_data_type)[0])
            _requests[_supported_data_type] = (_supported_data_type, decls)

        case pd.DataFrame():
            _supported_data_type = sniffer._sniff_pandas(decls)
            _reader = InMemoryDataFrameReader(decls, exchange)
            _available_symbols = _reader.get_symbols(exchange, DataType.from_str(_supported_data_type)[0])
            _requests[_supported_data_type] = (_supported_data_type, _reader)

        case dict():
            _is_dict_of_pandas = False

            for _requested_type, _provider in decls.items():
                # - if we already have this type declared, skip it#-
                # - it prevents to have duplicated ohlc (and potentially other data types with parametrization)#-
                _t = DataType.from_str(_requested_type)[0]
                if _t != DataType.NONE and _t in _requested_types:
                    raise SimulationConfigError(f"Type {_t} already declared")

                _requested_types.append(_t)

                match _provider:
                    case DataReader():
                        _supported_data_type = sniffer._sniff_reader(
                            f"{exchange}:{instruments[0].symbol}", _provider, _requested_type
                        )
                        _available_symbols = _provider.get_symbols(exchange, DataType.from_str(_supported_data_type)[0])
                        _requests[_requested_type] = (_supported_data_type, _provider)
                        if not _is_transformable(_requested_type, _supported_data_type):
                            raise SimulationConfigError(f"Can't produce {_requested_type} from {_supported_data_type}")

                    case dict():
                        try:
                            _reader = InMemoryDataFrameReader(_provider, exchange)
                            _available_symbols = _reader.get_symbols(exchange, None)
                            _supported_data_type = sniffer._sniff_reader(
                                _available_symbols[0], _reader, _requested_type
                            )
                            _requests[_requested_type] = (_supported_data_type, _reader)
                            if not _is_transformable(_requested_type, _supported_data_type):
                                raise SimulationConfigError(
                                    f"Can't produce {_requested_type} from {_supported_data_type}"
                                )

                        except Exception as e:
                            raise SimulationConfigError(
                                f"Error in declared data provider for: {_requested_type} -> {type(_provider)} ({str(e)})"
                            )

                    case pd.DataFrame():
                        _is_dict_of_pandas = True
                        break

                    case _:
                        raise SimulationConfigError(f"Unsupported data provider type: {type(_provider)}")

            if _is_dict_of_pandas:
                try:
                    _reader = InMemoryDataFrameReader(decls, exchange)
                    _available_symbols = _reader.get_symbols(exchange, None)
                    _supported_data_type = sniffer._sniff_reader(_available_symbols[0], _reader, _requested_type)
                    _requests[DataType.OHLC] = (_supported_data_type, _reader)
                    if not _is_transformable(_requested_type, _supported_data_type):
                        raise SimulationConfigError(f"Can't produce {_requested_type} from {_supported_data_type}")

                except Exception as e:
                    raise SimulationConfigError(
                        f"Error in declared data provider for: {_requested_type} -> {type(_provider)} ({str(e)})"
                    )

        case _:
            raise SimulationConfigError(f"Can't recognize declared data provider: {type(decls)}")

    # detect setup's defaults from declared data
    _setup_defaults = _detect_defaults_from_subscriptions(_requests, open_close_time_indent_secs)

    # - just pass it to config, TODO: we need to think how to handle auxiliary data provider better
    _setup_defaults.aux_data_provider = aux_data  # type: ignore

    return _setup_defaults
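For orientation, below is a minimal usage sketch (not part of the package) of how the helpers added in this file could be wired together. The exchange name, symbol, commission tier, signal values and the "ohlc(1h)" subscription spelling are illustrative assumptions and would need to match what the qubx instrument lookup and the DataType parser actually accept.

import pandas as pd

from qubx.backtester.utils import (
    find_instruments_and_exchanges,
    recognize_simulation_configuration,
    recognize_simulation_data_config,
)

# - resolve plain "EXCHANGE:SYMBOL" strings into Instrument objects via the qubx lookup
#   (the exchange/symbol identifiers here are hypothetical and must exist in the bundled instrument data)
instruments, exchanges = find_instruments_and_exchanges(["BINANCE.UM:BTCUSDT"], "BINANCE.UM")

# - declare simulation data as a subscription type mapped to per-symbol OHLC frames;
#   the structure sniffer infers the supported data type and timeframe from the frames
ohlc = pd.DataFrame(
    {
        "open": [100.0] * 10,
        "high": [101.0] * 10,
        "low": [99.0] * 10,
        "close": [100.5] * 10,
    },
    index=pd.date_range("2024-01-01", periods=10, freq="1h"),
)
data_cfg = recognize_simulation_data_config({"ohlc(1h)": {"BTCUSDT": ohlc}}, instruments, "BINANCE.UM")

# - a signals DataFrame keyed by symbol is recognized as a SIGNAL setup; a strategy
#   instance or a (signals, tracker) pair would be recognized analogously
signals = pd.DataFrame({"BTCUSDT": [0, 1, 0, -1, 0, 1, 0, 0, -1, 0]}, index=ohlc.index)
setups = recognize_simulation_configuration(
    "example", signals, instruments, "BINANCE.UM", 10_000.0, "USDT", "vip0_usdt", "1h", False,
)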