Qubx 0.6.23-cp312-cp312-manylinux_2_39_x86_64.whl → 0.6.24-cp312-cp312-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of Qubx might be problematic.
- qubx/backtester/data.py +5 -134
- qubx/backtester/runner.py +252 -47
- qubx/backtester/simulator.py +13 -22
- qubx/backtester/utils.py +27 -39
- qubx/connectors/ccxt/account.py +4 -4
- qubx/connectors/ccxt/data.py +93 -18
- qubx/connectors/ccxt/exchanges/__init__.py +5 -1
- qubx/connectors/ccxt/exchanges/binance/exchange.py +1 -0
- qubx/connectors/ccxt/exchanges/bitfinex/bitfinex.py +43 -0
- qubx/connectors/ccxt/exchanges/kraken/kraken.py +14 -0
- qubx/connectors/ccxt/utils.py +20 -6
- qubx/connectors/tardis/data.py +733 -0
- qubx/connectors/tardis/utils.py +249 -0
- qubx/core/account.py +206 -20
- qubx/core/basics.py +0 -9
- qubx/core/context.py +55 -53
- qubx/core/interfaces.py +38 -36
- qubx/core/lookups.py +129 -18
- qubx/core/metrics.py +14 -11
- qubx/core/mixins/market.py +24 -9
- qubx/core/mixins/subscription.py +58 -28
- qubx/core/mixins/trading.py +35 -31
- qubx/core/mixins/universe.py +0 -20
- qubx/core/series.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/series.pyx +1 -1
- qubx/core/utils.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/data/helpers.py +1 -1
- qubx/data/tardis.py +0 -1
- qubx/restorers/state.py +2 -0
- qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/utils/runner/accounts.py +0 -1
- qubx/utils/runner/configs.py +8 -0
- qubx/utils/runner/runner.py +36 -15
- {qubx-0.6.23.dist-info → qubx-0.6.24.dist-info}/METADATA +1 -1
- {qubx-0.6.23.dist-info → qubx-0.6.24.dist-info}/RECORD +38 -34
- {qubx-0.6.23.dist-info → qubx-0.6.24.dist-info}/LICENSE +0 -0
- {qubx-0.6.23.dist-info → qubx-0.6.24.dist-info}/WHEEL +0 -0
- {qubx-0.6.23.dist-info → qubx-0.6.24.dist-info}/entry_points.txt +0 -0
qubx/backtester/data.py
CHANGED
@@ -1,9 +1,6 @@
 from collections import defaultdict
-from typing import Any
 
-import numpy as np
 import pandas as pd
-from tqdm.auto import tqdm
 
 from qubx import logger
 from qubx.backtester.simulated_data import IterableSimulationData
@@ -13,7 +10,6 @@ from qubx.core.basics import (
     Instrument,
     TimestampedDict,
 )
-from qubx.core.exceptions import SimulationError
 from qubx.core.helpers import BasicScheduler
 from qubx.core.interfaces import IDataProvider
 from qubx.core.series import Bar, Quote, time_as_nsec
@@ -32,8 +28,6 @@ class SimulatedDataProvider(IDataProvider):
     _account: SimulatedAccountProcessor
     _last_quotes: dict[Instrument, Quote | None]
     _readers: dict[str, DataReader]
-    _pregenerated_signals: dict[Instrument, pd.Series | pd.DataFrame]
-    _to_process: dict[Instrument, list]
     _data_source: IterableSimulationData
     _open_close_time_indent_ns: int
 
@@ -45,6 +39,7 @@ class SimulatedDataProvider(IDataProvider):
         time_provider: SimulatedTimeProvider,
         account: SimulatedAccountProcessor,
         readers: dict[str, DataReader],
+        data_source: IterableSimulationData,
         open_close_time_indent_secs=1,
     ):
         self.channel = channel
@@ -54,79 +49,14 @@ class SimulatedDataProvider(IDataProvider):
         self._account = account
         self._readers = readers
 
-        # - create exchange's instance
-        self._last_quotes = defaultdict(lambda: None)
-
-        # - pregenerated signals storage
-        self._pregenerated_signals = dict()
-        self._to_process = {}
-
         # - simulation data source
-        self._data_source = IterableSimulationData(
-            self._readers, open_close_time_indent_secs=open_close_time_indent_secs
-        )
+        self._data_source = data_source
         self._open_close_time_indent_ns = open_close_time_indent_secs * 1_000_000_000  # convert seconds to nanoseconds
 
-
-    def run(
-        self,
-        start: str | pd.Timestamp,
-        end: str | pd.Timestamp,
-        silent: bool = False,
-    ) -> None:
-        logger.info(f"{self.__class__.__name__} ::: Simulation started at {start} :::")
-
-        if self._pregenerated_signals:
-            self._prepare_generated_signals(start, end)
-            _run = self._process_generated_signals
-        else:
-            _run = self._process_strategy
-
-        start, end = pd.Timestamp(start), pd.Timestamp(end)
-        total_duration = end - start
-        update_delta = total_duration / 100
-        prev_dt = pd.Timestamp(start)
-
-        # - date iteration
-        qiter = self._data_source.create_iterable(start, end)
-        if silent:
-            for instrument, data_type, event, is_hist in qiter:
-                if not _run(instrument, data_type, event, is_hist):
-                    break
-        else:
-            _p = 0
-            with tqdm(total=100, desc="Simulating", unit="%", leave=False) as pbar:
-                for instrument, data_type, event, is_hist in qiter:
-                    if not _run(instrument, data_type, event, is_hist):
-                        break
-                    dt = pd.Timestamp(event.time)
-                    # update only if date has changed
-                    if dt - prev_dt > update_delta:
-                        _p += 1
-                        pbar.n = _p
-                        pbar.refresh()
-                        prev_dt = dt
-                pbar.n = 100
-                pbar.refresh()
-
-        logger.info(f"{self.__class__.__name__} ::: Simulation finished at {end} :::")
-
-    def set_generated_signals(self, signals: pd.Series | pd.DataFrame):
-        logger.debug(
-            f"[<y>{self.__class__.__name__}</y>] :: Using pre-generated signals:\n {str(signals.count()).strip('ndtype: int64')}"
-        )
-        # - sanity check
-        signals.index = pd.DatetimeIndex(signals.index)
-
-        if isinstance(signals, pd.Series):
-            self._pregenerated_signals[str(signals.name)] = signals  # type: ignore
+        # - create exchange's instance
+        self._last_quotes = defaultdict(lambda: None)
 
-        elif isinstance(signals, pd.DataFrame):
-            for col in signals.columns:
-                self._pregenerated_signals[col] = signals[col]  # type: ignore
-        else:
-            raise ValueError("Invalid signals or strategy configuration")
+        logger.info(f"{self.__class__.__name__}.{exchange_id} is initialized")
 
     @property
     def is_simulation(self) -> bool:
@@ -200,26 +130,6 @@ class SimulatedDataProvider(IDataProvider):
     def close(self):
         pass
 
-    def _prepare_generated_signals(self, start: str | pd.Timestamp, end: str | pd.Timestamp):
-        for s, v in self._pregenerated_signals.items():
-            _s_inst = None
-
-            for i in self.get_subscribed_instruments():
-                # - we can process series with variable id's if we can find some similar instrument
-                if s == i.symbol or s == str(i) or s == f"{i.exchange}:{i.symbol}" or str(s) == str(i):
-                    _start, _end = pd.Timestamp(start), pd.Timestamp(end)
-                    _start_idx, _end_idx = v.index.get_indexer([_start, _end], method="ffill")
-                    sel = v.iloc[max(_start_idx, 0) : _end_idx + 1]
-
-                    # TODO: check if data has exec_price - it means we have deals
-                    self._to_process[i] = list(zip(sel.index, sel.values))
-                    _s_inst = i
-                    break
-
-            if _s_inst is None:
-                logger.error(f"Can't find instrument for pregenerated signals with id '{s}'")
-                raise SimulationError(f"Can't find instrument for pregenerated signals with id '{s}'")
-
     def _convert_records_to_bars(
         self, records: list[TimestampedDict], cut_time_ns: int, timeframe_ns: int
     ) -> list[Bar]:
@@ -252,44 +162,5 @@ class SimulatedDataProvider(IDataProvider):
 
         return bars
 
-    def _process_generated_signals(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
-        cc = self.channel
-        t = np.datetime64(data.time, "ns")
-
-        if not is_hist:
-            # - signals for this instrument
-            sigs = self._to_process[instrument]
-
-            while sigs and t >= (_signal_time := sigs[0][0].as_unit("ns").asm8):
-                self.time_provider.set_time(_signal_time)
-                cc.send((instrument, "event", {"order": sigs[0][1]}, False))
-                sigs.pop(0)
-
-        if q := self._account._exchange.emulate_quote_from_data(instrument, t, data):
-            self._last_quotes[instrument] = q
-
-        self.time_provider.set_time(t)
-        cc.send((instrument, data_type, data, is_hist))
-
-        return cc.control.is_set()
-
-    def _process_strategy(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
-        cc = self.channel
-        t = np.datetime64(data.time, "ns")
-
-        if not is_hist:
-            if t >= (_next_exp_time := self._scheduler.next_expected_event_time()):
-                # - we use exact event's time
-                self.time_provider.set_time(_next_exp_time)
-                self._scheduler.check_and_run_tasks()
-
-        if q := self._account._exchange.emulate_quote_from_data(instrument, t, data):
-            self._last_quotes[instrument] = q
-
-        self.time_provider.set_time(t)
-        cc.send((instrument, data_type, data, is_hist))
-
-        return cc.control.is_set()
-
     def exchange(self) -> str:
         return self._exchange_id.upper()
qubx/backtester/runner.py
CHANGED
@@ -2,14 +2,24 @@ from typing import Any
 
 import numpy as np
 import pandas as pd
+from tqdm.auto import tqdm
 
 from qubx import logger
-from qubx.
+from qubx.backtester.simulated_data import IterableSimulationData
+from qubx.core.account import CompositeAccountProcessor
+from qubx.core.basics import SW, DataType, Instrument, TransactionCostsCalculator
 from qubx.core.context import StrategyContext
 from qubx.core.exceptions import SimulationConfigError, SimulationError
 from qubx.core.helpers import extract_parameters_from_object, full_qualified_class_name
 from qubx.core.initializer import BasicStrategyInitializer
-from qubx.core.interfaces import
+from qubx.core.interfaces import (
+    CtrlChannel,
+    IMetricEmitter,
+    IStrategy,
+    IStrategyContext,
+    ITimeProvider,
+    StrategyState,
+)
 from qubx.core.loggers import InMemoryLogsWriter, StrategyLogging
 from qubx.core.lookups import lookup
 from qubx.pandaz.utils import _frame_to_str
@@ -42,15 +52,22 @@ class SimulationRunner:
     account_id: str
     portfolio_log_freq: str
     ctx: IStrategyContext
-    data_provider: SimulatedDataProvider
     logs_writer: InMemoryLogsWriter
 
+    account: CompositeAccountProcessor
+    channel: CtrlChannel
+    time_provider: SimulatedTimeProvider
+    scheduler: SimulatedScheduler
     strategy_params: dict[str, Any]
     strategy_class: str
 
     # adjusted times
    _stop: pd.Timestamp | None = None
 
+    _data_source: IterableSimulationData
+    _data_providers: list[SimulatedDataProvider]
+    _exchange_to_data_provider: dict[str, SimulatedDataProvider]
+
     def __init__(
         self,
         setup: SimulationSetup,
@@ -84,7 +101,8 @@ class SimulationRunner:
         self.emitter = emitter
         self.strategy_state = strategy_state if strategy_state is not None else StrategyState()
         self.initializer = initializer
-        self.
+        self._pregenerated_signals = dict()
+        self._to_process = {}
 
         # - get strategy parameters BEFORE simulation start
         # potentially strategy may change it's parameters during simulation
@@ -94,6 +112,8 @@
         self.strategy_params = extract_parameters_from_object(self.setup.generator)
         self.strategy_class = full_qualified_class_name(self.setup.generator)
 
+        self.ctx = self._create_backtest_context()
+
     def run(self, silent: bool = False, catch_keyboard_interrupt: bool = True, close_data_readers: bool = False):
         """
         Run the backtest from start to stop.
@@ -138,7 +158,7 @@
         stop = self._stop or self.stop
 
         try:
-            self.
+            self._run(self.start, stop, silent=silent)
         except KeyboardInterrupt:
             logger.error("Simulated trading interrupted by user!")
             if not catch_keyboard_interrupt:
@@ -147,10 +167,131 @@
         # Stop the context
         self.ctx.stop()
         if close_data_readers:
-
-
-
-
+            for dp in self._data_providers:
+                for reader in dp._readers.values():
+                    if hasattr(reader, "close"):
+                        reader.close()  # type: ignore
+
+    def _set_generated_signals(self, signals: pd.Series | pd.DataFrame):
+        logger.debug(
+            f"[<y>{self.__class__.__name__}</y>] :: Using pre-generated signals:\n {str(signals.count()).strip('ndtype: int64')}"
+        )
+        # - sanity check
+        signals.index = pd.DatetimeIndex(signals.index)
+
+        if isinstance(signals, pd.Series):
+            self._pregenerated_signals[str(signals.name)] = signals  # type: ignore
+
+        elif isinstance(signals, pd.DataFrame):
+            for col in signals.columns:
+                self._pregenerated_signals[col] = signals[col]  # type: ignore
+        else:
+            raise ValueError("Invalid signals or strategy configuration")
+
+    def _prepare_generated_signals(self, start: str | pd.Timestamp, end: str | pd.Timestamp):
+        for s, v in self._pregenerated_signals.items():
+            _s_inst = None
+
+            for i in self._data_providers[0].get_subscribed_instruments():
+                # - we can process series with variable id's if we can find some similar instrument
+                if s == i.symbol or s == str(i) or s == f"{i.exchange}:{i.symbol}" or str(s) == str(i):
+                    _start, _end = pd.Timestamp(start), pd.Timestamp(end)
+                    _start_idx, _end_idx = v.index.get_indexer([_start, _end], method="ffill")
+                    sel = v.iloc[max(_start_idx, 0) : _end_idx + 1]
+
+                    # TODO: check if data has exec_price - it means we have deals
+                    self._to_process[i] = list(zip(sel.index, sel.values))
+                    _s_inst = i
+                    break
+
+            if _s_inst is None:
+                logger.error(f"Can't find instrument for pregenerated signals with id '{s}'")
+                raise SimulationError(f"Can't find instrument for pregenerated signals with id '{s}'")
+
+    def _process_generated_signals(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
+        cc = self.channel
+        t = np.datetime64(data.time, "ns")
+        _account = self.account.get_account_processor(instrument.exchange)
+        _data_provider = self._exchange_to_data_provider[instrument.exchange]
+        assert isinstance(_account, SimulatedAccountProcessor)
+        assert isinstance(_data_provider, SimulatedDataProvider)
+
+        if not is_hist:
+            # - signals for this instrument
+            sigs = self._to_process[instrument]
+
+            while sigs and t >= (_signal_time := sigs[0][0].as_unit("ns").asm8):
+                self.time_provider.set_time(_signal_time)
+                cc.send((instrument, "event", {"order": sigs[0][1]}, False))
+                sigs.pop(0)
+
+        if q := _account._exchange.emulate_quote_from_data(instrument, t, data):
+            _data_provider._last_quotes[instrument] = q
+
+        self.time_provider.set_time(t)
+        cc.send((instrument, data_type, data, is_hist))
+
+        return cc.control.is_set()
+
+    def _process_strategy(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
+        cc = self.channel
+        t = np.datetime64(data.time, "ns")
+        _account = self.account.get_account_processor(instrument.exchange)
+        _data_provider = self._exchange_to_data_provider[instrument.exchange]
+        assert isinstance(_account, SimulatedAccountProcessor)
+        assert isinstance(_data_provider, SimulatedDataProvider)
+
+        if not is_hist:
+            if t >= (_next_exp_time := self.scheduler.next_expected_event_time()):
+                # - we use exact event's time
+                self.time_provider.set_time(_next_exp_time)
+                self.scheduler.check_and_run_tasks()
+
+        if q := _account._exchange.emulate_quote_from_data(instrument, t, data):
+            _data_provider._last_quotes[instrument] = q
+
+        self.time_provider.set_time(t)
+        cc.send((instrument, data_type, data, is_hist))
+
+        return cc.control.is_set()
+
+    def _run(self, start: pd.Timestamp, stop: pd.Timestamp, silent: bool = False) -> None:
+        logger.info(f"{self.__class__.__name__} ::: Simulation started at {start} :::")
+
+        if self._pregenerated_signals:
+            self._prepare_generated_signals(start, stop)
+            _run = self._process_generated_signals
+        else:
+            _run = self._process_strategy
+
+        start, stop = pd.Timestamp(start), pd.Timestamp(stop)
+        total_duration = stop - start
+        update_delta = total_duration / 100
+        prev_dt = pd.Timestamp(start)
+
+        # - date iteration
+        qiter = self._data_source.create_iterable(start, stop)
+        if silent:
+            for instrument, data_type, event, is_hist in qiter:
+                if not _run(instrument, data_type, event, is_hist):
+                    break
+        else:
+            _p = 0
+            with tqdm(total=100, desc="Simulating", unit="%", leave=False) as pbar:
+                for instrument, data_type, event, is_hist in qiter:
+                    if not _run(instrument, data_type, event, is_hist):
+                        break
+                    dt = pd.Timestamp(event.time)
+                    # update only if date has changed
+                    if dt - prev_dt > update_delta:
+                        _p += 1
+                        pbar.n = _p
+                        pbar.refresh()
+                        prev_dt = dt
+                pbar.n = 100
+                pbar.refresh()
+
+        logger.info(f"{self.__class__.__name__} ::: Simulation finished at {stop} :::")
 
     def print_latency_report(self) -> None:
         _l_r = SW.latency_report()
@@ -164,47 +305,47 @@
         )
 
     def _create_backtest_context(self) -> IStrategyContext:
-        tcc = lookup.fees.find(self.setup.exchange.lower(), self.setup.commissions)
-        if tcc is None:
-            raise SimulationConfigError(
-                f"Can't find transaction costs calculator for '{self.setup.exchange}' for specification '{self.setup.commissions}' !"
-            )
-
-        channel = SimulatedCtrlChannel("databus", sentinel=(None, None, None, None))
-        simulated_clock = SimulatedTimeProvider(np.datetime64(self.start, "ns"))
-
         logger.debug(
-            f"[<y>
+            f"[<y>Simulator</y>] :: Preparing simulated trading on <g>{self.setup.exchanges}</g> "
+            f"for {self.setup.capital} {self.setup.base_currency}..."
         )
 
-
-
-
-        simulated_exchange = get_simulated_exchange(
-            self.setup.exchange, simulated_clock, tcc, self.setup.accurate_stop_orders_execution
+        data_source = IterableSimulationData(
+            self.data_config.data_providers,
+            open_close_time_indent_secs=self.data_config.adjusted_open_close_time_indent_secs,
         )
 
-
-
-
-
-
-            initial_capital=self.setup.capital,
+        channel = SimulatedCtrlChannel("databus", sentinel=(None, None, None, None))
+        simulated_clock = SimulatedTimeProvider(np.datetime64(self.start, "ns"))
+
+        account = self._construct_account_processor(
+            self.setup.exchanges, self.setup.commissions, simulated_clock, channel
         )
-        scheduler = SimulatedScheduler(channel, lambda: simulated_clock.time().item())
 
-
-        broker = SimulatedBroker(channel, account, simulated_exchange)
+        scheduler = SimulatedScheduler(channel, lambda: simulated_clock.time().item())
 
-
-
-
-
-
-
-
-
-
+        brokers = []
+        for exchange in self.setup.exchanges:
+            _exchange_account = account.get_account_processor(exchange)
+            assert isinstance(_exchange_account, SimulatedAccountProcessor)
+            brokers.append(SimulatedBroker(channel, _exchange_account, _exchange_account._exchange))
+
+        data_providers = []
+        for exchange in self.setup.exchanges:
+            _exchange_account = account.get_account_processor(exchange)
+            assert isinstance(_exchange_account, SimulatedAccountProcessor)
+            data_providers.append(
+                SimulatedDataProvider(
+                    exchange_id=exchange,
+                    channel=channel,
+                    scheduler=scheduler,
+                    time_provider=simulated_clock,
+                    account=_exchange_account,
+                    readers=self.data_config.data_providers,
+                    data_source=data_source,
+                    open_close_time_indent_secs=self.data_config.adjusted_open_close_time_indent_secs,
+                )
+            )
 
         # - get aux data provider
         _aux_data = self.data_config.get_timeguarded_aux_reader(simulated_clock)
@@ -225,7 +366,10 @@
 
             case SetupTypes.SIGNAL:
                 strat = SignalsProxy(timeframe=self.setup.signal_timeframe)
-
+                if len(data_providers) > 1:
+                    raise SimulationConfigError("Signal setup is not supported for multiple exchanges !")
+
+                self._set_generated_signals(self.setup.generator)  # type: ignore
 
                 # - we don't need any unexpected triggerings
                 self._stop = min(self.setup.generator.index[-1], self.stop)  # type: ignore
@@ -233,7 +377,10 @@
            case SetupTypes.SIGNAL_AND_TRACKER:
                 strat = SignalsProxy(timeframe=self.setup.signal_timeframe)
                 strat.tracker = lambda ctx: self.setup.tracker
-
+                if len(data_providers) > 1:
+                    raise SimulationConfigError("Signal setup is not supported for multiple exchanges !")
+
+                self._set_generated_signals(self.setup.generator)  # type: ignore
 
                 # - we don't need any unexpected triggerings
                 self._stop = min(self.setup.generator.index[-1], self.stop)  # type: ignore
@@ -246,8 +393,8 @@
 
         ctx = StrategyContext(
             strategy=strat,
-
-
+            brokers=brokers,
+            data_providers=data_providers,
             account=account,
             scheduler=scheduler,
             time_provider=simulated_clock,
@@ -274,6 +421,64 @@
         logger.debug(f"[<y>simulator</y>] :: Setting default schedule: {self.data_config.default_trigger_schedule}")
         ctx.set_event_schedule(self.data_config.default_trigger_schedule)
 
-        self.data_provider = data_provider
         self.logs_writer = logs_writer
+        self.channel = channel
+        self.time_provider = simulated_clock
+        self.account = account
+        self.scheduler = scheduler
+        self._data_source = data_source
+        self._data_providers = data_providers
+        self._exchange_to_data_provider = {dp.exchange(): dp for dp in data_providers}
         return ctx
+
+    def _construct_tcc(
+        self, exchanges: list[str], commissions: str | dict[str, str | None] | None
+    ) -> dict[str, TransactionCostsCalculator]:
+        _exchange_to_tcc = {}
+        if isinstance(commissions, (str, type(None))):
+            commissions = {e: commissions for e in exchanges}
+        for exchange in exchanges:
+            _exchange_to_tcc[exchange] = lookup.fees.find(exchange.lower(), commissions.get(exchange))
+        return _exchange_to_tcc
+
+    def _construct_account_processor(
+        self,
+        exchanges: list[str],
+        commissions: str | dict[str, str | None] | None,
+        time_provider: ITimeProvider,
+        channel: CtrlChannel,
+    ) -> CompositeAccountProcessor:
+        _exchange_to_tcc = self._construct_tcc(exchanges, commissions)
+        for tcc in _exchange_to_tcc.values():
+            if tcc is None:
+                raise SimulationConfigError(
+                    f"Can't find transaction costs calculator for '{self.setup.exchanges}' for specification '{self.setup.commissions}' !"
+                )
+
+        _exchange_to_simulated_exchange = {}
+        for exchange in self.setup.exchanges:
+            # - create simulated exchange:
+            # - we can use different emulations of real exchanges features in future here: for Binance, Bybit, InteractiveBrokers, etc.
+            # - for now we use simple basic simulated exchange implementation
+            _exchange_to_simulated_exchange[exchange] = get_simulated_exchange(
+                exchange, time_provider, _exchange_to_tcc[exchange], self.setup.accurate_stop_orders_execution
+            )
+
+        _account_processors = {}
+        for exchange in self.setup.exchanges:
+            _initial_capital = self.setup.capital
+            if isinstance(_initial_capital, dict):
+                _initial_capital = _initial_capital[exchange]
+            assert isinstance(_initial_capital, (float, int))
+            _account_processors[exchange] = SimulatedAccountProcessor(
+                account_id=self.account_id,
+                exchange=_exchange_to_simulated_exchange[exchange],
+                channel=channel,
+                base_currency=self.setup.base_currency,
+                initial_capital=_initial_capital,
+            )
+
+        return CompositeAccountProcessor(
+            time_provider=time_provider,
+            account_processors=_account_processors,
+        )
qubx/backtester/simulator.py
CHANGED
@@ -30,9 +30,9 @@ from .utils import (
 def simulate(
     strategies: StrategiesDecls_t,
     data: DataDecls_t,
-    capital: float,
+    capital: float | dict[str, float],
     instruments: list[SymbolOrInstrument_t] | dict[ExchangeName_t, list[SymbolOrInstrument_t]],
-    commissions: str | None,
+    commissions: str | dict[str, str | None] | None,
     start: str | pd.Timestamp,
     stop: str | pd.Timestamp | None = None,
     exchange: ExchangeName_t | None = None,
@@ -95,29 +95,20 @@ def simulate(
         )
         raise SimulationError(_msg)
 
-    # - check if instruments are from the same exchange (mmulti-exchanges is not supported yet)
-    if len(_exchanges) > 1:
-        logger.error(
-            _msg := f"Multiple exchanges found: {', '.join(_exchanges)} - this mode is not supported yet in Qubx !"
-        )
-        raise SimulationError(_msg)
-
-    exchange = _exchanges[0]
-
     # - recognize provided data
-    data_setup = recognize_simulation_data_config(data, _instruments,
+    data_setup = recognize_simulation_data_config(data, _instruments, open_close_time_indent_secs, aux_data)
 
     # - recognize setup: it can be either a strategy or set of signals
     simulation_setups = recognize_simulation_configuration(
-        "",
-        strategies,
-        _instruments,
-
-        capital,
-        base_currency,
-        commissions,
-        signal_timeframe,
-        accurate_stop_orders_execution,
+        name="",
+        configs=strategies,
+        instruments=_instruments,
+        exchanges=_exchanges,
+        capital=capital,
+        basic_currency=base_currency,
+        commissions=commissions,
+        signal_timeframe=signal_timeframe,
+        accurate_stop_orders_execution=accurate_stop_orders_execution,
     )
     if not simulation_setups:
         logger.error(
@@ -232,7 +223,7 @@ def _run_setup(
         setup.name,
         start,
         stop,
-        setup.
+        setup.exchanges,
         setup.instruments,
         setup.capital,
         setup.base_currency,