Qubx 0.5.7 (qubx-0.5.7-cp312-cp312-manylinux_2_39_x86_64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Qubx might be problematic.
- qubx/__init__.py +207 -0
- qubx/_nb_magic.py +100 -0
- qubx/backtester/__init__.py +5 -0
- qubx/backtester/account.py +145 -0
- qubx/backtester/broker.py +87 -0
- qubx/backtester/data.py +296 -0
- qubx/backtester/management.py +378 -0
- qubx/backtester/ome.py +296 -0
- qubx/backtester/optimization.py +201 -0
- qubx/backtester/simulated_data.py +558 -0
- qubx/backtester/simulator.py +362 -0
- qubx/backtester/utils.py +780 -0
- qubx/cli/__init__.py +0 -0
- qubx/cli/commands.py +67 -0
- qubx/connectors/ccxt/__init__.py +0 -0
- qubx/connectors/ccxt/account.py +495 -0
- qubx/connectors/ccxt/broker.py +132 -0
- qubx/connectors/ccxt/customizations.py +193 -0
- qubx/connectors/ccxt/data.py +612 -0
- qubx/connectors/ccxt/exceptions.py +17 -0
- qubx/connectors/ccxt/factory.py +93 -0
- qubx/connectors/ccxt/utils.py +307 -0
- qubx/core/__init__.py +0 -0
- qubx/core/account.py +251 -0
- qubx/core/basics.py +850 -0
- qubx/core/context.py +420 -0
- qubx/core/exceptions.py +38 -0
- qubx/core/helpers.py +480 -0
- qubx/core/interfaces.py +1150 -0
- qubx/core/loggers.py +514 -0
- qubx/core/lookups.py +475 -0
- qubx/core/metrics.py +1512 -0
- qubx/core/mixins/__init__.py +13 -0
- qubx/core/mixins/market.py +94 -0
- qubx/core/mixins/processing.py +428 -0
- qubx/core/mixins/subscription.py +203 -0
- qubx/core/mixins/trading.py +88 -0
- qubx/core/mixins/universe.py +270 -0
- qubx/core/series.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/series.pxd +125 -0
- qubx/core/series.pyi +118 -0
- qubx/core/series.pyx +988 -0
- qubx/core/utils.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/utils.pyi +6 -0
- qubx/core/utils.pyx +62 -0
- qubx/data/__init__.py +25 -0
- qubx/data/helpers.py +416 -0
- qubx/data/readers.py +1562 -0
- qubx/data/tardis.py +100 -0
- qubx/gathering/simplest.py +88 -0
- qubx/math/__init__.py +3 -0
- qubx/math/stats.py +129 -0
- qubx/pandaz/__init__.py +23 -0
- qubx/pandaz/ta.py +2757 -0
- qubx/pandaz/utils.py +638 -0
- qubx/resources/instruments/symbols-binance.cm.json +1 -0
- qubx/resources/instruments/symbols-binance.json +1 -0
- qubx/resources/instruments/symbols-binance.um.json +1 -0
- qubx/resources/instruments/symbols-bitfinex.f.json +1 -0
- qubx/resources/instruments/symbols-bitfinex.json +1 -0
- qubx/resources/instruments/symbols-kraken.f.json +1 -0
- qubx/resources/instruments/symbols-kraken.json +1 -0
- qubx/ta/__init__.py +0 -0
- qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/ta/indicators.pxd +149 -0
- qubx/ta/indicators.pyi +41 -0
- qubx/ta/indicators.pyx +787 -0
- qubx/trackers/__init__.py +3 -0
- qubx/trackers/abvanced.py +236 -0
- qubx/trackers/composite.py +146 -0
- qubx/trackers/rebalancers.py +129 -0
- qubx/trackers/riskctrl.py +641 -0
- qubx/trackers/sizers.py +235 -0
- qubx/utils/__init__.py +5 -0
- qubx/utils/_pyxreloader.py +281 -0
- qubx/utils/charting/lookinglass.py +1057 -0
- qubx/utils/charting/mpl_helpers.py +1183 -0
- qubx/utils/marketdata/binance.py +284 -0
- qubx/utils/marketdata/ccxt.py +90 -0
- qubx/utils/marketdata/dukas.py +130 -0
- qubx/utils/misc.py +541 -0
- qubx/utils/ntp.py +63 -0
- qubx/utils/numbers_utils.py +7 -0
- qubx/utils/orderbook.py +491 -0
- qubx/utils/plotting/__init__.py +0 -0
- qubx/utils/plotting/dashboard.py +150 -0
- qubx/utils/plotting/data.py +137 -0
- qubx/utils/plotting/interfaces.py +25 -0
- qubx/utils/plotting/renderers/__init__.py +0 -0
- qubx/utils/plotting/renderers/plotly.py +0 -0
- qubx/utils/runner/__init__.py +1 -0
- qubx/utils/runner/_jupyter_runner.pyt +60 -0
- qubx/utils/runner/accounts.py +88 -0
- qubx/utils/runner/configs.py +65 -0
- qubx/utils/runner/runner.py +470 -0
- qubx/utils/time.py +312 -0
- qubx-0.5.7.dist-info/METADATA +105 -0
- qubx-0.5.7.dist-info/RECORD +100 -0
- qubx-0.5.7.dist-info/WHEEL +4 -0
- qubx-0.5.7.dist-info/entry_points.txt +3 -0
qubx/backtester/data.py
ADDED
@@ -0,0 +1,296 @@
from collections import defaultdict
from typing import Any, Dict, Optional

import numpy as np
import pandas as pd
from tqdm.auto import tqdm

from qubx import logger
from qubx.backtester.simulated_data import IterableSimulationData
from qubx.core.basics import (
    CtrlChannel,
    DataType,
    Instrument,
    TimestampedDict,
)
from qubx.core.exceptions import SimulationError
from qubx.core.helpers import BasicScheduler
from qubx.core.interfaces import IDataProvider
from qubx.core.series import Bar, Quote, time_as_nsec
from qubx.data.readers import AsDict, DataReader
from qubx.utils.time import infer_series_frequency

from .account import SimulatedAccountProcessor
from .utils import SimulatedTimeProvider


class SimulatedDataProvider(IDataProvider):
    time_provider: SimulatedTimeProvider
    channel: CtrlChannel

    _scheduler: BasicScheduler
    _account: SimulatedAccountProcessor
    _last_quotes: Dict[Instrument, Optional[Quote]]
    _readers: dict[str, DataReader]
    _pregenerated_signals: dict[Instrument, pd.Series | pd.DataFrame]
    _to_process: dict[Instrument, list]
    _data_source: IterableSimulationData
    _open_close_time_indent_ns: int

    def __init__(
        self,
        exchange_id: str,
        channel: CtrlChannel,
        scheduler: BasicScheduler,
        time_provider: SimulatedTimeProvider,
        account: SimulatedAccountProcessor,
        readers: dict[str, DataReader],
        open_close_time_indent_secs=1,
    ):
        self.channel = channel
        self.time_provider = time_provider
        self._exchange_id = exchange_id
        self._scheduler = scheduler
        self._account = account
        self._readers = readers

        # - create exchange's instance
        self._last_quotes = defaultdict(lambda: None)

        # - pregenerated signals storage
        self._pregenerated_signals = dict()
        self._to_process = {}

        # - simulation data source
        self._data_source = IterableSimulationData(
            self._readers, open_close_time_indent_secs=open_close_time_indent_secs
        )
        self._open_close_time_indent_ns = open_close_time_indent_secs * 1_000_000_000  # convert seconds to nanoseconds

        logger.info(f"{self.__class__.__name__}.{exchange_id} is initialized")

    def run(
        self,
        start: str | pd.Timestamp,
        end: str | pd.Timestamp,
        silent: bool = False,
    ) -> None:
        logger.info(f"{self.__class__.__name__} ::: Simulation started at {start} :::")

        if self._pregenerated_signals:
            self._prepare_generated_signals(start, end)
            _run = self._process_generated_signals
        else:
            _run = self._process_strategy

        start, end = pd.Timestamp(start), pd.Timestamp(end)
        total_duration = end - start
        update_delta = total_duration / 100
        prev_dt = pd.Timestamp(start)

        # - date iteration
        qiter = self._data_source.create_iterable(start, end)
        if silent:
            for instrument, data_type, event, is_hist in qiter:
                if not _run(instrument, data_type, event, is_hist):
                    break
        else:
            _p = 0
            with tqdm(total=100, desc="Simulating", unit="%", leave=False) as pbar:
                for instrument, data_type, event, is_hist in qiter:
                    if not _run(instrument, data_type, event, is_hist):
                        break
                    dt = pd.Timestamp(event.time)
                    # update only if date has changed
                    if dt - prev_dt > update_delta:
                        _p += 1
                        pbar.n = _p
                        pbar.refresh()
                        prev_dt = dt
                pbar.n = 100
                pbar.refresh()

        logger.info(f"{self.__class__.__name__} ::: Simulation finished at {end} :::")

    def set_generated_signals(self, signals: pd.Series | pd.DataFrame):
        logger.debug(
            f"[<y>{self.__class__.__name__}</y>] :: Using pre-generated signals:\n {str(signals.count()).strip('ndtype: int64')}"
        )
        # - sanity check
        signals.index = pd.DatetimeIndex(signals.index)

        if isinstance(signals, pd.Series):
            self._pregenerated_signals[str(signals.name)] = signals  # type: ignore

        elif isinstance(signals, pd.DataFrame):
            for col in signals.columns:
                self._pregenerated_signals[col] = signals[col]  # type: ignore
        else:
            raise ValueError("Invalid signals or strategy configuration")

    @property
    def is_simulation(self) -> bool:
        return True

    def subscribe(self, subscription_type: str, instruments: set[Instrument], reset: bool) -> None:
        _new_instr = [i for i in instruments if not self.has_subscription(i, subscription_type)]
        self._data_source.add_instruments_for_subscription(subscription_type, list(instruments))

        # - provide historical data and last quote for subscribed instruments
        for i in _new_instr:
            h_data = self._data_source.peek_historical_data(i, subscription_type)
            if h_data:
                # _s_type = DataType.from_str(subscription_type)[0]
                last_update = h_data[-1]
                if last_quote := self._account.emulate_quote_from_data(i, last_update.time, last_update):  # type: ignore
                    # - send historical data to the channel
                    self.channel.send((i, subscription_type, h_data, True))

                    # - set last quote
                    self._last_quotes[i] = last_quote

                    # - also need to pass this quote to OME !
                    self._account._process_new_quote(i, last_quote)

                    logger.debug(f" | subscribed {subscription_type} {i} -> {last_quote}")

    def unsubscribe(self, subscription_type: str, instruments: set[Instrument] | Instrument | None = None) -> None:
        # logger.debug(f" | unsubscribe: {subscription_type} -> {instruments}")
        if instruments is not None:
            self._data_source.remove_instruments_from_subscription(
                subscription_type, [instruments] if isinstance(instruments, Instrument) else list(instruments)
            )

    def has_subscription(self, instrument: Instrument, subscription_type: str) -> bool:
        return self._data_source.has_subscription(instrument, subscription_type)

    def get_subscriptions(self, instrument: Instrument) -> list[str]:
        _s_lst = self._data_source.get_subscriptions_for_instrument(instrument)
        # logger.debug(f" | get_subscriptions {instrument} -> {_s_lst}")
        return _s_lst

    def get_subscribed_instruments(self, subscription_type: str | None = None) -> list[Instrument]:
        _in_lst = self._data_source.get_instruments_for_subscription(subscription_type or DataType.ALL)
        # logger.debug(f" | get_subscribed_instruments {subscription_type} -> {_in_lst}")
        return _in_lst

    def warmup(self, configs: dict[tuple[str, Instrument], str]) -> None:
        for si, warm_period in configs.items():
            logger.debug(f" | Warming up {si} -> {warm_period}")
            self._data_source.set_warmup_period(si[0], warm_period)

    def get_ohlc(self, instrument: Instrument, timeframe: str, nbarsback: int) -> list[Bar]:
        _reader = self._readers.get(DataType.OHLC)
        if _reader is None:
            logger.error(f"Reader for {DataType.OHLC} data not configured")
            return []

        start = pd.Timestamp(self.time_provider.time())
        end = start - nbarsback * (_timeframe := pd.Timedelta(timeframe))
        _spec = f"{instrument.exchange}:{instrument.symbol}"
        return self._convert_records_to_bars(
            _reader.read(data_id=_spec, start=start, stop=end, transform=AsDict()),  # type: ignore
            time_as_nsec(self.time_provider.time()),
            _timeframe.asm8.item(),
        )

    def get_quote(self, instrument: Instrument) -> Quote | None:
        return self._last_quotes[instrument]

    def close(self):
        pass

    def _prepare_generated_signals(self, start: str | pd.Timestamp, end: str | pd.Timestamp):
        for s, v in self._pregenerated_signals.items():
            _s_inst = None

            for i in self.get_subscribed_instruments():
                # - we can process series with variable id's if we can find some similar instrument
                if s == i.symbol or s == str(i) or s == f"{i.exchange}:{i.symbol}" or str(s) == str(i):
                    _start, _end = pd.Timestamp(start), pd.Timestamp(end)
                    _start_idx, _end_idx = v.index.get_indexer([_start, _end], method="ffill")
                    sel = v.iloc[max(_start_idx, 0) : _end_idx + 1]

                    # TODO: check if data has exec_price - it means we have deals
                    self._to_process[i] = list(zip(sel.index, sel.values))
                    _s_inst = i
                    break

            if _s_inst is None:
                logger.error(f"Can't find instrument for pregenerated signals with id '{s}'")
                raise SimulationError(f"Can't find instrument for pregenerated signals with id '{s}'")

    def _convert_records_to_bars(
        self, records: list[TimestampedDict], cut_time_ns: int, timeframe_ns: int
    ) -> list[Bar]:
        """
        Convert records to bars, cutting the last bar at cut_time_ns.
        """
        bars = []

        # - if no records, return empty list to avoid exception from infer_series_frequency
        if not records:
            return bars

        _data_tf = infer_series_frequency([r.time for r in records[:50]])
        timeframe_ns = _data_tf.item()

        if records is not None:
            for r in records:
                # _b_ts_0 = np.datetime64(r.time, "ns").item()
                _b_ts_0 = r.time
                _b_ts_1 = _b_ts_0 + timeframe_ns - self._open_close_time_indent_ns

                if _b_ts_0 <= cut_time_ns and cut_time_ns < _b_ts_1:
                    break

                bars.append(
                    Bar(
                        _b_ts_0, r.data["open"], r.data["high"], r.data["low"], r.data["close"], r.data.get("volume", 0)
                    )
                )

        return bars

    def _process_generated_signals(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
        cc = self.channel
        t = np.datetime64(data.time, "ns")

        if not is_hist:
            # - signals for this instrument
            sigs = self._to_process[instrument]

            while sigs and t >= (_signal_time := sigs[0][0].as_unit("ns").asm8):
                self.time_provider.set_time(_signal_time)
                cc.send((instrument, "event", {"order": sigs[0][1]}, False))
                sigs.pop(0)

        if q := self._account.emulate_quote_from_data(instrument, t, data):
            self._last_quotes[instrument] = q

        self.time_provider.set_time(t)
        cc.send((instrument, data_type, data, is_hist))

        return cc.control.is_set()

    def _process_strategy(self, instrument: Instrument, data_type: str, data: Any, is_hist: bool) -> bool:
        cc = self.channel
        t = np.datetime64(data.time, "ns")

        if not is_hist:
            if t >= (_next_exp_time := self._scheduler.next_expected_event_time()):
                # - we use exact event's time
                self.time_provider.set_time(_next_exp_time)
                self._scheduler.check_and_run_tasks()

        if q := self._account.emulate_quote_from_data(instrument, t, data):
            self._last_quotes[instrument] = q

        self.time_provider.set_time(t)
        cc.send((instrument, data_type, data, is_hist))

        return cc.control.is_set()

    def exchange(self) -> str:
        return self._exchange_id.upper()
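The subtle part of data.py is the cut rule in _convert_records_to_bars: a bar whose open-to-close interval straddles the current simulation time is still forming, so it and everything after it are dropped from the returned history. Below is a minimal, self-contained sketch of that rule; all timestamps and names (NS, bar_open_times, cut_time_ns) are illustrative, not package APIs.

# Sketch of the cut logic: keep only bars fully closed before cut_time_ns.
NS = 1_000_000_000                             # nanoseconds per second
timeframe_ns = 60 * NS                         # 1-minute bars
indent_ns = 1 * NS                             # open_close_time_indent_secs = 1

bar_open_times = [0 * NS, 60 * NS, 120 * NS]   # hypothetical bar open times
cut_time_ns = 130 * NS                         # current simulation time

finished = []
for ts0 in bar_open_times:
    ts1 = ts0 + timeframe_ns - indent_ns       # effective close time
    if ts0 <= cut_time_ns < ts1:               # bar still open at cut time
        break                                  # drop it and everything after
    finished.append(ts0)

print(finished)                                # [0, 60000000000]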
qubx/backtester/management.py
ADDED
@@ -0,0 +1,378 @@
import re
import zipfile
from collections import defaultdict
from pathlib import Path

import pandas as pd
import yaml

from qubx.core.metrics import TradingSessionResult
from qubx.utils.misc import blue, cyan, green, magenta, red, yellow


class BacktestsResultsManager:
    """
    Manager class for handling backtesting results.

    This class provides functionality to load, list and manage backtesting results stored in zip files.
    Each result contains trading session information and metrics that can be loaded and analyzed.

    Parameters
    ----------
    path : str
        Path to directory containing backtesting result zip files

    Methods
    -------
    - reload()
        Reloads all backtesting results from the specified path
    - list(regex="", with_metrics=False)
        Lists all backtesting results, optionally filtered by regex and including metrics
    - load(name)
        Loads a specific backtesting result by name
    - load_config(name)
        Loads the configuration YAML file for a specific backtest result
    - delete(name)
        Deletes one or more backtest results
    """

    def __init__(self, path: str):
        self.path = path
        self.reload()

    def reload(self) -> "BacktestsResultsManager":
        self.results = {}
        self.variations = {}

        _vars = defaultdict(list)
        names = defaultdict(lambda: 0)
        for p in Path(self.path).glob("**/*.zip"):
            with zipfile.ZipFile(p, "r") as zip_ref:
                try:
                    info = yaml.safe_load(zip_ref.read("info.yml"))
                    info["path"] = str(p)
                    n = info.get("name", "")
                    var_set_name = info.get("variation_name", "")

                    # - put variations aside
                    if var_set_name:
                        _vars[var_set_name].append(info)
                        continue

                    _new_name = n if names[n] == 0 else f"{n}.{names[n]}"
                    names[n] += 1
                    info["name"] = _new_name
                    self.results[_new_name] = info
                except Exception:
                    pass

        # - reindex
        _idx = 1
        for n in sorted(self.results.keys()):
            self.results[n]["idx"] = _idx
            _idx += 1

        # - reindex variations at the end
        for n in sorted(_vars.keys()):
            self.variations[_idx] = {
                "name": n,
                "idx": _idx,
                "variations": _vars[n],
                "created": pd.Timestamp(_vars[n][0].get("creation_time", "")).round("1s"),
                "author": _vars[n][0].get("author", ""),
                "description": _vars[n][0].get("description", ""),
            }
            _idx += 1

        return self

    def __getitem__(
        self, name: str | int | list[int] | list[str] | slice
    ) -> TradingSessionResult | list[TradingSessionResult]:
        return self.load(name)

    def load(
        self, name_or_idx: str | int | list[int] | list[str] | slice
    ) -> TradingSessionResult | list[TradingSessionResult]:
        match name_or_idx:
            case list():
                return [self.load(i) for i in name_or_idx]  # type: ignore
            case str():
                return [self.load(i) for i in self._find_indices(name_or_idx)]  # type: ignore
            case slice():
                return [
                    self.load(i)
                    for i in range(name_or_idx.start, name_or_idx.stop, name_or_idx.step if name_or_idx.step else 1)
                ]  # type: ignore
            case int():
                if name_or_idx > len(self.results) and name_or_idx in self.variations:
                    return [
                        TradingSessionResult.from_file(v.get("path", ""))
                        for v in self.variations[name_or_idx].get("variations", [])
                    ]

                # - load by index
                for info in self.results.values():
                    if info.get("idx", -1) == name_or_idx:
                        return TradingSessionResult.from_file(info["path"])

        raise ValueError(f"No result found for '{name_or_idx}' !")

    def load_config(self, name: str | int) -> str:
        """Load the configuration YAML file for a specific backtest result.

        Args:
            name (str | int): The name or index of the backtest result. If str, matches against the backtest name.
                If int, matches against the backtest index.

        Returns:
            str: The contents of the configuration YAML file as a string.

        Raises:
            ValueError: If no backtest result is found matching the provided name/index.
        """
        p = None
        for info in self.results.values():
            match name:
                case int():
                    if info.get("idx", -1) == name:
                        n = info.get("name", "")
                        p = info.get("path", {})
                        break
                case str():
                    if info.get("name", "") == name:
                        n = info.get("name", "")
                        p = info.get("path", {})
                        break
        if p is None:
            raise ValueError(f"No result found for {name}")

        # - name may have .1, .2, etc. so we need to remove it
        n = n.split(".")[0] if "." in n else n
        with zipfile.ZipFile(p, "r") as zip_ref:
            return zip_ref.read(f"{n}.yaml").decode("utf-8")

    def delete(self, name: str | int | list[int] | list[str] | slice):
        """Delete one or more backtest results.

        Args:
            name: Identifier(s) for the backtest result(s) to delete. Can be:
                - str: Name of backtest or regex pattern to match multiple backtests
                - int: Index of specific backtest
                - list[int]: List of backtest indices
                - list[str]: List of backtest names
                - slice: Range of backtest indices to delete

        Prints:
            Message confirming which backtest(s) were deleted, or error if none found.
            Deleted backtest names are shown in red text.

        Note:
            - For string names, supports regex pattern matching against backtest names and strategy class names
            - Deletes the underlying results files and reloads the results index
            - Operation is irreversible
        """

        def _del_idx(idx):
            for info in self.results.values():
                if info.get("idx", -1) == idx:
                    Path(info["path"]).unlink()
                    return info.get("name", idx)
            return None

        match name:
            case str():
                nms = [_del_idx(i) for i in self._find_indices(name)]
                self.reload()
                print(f" -> Deleted {red(', '.join(nms))} ...")
                return

            case list():
                nms = [_del_idx(i) for i in name]
                self.reload()
                print(f" -> Deleted {red(', '.join(nms))} ...")
                return

            case slice():
                nms = [_del_idx(i) for i in range(name.start, name.stop, name.step if name.step else 1)]
                self.reload()
                print(f" -> Deleted {red(', '.join(nms))} ...")
                return

        for info in self.results.values():
            match name:
                case int():
                    if info.get("idx", -1) == name:
                        Path(info["path"]).unlink()
                        print(f" -> Deleted {red(info.get('name', name))} ...")
                        self.reload()
                        return
                case str():
                    if info.get("name", "") == name:
                        Path(info["path"]).unlink()
                        print(f" -> Deleted {red(info.get('name', name))} ...")
                        self.reload()
                        return
        print(f" -> No results found for {red(name)} !")

    def _find_indices(self, regex: str):
        for n in sorted(self.results.keys()):
            info = self.results[n]
            s_cls = info.get("strategy_class", "").split(".")[-1]

            try:
                if not re.match(regex, n, re.IGNORECASE):
                    # if not re.match(regex, s_cls, re.IGNORECASE):
                    continue
            except Exception:
                if regex.lower() != n.lower() and regex.lower() != s_cls.lower():
                    continue

            yield info.get("idx", -1)

    def list(
        self,
        regex: str = "",
        with_metrics=True,
        params=False,
        as_table=False,
        pretty_print=False,
        sort_by: str | None = "sharpe",
        ascending=False,
        show_variations=True,
    ):
        """List backtesting results with optional filtering and formatting.

        Args:
            - regex (str, optional): Regular expression pattern to filter results by strategy name or class. Defaults to "".
            - with_metrics (bool, optional): Whether to include performance metrics in output. Defaults to True.
            - params (bool, optional): Whether to display strategy parameters. Defaults to False.
            - as_table (bool, optional): Return results as a pandas DataFrame instead of printing. Defaults to False.

        Returns:
            - Optional[pd.DataFrame]: If as_table=True, returns a DataFrame containing the results sorted by creation time.
            - Otherwise prints formatted results to console.
        """
        _t_rep = []
        for n in sorted(self.results.keys()):
            info = self.results[n]
            s_cls = info.get("strategy_class", "").split(".")[-1]

            if regex:
                if not re.match(regex, n, re.IGNORECASE):
                    # if not re.match(regex, s_cls, re.IGNORECASE):
                    continue

            name = info.get("name", "")
            smbs = ", ".join(info.get("symbols", list()))
            start = pd.Timestamp(info.get("start", "")).round("1s")
            stop = pd.Timestamp(info.get("stop", "")).round("1s")
            dscr = info.get("description", "")
            created = pd.Timestamp(info.get("creation_time", "")).round("1s")
            metrics = info.get("performance", {})
            author = info.get("author", "")
            _s = f"{yellow(str(info.get('idx')))} - {red(name)} ::: {magenta(created)} by {cyan(author)}"

            _one_line_dscr = ""
            if dscr:
                dscr = dscr.split("\n")
                for _d in dscr:
                    _s += f"\n\t{magenta('# ' + _d)}"
                    _one_line_dscr += "\u25cf " + _d + "\n"

            _s += f"\n\tstrategy: {green(s_cls)}"
            _s += f"\n\tinterval: {blue(start)} - {blue(stop)}"
            _s += f"\n\tcapital: {blue(info.get('capital', ''))} {info.get('base_currency', '')} ({info.get('commissions', '')})"
            _s += f"\n\tinstruments: {blue(smbs)}"
            if params:
                formats = ["{" + f":<{i}" + "}" for i in [50]]
                _p = pd.DataFrame.from_dict(info.get("parameters", {}), orient="index")
                for i in _p.to_string(
                    max_colwidth=30,
                    header=False,
                    formatters=[(lambda x: cyan(fmt.format(str(x)))) for fmt in formats],
                    justify="left",
                ).split("\n"):
                    _s += f"\n\t | {yellow(i)}"

            if not as_table:
                print(_s)

            if with_metrics:
                _m_repr = (
                    pd.DataFrame.from_dict(metrics, orient="index")
                    .T[["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]]
                    .astype(float)
                )
                _m_repr = _m_repr.round(3).to_string(index=False)
                _h, _v = _m_repr.split("\n")
                if not as_table:
                    print("\t " + red(_h))
                    print("\t " + cyan(_v))

                if not as_table:
                    print()
                else:
                    metrics = {
                        m: round(v, 3)
                        for m, v in metrics.items()
                        if m in ["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]
                    }
                    _t_rep.append(
                        {"Index": info.get("idx", ""), "Strategy": name}
                        | metrics
                        | {
                            "start": start,
                            "stop": stop,
                            "Created": created,
                            "Author": author,
                            "Description": _one_line_dscr,
                        },
                    )

        # - variations (only if not as_table for the time being)
        if not as_table and show_variations:
            for _i, vi in self.variations.items():
                n = vi.get("name", "")
                if regex:
                    if not re.match(regex, n, re.IGNORECASE):
                        continue

                _s = f"{yellow(str(_i))} - {red(str(n))} set of {len(vi.get('variations'))} variations ::: {magenta(vi.get('created'))} by {cyan(vi.get('author'))}"

                dscr = vi.get("description", "").split("\n")
                for _d in dscr:
                    _s += f"\n\t{magenta('# ' + _d)}"

                _mtrx = {}
                for v in vi.get("variations", []):
                    _nm = v.get("name", "")
                    _nm = _nm.split("_")[-1].strip("()")
                    _mtrx[_nm] = v.get("performance", {})

                _m_repr = pd.DataFrame.from_dict(_mtrx, orient="index")[
                    ["gain", "cagr", "sharpe", "qr", "max_dd_pct", "mdd_usd", "fees", "execs"]
                ].astype(float)
                _m_repr = _m_repr.round(3)
                _m_repr = _m_repr.sort_values(by=sort_by, ascending=ascending) if sort_by else _m_repr
                _m_repr = _m_repr.to_string(index=True)

                print(_s)
                for _i, _l in enumerate(_m_repr.split("\n")):
                    if _i == 0:
                        print("\t " + red(_l))
                    else:
                        print("\t " + blue(_l))

        if as_table:
            _df = pd.DataFrame.from_records(_t_rep, index="Index")
            _df = _df.sort_values(by=sort_by, ascending=ascending) if sort_by else _df
            if pretty_print:
                from IPython.display import HTML

                return HTML(
                    _df.to_html()
                    .replace("\\n", "<br><hr style='border-color: #005000; '/>")
                    .replace("<td>", '<td align="left" valign="top">')
                )
            return _df
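BacktestsResultsManager is driven through a handful of entry points documented in its class docstring. A minimal usage sketch follows; it assumes the module lives at qubx/backtester/management.py (as the file listing indicates), and the results directory and indices are hypothetical.

from qubx.backtester.management import BacktestsResultsManager

mgr = BacktestsResultsManager("results")       # scans results/**/*.zip for info.yml
mgr.list(regex="momentum", with_metrics=True)  # print matching runs with their metrics
session = mgr.load(1)                          # TradingSessionResult loaded by index
cfg = mgr.load_config(1)                       # raw YAML config stored in the zip
mgr.delete([2, 3])                             # remove results by index (irreversible)

Since load accepts a name, an index, a list, or a slice (and delete accepts the same identifiers), mgr[1:4] or mgr.delete("momentum.*") follow the same patterns.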