Qubx 0.5.7__cp312-cp312-manylinux_2_39_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of Qubx might be problematic.
- qubx/__init__.py +207 -0
- qubx/_nb_magic.py +100 -0
- qubx/backtester/__init__.py +5 -0
- qubx/backtester/account.py +145 -0
- qubx/backtester/broker.py +87 -0
- qubx/backtester/data.py +296 -0
- qubx/backtester/management.py +378 -0
- qubx/backtester/ome.py +296 -0
- qubx/backtester/optimization.py +201 -0
- qubx/backtester/simulated_data.py +558 -0
- qubx/backtester/simulator.py +362 -0
- qubx/backtester/utils.py +780 -0
- qubx/cli/__init__.py +0 -0
- qubx/cli/commands.py +67 -0
- qubx/connectors/ccxt/__init__.py +0 -0
- qubx/connectors/ccxt/account.py +495 -0
- qubx/connectors/ccxt/broker.py +132 -0
- qubx/connectors/ccxt/customizations.py +193 -0
- qubx/connectors/ccxt/data.py +612 -0
- qubx/connectors/ccxt/exceptions.py +17 -0
- qubx/connectors/ccxt/factory.py +93 -0
- qubx/connectors/ccxt/utils.py +307 -0
- qubx/core/__init__.py +0 -0
- qubx/core/account.py +251 -0
- qubx/core/basics.py +850 -0
- qubx/core/context.py +420 -0
- qubx/core/exceptions.py +38 -0
- qubx/core/helpers.py +480 -0
- qubx/core/interfaces.py +1150 -0
- qubx/core/loggers.py +514 -0
- qubx/core/lookups.py +475 -0
- qubx/core/metrics.py +1512 -0
- qubx/core/mixins/__init__.py +13 -0
- qubx/core/mixins/market.py +94 -0
- qubx/core/mixins/processing.py +428 -0
- qubx/core/mixins/subscription.py +203 -0
- qubx/core/mixins/trading.py +88 -0
- qubx/core/mixins/universe.py +270 -0
- qubx/core/series.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/series.pxd +125 -0
- qubx/core/series.pyi +118 -0
- qubx/core/series.pyx +988 -0
- qubx/core/utils.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/core/utils.pyi +6 -0
- qubx/core/utils.pyx +62 -0
- qubx/data/__init__.py +25 -0
- qubx/data/helpers.py +416 -0
- qubx/data/readers.py +1562 -0
- qubx/data/tardis.py +100 -0
- qubx/gathering/simplest.py +88 -0
- qubx/math/__init__.py +3 -0
- qubx/math/stats.py +129 -0
- qubx/pandaz/__init__.py +23 -0
- qubx/pandaz/ta.py +2757 -0
- qubx/pandaz/utils.py +638 -0
- qubx/resources/instruments/symbols-binance.cm.json +1 -0
- qubx/resources/instruments/symbols-binance.json +1 -0
- qubx/resources/instruments/symbols-binance.um.json +1 -0
- qubx/resources/instruments/symbols-bitfinex.f.json +1 -0
- qubx/resources/instruments/symbols-bitfinex.json +1 -0
- qubx/resources/instruments/symbols-kraken.f.json +1 -0
- qubx/resources/instruments/symbols-kraken.json +1 -0
- qubx/ta/__init__.py +0 -0
- qubx/ta/indicators.cpython-312-x86_64-linux-gnu.so +0 -0
- qubx/ta/indicators.pxd +149 -0
- qubx/ta/indicators.pyi +41 -0
- qubx/ta/indicators.pyx +787 -0
- qubx/trackers/__init__.py +3 -0
- qubx/trackers/abvanced.py +236 -0
- qubx/trackers/composite.py +146 -0
- qubx/trackers/rebalancers.py +129 -0
- qubx/trackers/riskctrl.py +641 -0
- qubx/trackers/sizers.py +235 -0
- qubx/utils/__init__.py +5 -0
- qubx/utils/_pyxreloader.py +281 -0
- qubx/utils/charting/lookinglass.py +1057 -0
- qubx/utils/charting/mpl_helpers.py +1183 -0
- qubx/utils/marketdata/binance.py +284 -0
- qubx/utils/marketdata/ccxt.py +90 -0
- qubx/utils/marketdata/dukas.py +130 -0
- qubx/utils/misc.py +541 -0
- qubx/utils/ntp.py +63 -0
- qubx/utils/numbers_utils.py +7 -0
- qubx/utils/orderbook.py +491 -0
- qubx/utils/plotting/__init__.py +0 -0
- qubx/utils/plotting/dashboard.py +150 -0
- qubx/utils/plotting/data.py +137 -0
- qubx/utils/plotting/interfaces.py +25 -0
- qubx/utils/plotting/renderers/__init__.py +0 -0
- qubx/utils/plotting/renderers/plotly.py +0 -0
- qubx/utils/runner/__init__.py +1 -0
- qubx/utils/runner/_jupyter_runner.pyt +60 -0
- qubx/utils/runner/accounts.py +88 -0
- qubx/utils/runner/configs.py +65 -0
- qubx/utils/runner/runner.py +470 -0
- qubx/utils/time.py +312 -0
- qubx-0.5.7.dist-info/METADATA +105 -0
- qubx-0.5.7.dist-info/RECORD +100 -0
- qubx-0.5.7.dist-info/WHEEL +4 -0
- qubx-0.5.7.dist-info/entry_points.txt +3 -0
qubx/core/metrics.py
ADDED
@@ -0,0 +1,1512 @@
import base64
import os
import re
from copy import copy
from io import BytesIO
from itertools import chain
from pathlib import Path
from typing import Any, Callable, List, Tuple

import matplotlib
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import yaml
from IPython.display import HTML
from scipy import stats
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS

from qubx import logger
from qubx.core.basics import Instrument
from qubx.core.series import OHLCV
from qubx.pandaz.utils import ohlc_resample
from qubx.utils.charting.lookinglass import LookingGlass
from qubx.utils.charting.mpl_helpers import sbp
from qubx.utils.misc import makedirs, version
from qubx.utils.time import infer_series_frequency

YEARLY = 1
MONTHLY = 12
WEEKLY = 52
DAILY = 252
DAILY_365 = 365
HOURLY = DAILY * 6.5
MINUTELY = HOURLY * 60
HOURLY_FX = DAILY * 24
MINUTELY_FX = HOURLY_FX * 60

_D1 = pd.Timedelta("1D")
_W1 = pd.Timedelta("1W")

def absmaxdd(data: List | Tuple | pd.Series | np.ndarray) -> Tuple[float, int, int, int, pd.Series]:
    """
    Calculates the maximum absolute drawdown of series data.

    Args:
        data: vector of doubles. Data may be presented as a list,
              tuple, numpy array or pandas Series object.

    Returns:
        (max_abs_dd, d_start, d_peak, d_recovered, dd_data)

        Where:
        - max_abs_dd: absolute maximal drawdown value
        - d_start: index in the data array where the drawdown starts
        - d_peak: index where the drawdown reaches its maximal value
        - d_recovered: index where the drawdown is fully recovered
        - dd_data: drawdown series

    Example:

        mdd, ds, dp, dr, dd_data = absmaxdd(np.random.randn(1, 100).cumsum())
    """
    if not isinstance(data, (list, tuple, np.ndarray, pd.Series)):
        raise TypeError("Unknown type of input series")

    datatype = type(data)

    if datatype is pd.Series:
        indexes = data.index
        data = data.values
    elif datatype is not np.ndarray:
        data = np.array(data)

    dd = np.maximum.accumulate(data) - data
    mdd = dd.max()
    d_peak = dd.argmax()

    if mdd == 0:
        return 0, 0, 0, 0, [0]

    zeros_ixs = np.where(dd == 0)[0]
    zeros_ixs = np.insert(zeros_ixs, 0, 0)
    zeros_ixs = np.append(zeros_ixs, dd.size)

    d_start = zeros_ixs[zeros_ixs < d_peak][-1]
    d_recover = zeros_ixs[zeros_ixs > d_peak][0]

    if d_recover >= len(data):
        d_recover = len(data) - 1

    if datatype is pd.Series:
        dd = pd.Series(dd, index=indexes)

    return mdd, d_start, d_peak, d_recover, dd
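
# Usage sketch (the values below follow directly from the algorithm above, assuming a
# plain list input):
#   >>> eq = [0.0, 1.0, 0.5, 2.0, 1.0, 3.0]
#   >>> mdd, d_start, d_peak, d_recover, dd = absmaxdd(eq)
#   # mdd == 1.0 (deepest drop, 2.0 -> 1.0); d_start == 3, d_peak == 4, d_recover == 5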


def max_drawdown_pct(returns):
    """
    Finds the maximum drawdown of a strategy's returns in percent.

    :param returns: pd.Series or np.ndarray daily returns of the strategy, noncumulative
    :return: maximum drawdown in percent
    """
    if len(returns) < 1:
        return np.nan

    if isinstance(returns, pd.Series):
        returns = returns.values

    # zero-out NaNs and infinities
    returns[np.isnan(returns) | np.isinf(returns)] = 0.0

    cumrets = 100 * (returns + 1).cumprod(axis=0)
    max_return = np.fmax.accumulate(cumrets)
    return np.nanmin((cumrets - max_return) / max_return)


def portfolio_returns(portfolio_log: pd.DataFrame, method="pct", init_cash: float = 0.0) -> pd.Series:
    """
    Calculates returns based on the specified method.

    :param portfolio_log: portfolio log frame
    :param method: method to calculate, there are 3 main methods:
           - percentage on equity ('pct', 'equity', 'on equity')
           - percentage on previous portfolio value ('gmv', 'gross')
           - percentage on fixed deposit amount ('fixed')

    :param init_cash: must be > 0 if the 'fixed' method is used
    :return: returns series
    """
    if "Total_PnL" not in portfolio_log.columns:
        portfolio_log = calculate_total_pnl(portfolio_log, split_cumulative=True)

    if method in ["pct", "equity", "on equity"]:
        # 'standard' percentage changes; also takes the initial deposit into account
        rets = (portfolio_log["Total_PnL"] + init_cash).pct_change()
    elif method in ["gmv", "gross"]:
        # today's return is a percentage of yesterday's portfolio value (in USD)
        rets = (
            portfolio_log["Total_PnL"].diff() / (portfolio_log.filter(regex=".*_Value").abs().sum(axis=1).shift(1))
        ).fillna(0)
    elif method in ["fixed"]:
        # return is the PnL change as a percentage of the initial deposit (for fixed buying power)
        if init_cash <= 0:
            raise ValueError("You must specify exact initial cash value when using 'fixed' method")
        rets = portfolio_log["Total_PnL"].diff() / init_cash
    else:
        raise ValueError("Unknown returns calculation method '%s'" % method)

    # cleanup returns
    rets.name = "Returns"
    rets[np.isinf(abs(rets))] = 0
    rets[np.isnan(rets)] = 0

    return rets
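
# Usage sketch ("pfl_log" is a hypothetical portfolio log with the "<SYMBOL>_PnL" /
# "<SYMBOL>_Value" columns this module expects; see calculate_total_pnl below):
#   >>> rets_eq = portfolio_returns(pfl_log, method="pct", init_cash=10_000)     # on equity
#   >>> rets_bp = portfolio_returns(pfl_log, method="fixed", init_cash=10_000)   # on fixed deposit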


def cagr(returns, periods=DAILY):
    """
    Calculates the Compound Annual Growth Rate (CAGR) of the portfolio by determining
    the number of years and then deriving a compound annualized rate from the total return.

    :param returns: a pandas Series or np.ndarray representing the returns
    :param periods: Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :return: CAGR value
    """
    if len(returns) < 1:
        return np.nan

    cumrets = (returns + 1).cumprod(axis=0)
    years = len(cumrets) / float(periods)
    return (cumrets.iloc[-1] ** (1.0 / years)) - 1.0
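
# Worked example (minimal sketch): 252 daily returns of 0.1% compound to a factor of
# 1.001**252 ≈ 1.286 over exactly one year, so:
#   >>> cagr(pd.Series([0.001] * 252), periods=DAILY)   # ≈ 0.2864, i.e. ~28.6% per year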


def calmar_ratio(returns, periods=DAILY):
    """
    Calculates the Calmar ratio, or drawdown ratio, of a strategy.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param periods: Defines the periodicity of the 'returns' data for purposes of annualizing.
           Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :return: Calmar ratio (drawdown ratio) as float
    """
    max_dd = max_drawdown_pct(returns)
    if max_dd < 0:
        temp = cagr(returns, periods) / abs(max_dd)
    else:
        return np.nan

    if np.isinf(temp):
        return np.nan

    return temp


def sharpe_ratio(returns, risk_free=0.0, periods=DAILY) -> float:
    """
    Calculates the Sharpe ratio.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param risk_free: constant risk-free return throughout the period
    :param periods: Defines the periodicity of the 'returns' data for purposes of annualizing.
           Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :return: Sharpe ratio
    """
    if len(returns) < 2:
        return np.nan

    returns_risk_adj = returns - risk_free
    returns_risk_adj = returns_risk_adj[~np.isnan(returns_risk_adj)]

    if np.std(returns_risk_adj, ddof=1) == 0:
        return np.nan

    return np.mean(returns_risk_adj) / np.std(returns_risk_adj, ddof=1) * np.sqrt(periods)
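
# Usage sketch: annualization multiplies the per-period ratio by sqrt(periods), so for
# daily data the result is mean/std scaled by sqrt(252):
#   >>> r = pd.Series(np.random.default_rng(0).normal(5e-4, 1e-2, size=252))
#   >>> sharpe_ratio(r, risk_free=0.0, periods=DAILY)   # ≈ mean(r)/std(r) * sqrt(252)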


def rolling_sharpe_ratio(returns, risk_free=0.0, periods=DAILY) -> pd.Series:
    """
    Rolling Sharpe ratio.

    :param returns: pd.Series periodic returns of the strategy, noncumulative
    :param risk_free: constant risk-free return throughout the period
    :param periods: rolling window length
    :return: rolling Sharpe ratio series (named "RollingSharpe")
    """
    returns_risk_adj = returns - risk_free
    returns_risk_adj = returns_risk_adj[~np.isnan(returns_risk_adj)]
    rolling = returns_risk_adj.rolling(window=periods)
    return pd.Series(np.sqrt(periods) * (rolling.mean() / rolling.std()), name="RollingSharpe")


def sortino_ratio(returns: pd.Series, required_return=0, periods=DAILY, _downside_risk=None) -> float:
    """
    Calculates the Sortino ratio of a strategy.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param required_return: minimum acceptable return
    :param periods: Defines the periodicity of the 'returns' data for purposes of annualizing.
           Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :param _downside_risk: the downside risk of the given inputs, if known. Will be calculated if not provided
    :return: annualized Sortino ratio
    """
    if len(returns) < 2:
        return np.nan

    mu = np.nanmean(returns - required_return, axis=0)
    dsr = _downside_risk if _downside_risk is not None else downside_risk(returns, required_return)
    if dsr == 0.0:
        return np.nan if mu == 0 else np.inf
    return periods * mu / dsr
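
# Behavior sketch (follows directly from the code above): when no return falls below the
# target, the downside deviation is zero and the ratio degenerates to +inf:
#   >>> sortino_ratio(pd.Series([0.01, 0.02, 0.015]), required_return=0, periods=DAILY)
#   inf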


def information_ratio(returns, factor_returns) -> float:
    """
    Calculates the Information ratio of a strategy (see https://en.wikipedia.org/wiki/information_ratio)

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param factor_returns: benchmark return to compare returns against
    :return: information ratio
    """
    if len(returns) < 2:
        return np.nan

    active_return = returns - factor_returns
    tracking_error = np.nanstd(active_return, ddof=1)
    if np.isnan(tracking_error):
        return 0.0
    if tracking_error == 0:
        return np.nan
    return np.nanmean(active_return) / tracking_error


def downside_risk(returns, required_return=0.0, periods=DAILY):
    """
    Calculates the downside deviation below a threshold.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param required_return: minimum acceptable return
    :param periods: Defines the periodicity of the 'returns' data for purposes of annualizing.
           Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :return: annualized downside deviation
    """
    if len(returns) < 1:
        return np.nan

    downside_diff = (returns - required_return).copy()
    downside_diff[downside_diff > 0] = 0.0
    mean_squares = np.nanmean(np.square(downside_diff), axis=0)
    ds_risk = np.sqrt(mean_squares) * np.sqrt(periods)

    if len(returns.shape) == 2 and isinstance(returns, pd.DataFrame):
        ds_risk = pd.Series(ds_risk, index=returns.columns)

    return ds_risk


def omega_ratio(returns, risk_free=0.0, required_return=0.0, periods=DAILY):
    """
    Omega ratio (see https://en.wikipedia.org/wiki/Omega_ratio for more details)

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param risk_free: constant risk-free return throughout the period
    :param required_return: Minimum acceptable return of the investor. Threshold over which to
           consider positive vs negative returns. It will be converted to a
           value appropriate for the period of the returns. E.g. an annual minimum
           acceptable return of 100 will translate to a minimum acceptable
           return of 0.018.
    :param periods: Factor used to convert the required_return into a daily
           value. Enter 1 if no time period conversion is necessary.
    :return: Omega ratio
    """
    if len(returns) < 2:
        return np.nan

    if periods == 1:
        return_threshold = required_return
    elif required_return <= -1:
        return np.nan
    else:
        return_threshold = (1 + required_return) ** (1.0 / periods) - 1

    returns_less_thresh = returns - risk_free - return_threshold
    numer = sum(returns_less_thresh[returns_less_thresh > 0.0])
    denom = -1.0 * sum(returns_less_thresh[returns_less_thresh < 0.0])

    return (numer / denom) if denom > 0.0 else np.nan
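
# Worked example (minimal sketch): with periods=1 the threshold is applied as-is, and the
# ratio is total gains over total losses around that threshold:
#   >>> omega_ratio(pd.Series([0.02, -0.01, 0.03, -0.02]), required_return=0.0, periods=1)
#   # -> 0.05 / 0.03 ≈ 1.667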


def aggregate_returns(returns: pd.Series, convert_to: str) -> pd.DataFrame | pd.Series:
    """
    Aggregates returns by the specified time period.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param convert_to: 'D', 'W', 'M', 'Y' (and anything supported by the pandas.resample method)
    :return: aggregated returns
    """

    def cumulate_returns(x):
        return ((x + 1).cumprod(axis=0) - 1).iloc[-1] if len(x) > 0 else 0.0

    str_check = convert_to.lower()
    resample_mod = None
    if str_check in ["a", "annual", "y", "yearly"]:
        resample_mod = "YE"
    elif str_check in ["m", "monthly", "mon"]:
        resample_mod = "ME"
    elif str_check in ["w", "weekly"]:
        resample_mod = "W"
    elif str_check in ["d", "daily"]:
        resample_mod = "D"
    else:
        resample_mod = convert_to

    return returns.resample(resample_mod).apply(cumulate_returns)
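
# Usage sketch: intraperiod returns are compounded, not summed, within each bucket:
#   >>> r = pd.Series(0.001, index=pd.date_range("2024-01-01", periods=60, freq="1D"))
#   >>> aggregate_returns(r, "monthly")   # one compounded return per month-end ("ME")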


def annual_volatility(returns, periods=DAILY, alpha=2.0):
    """
    Calculates the annual volatility of a strategy.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :param periods: Defines the periodicity of the 'returns' data for purposes of annualizing.
           Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :param alpha: scaling relation (Levy stability exponent).
    :return: annualized volatility
    """
    if len(returns) < 2:
        return np.nan

    return np.nanstd(returns, ddof=1) * (periods ** (1.0 / alpha))


def stability_of_returns(returns):
    """
    Calculates the R-squared of a linear fit to the cumulative log returns.
    Computes an ordinary least squares linear fit, and returns R-squared.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :return: R-squared
    """
    if len(returns) < 2:
        return np.nan

    returns = np.asanyarray(returns)
    returns = returns[~np.isnan(returns)]
    cum_log_returns = np.log1p(returns, where=returns > -1).cumsum()
    rhat = stats.linregress(np.arange(len(cum_log_returns)), cum_log_returns).rvalue  # type: ignore
    return rhat**2


def tail_ratio(returns):
    """
    Calculates the ratio between the right (95%) and left (5%) tails.

    For example, a ratio of 0.25 means that losses are four times as bad as profits.

    :param returns: pd.Series or np.ndarray periodic returns of the strategy, noncumulative
    :return: tail ratio
    """
    if len(returns) < 1:
        return np.nan

    returns = np.asanyarray(returns)
    returns = returns[~np.isnan(returns)]
    if len(returns) < 1:
        return np.nan

    pc5 = np.abs(np.percentile(returns, 5))

    return (np.abs(np.percentile(returns, 95)) / pc5) if pc5 != 0 else np.nan


def split_cumulative_pnl(pfl_log: pd.DataFrame) -> pd.DataFrame:
    """
    Position.pnl tracks cumulative PnL (realized + unrealized), but if we want to operate
    with PnL for every bar we need to take the diff of these cumulative series.

    :param pfl_log: position manager log (portfolio log)
    :return: frame with the split PnL
    """
    # take commissions into account (they are accumulated with cumsum)
    pl = pfl_log.filter(regex=r".*_PnL|.*_Commissions")
    if pl.shape[1] == 0:
        raise ValueError("PnL columns not found. Input frame must contain at least 1 column with '_PnL' suffix")

    pl_diff = pl.diff()

    # at the first row we use the first value of PnL
    pl_diff.loc[pl.index[0]] = pl.iloc[0]

    # substitute the new diffed PnL
    pfl_splitted = pfl_log.copy()
    pfl_splitted.loc[:, pfl_log.columns.isin(pl_diff.columns)] = pl_diff
    return pfl_splitted


def calculate_total_pnl(pfl_log: pd.DataFrame, split_cumulative=True) -> pd.DataFrame:
    """
    Finds the sum of all P&L columns (they should have the '_PnL' suffix) in the given
    portfolio log dataframe. Attaches an additional Total_PnL column with the result.

    :param pfl_log: position manager log (portfolio log)
    :param split_cumulative: set to True if cumulative PnL needs to be split [default is True]
    :return: frame with Total_PnL and Total_Commissions columns attached
    """
    n_pfl = pfl_log.copy()
    if "Total_PnL" not in n_pfl.columns:
        if split_cumulative:
            n_pfl = split_cumulative_pnl(n_pfl)

        n_pfl["Total_PnL"] = n_pfl.filter(regex=r".*_PnL").sum(axis=1)
        n_pfl["Total_Commissions"] = n_pfl.filter(regex=r".*_Commissions").sum(axis=1)

    return n_pfl
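
# Worked example (minimal sketch of the "<SYMBOL>_PnL" column convention): cumulative
# per-symbol PnL [0, 5, 7] is split into per-bar PnL and summed into Total_PnL:
#   >>> idx = pd.date_range("2024-01-01", periods=3, freq="1D")
#   >>> pfl = pd.DataFrame({"BTCUSDT_PnL": [0.0, 5.0, 7.0],
#   ...                     "BTCUSDT_Commissions": [0.0, 0.1, 0.2]}, index=idx)
#   >>> calculate_total_pnl(pfl, split_cumulative=True)["Total_PnL"].tolist()
#   [0.0, 5.0, 2.0]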


def alpha(returns, factor_returns, risk_free=0.0, period=DAILY, _beta=None):
    """
    Calculates the annualized alpha of a portfolio.

    :param returns: Daily returns of the strategy, noncumulative.
    :param factor_returns: Daily noncumulative returns of the factor to which beta is
           computed. Usually a benchmark such as the market.
    :param risk_free: Constant risk-free return throughout the period. For example, the
           interest rate on a three-month US treasury bill.
    :param period: Defines the periodicity of the 'returns' data for purposes of annualizing.
           Daily (252), Hourly (252*6.5), Minutely (252*6.5*60) etc.
    :param _beta: The beta for the given inputs, if already known. Will be calculated
           internally if not provided.
    :return: alpha
    """
    if len(returns) < 2:
        return np.nan

    if _beta is None:
        _beta = beta(returns, factor_returns, risk_free)

    adj_returns = returns - risk_free
    adj_factor_returns = factor_returns - risk_free
    alpha_series = adj_returns - (_beta * adj_factor_returns)

    return np.nanmean(alpha_series) * period


def beta(returns, benchmark_returns, risk_free=0.0):
    """
    Calculates the beta of a portfolio.

    If they are pd.Series, expects returns and benchmark_returns to have already
    been aligned on their labels. If np.ndarray, these arguments should have
    the same shape.

    :param returns: pd.Series or np.ndarray. Daily returns of the strategy, noncumulative.
    :param benchmark_returns: pd.Series or np.ndarray. Daily noncumulative returns of the factor to which beta is
           computed. Usually a benchmark such as the market.
    :param risk_free: Constant risk-free return throughout the period. For example, the interest rate
           on a three-month US treasury bill.
    :return: beta
    """
    if len(returns) < 2 or len(benchmark_returns) < 2:
        return np.nan

    # Filter out dates with np.nan as a return value

    if len(returns) != len(benchmark_returns):
        if len(returns) > len(benchmark_returns):
            returns = returns.drop(returns.index.difference(benchmark_returns.index))
        else:
            benchmark_returns = benchmark_returns.drop(benchmark_returns.index.difference(returns.index))

    joint = np.vstack([returns - risk_free, benchmark_returns])
    joint = joint[:, ~np.isnan(joint).any(axis=0)]
    if joint.shape[1] < 2:
        return np.nan

    cov = np.cov(joint, ddof=0)

    if np.absolute(cov[1, 1]) < 1.0e-30:
        return np.nan

    return cov[0, 1] / cov[1, 1]
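
# Worked example (minimal sketch): a strategy that always moves twice as much as its
# benchmark has cov(2b, b) / var(b) == 2, so beta ≈ 2:
#   >>> bm = pd.Series(np.random.default_rng(1).normal(0.0, 0.01, 300))
#   >>> beta(2 * bm, bm)   # ≈ 2.0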


def var_cov_var(P_usd, mu, sigma, c=0.95):
    """
    Variance-covariance calculation of the daily Value-at-Risk
    using confidence level c, with mean of returns mu
    and standard deviation of returns sigma, on a portfolio of value P_usd.

    https://www.quantstart.com/articles/Value-at-Risk-VaR-for-Algorithmic-Trading-Risk-Management-Part-I

    also here:
    http://stackoverflow.com/questions/30878265/calculating-value-at-risk-or-most-probable-loss-for-a-given-distribution-of-r#30895548

    :param P_usd: portfolio value
    :param c: confidence level
    :param mu: mean of returns
    :param sigma: standard deviation of returns
    :return: value at risk
    """
    alpha = norm.ppf(1 - c, mu, sigma) if sigma != 0.0 else 0
    return P_usd - P_usd * (alpha + 1)
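
# Worked example (minimal sketch): with zero mean and 1% daily volatility, the 5% return
# quantile is norm.ppf(0.05) * 0.01 ≈ -0.0164, so the 95% one-day VaR of a $1M book is:
#   >>> var_cov_var(1_000_000, mu=0.0, sigma=0.01, c=0.95)   # ≈ 16448.5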


def qr(equity):
    """
    QR = R2 * B / S

    Where:
        B - slope (roughly in the range of the average trade PnL, higher is better)
        R2 - r-squared metric (proportion of variance explained by the linear model, a straight line has r2 == 1)
        S - standard error, representing the volatility of the equity curve (lower is better)

    :param equity: equity (cumulative)
    :return: QR measure, or NaN if there is not enough data for the calculation
    """
    if len(equity) < 1 or all(equity == 0.0):
        return np.nan

    rgr = OLS(equity, np.vander(np.linspace(-1, 1, len(equity)), 2)).fit()
    b = rgr.params.iloc[0] if isinstance(rgr.params, pd.Series) else rgr.params[0]
    return rgr.rsquared * b / np.std(rgr.resid)
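
# Usage sketch: a smooth, steadily rising equity curve scores high (r2 near 1, small
# residual std), while a choppy curve with the same final gain scores much lower:
#   >>> eq = pd.Series(np.linspace(0.0, 100.0, 200) + np.random.default_rng(2).normal(0, 0.5, 200))
#   >>> qr(eq)   # large positive value for a near-straight line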


def monthly_returns(
    portfolio, init_cash, period="monthly", daily="pct", monthly="pct", weekly="pct", performace_period=DAILY
):
    """
    Calculate the monthly or weekly returns table along with the account balance.
    """
    pft_total = calculate_total_pnl(portfolio, split_cumulative=False)
    pft_total["Total_PnL"] = pft_total["Total_PnL"].cumsum()
    returns = portfolio_returns(pft_total, init_cash=init_cash, method=daily)
    r_daily = aggregate_returns(returns, "daily")
    print("CAGR: %.2f%%" % (100 * cagr(r_daily, performace_period)))

    if period == "weekly":
        returns = portfolio_returns(pft_total, init_cash=init_cash, method=weekly)
        r_month = aggregate_returns(returns, "weekly")
        acc_balance = init_cash + pft_total.Total_PnL.groupby(pd.Grouper(freq="1W")).last()
    else:
        returns = portfolio_returns(pft_total, init_cash=init_cash, method=monthly)
        r_month = aggregate_returns(returns, "monthly")
        acc_balance = init_cash + pft_total.Total_PnL.groupby(pd.Grouper(freq="1M")).last()

    return pd.concat((100 * r_month, acc_balance), axis=1, keys=["Returns", "Balance"])


class TradingSessionResult:
    # fmt: off
    id: int
    name: str
    start: str | pd.Timestamp
    stop: str | pd.Timestamp
    exchange: str                       # exchange name (TODO: need to think how to handle this for multiple exchanges)
    instruments: list[Instrument]       # instruments used at the start of the session (TODO: need to collect all traded instruments)
    capital: float
    base_currency: str
    commissions: str                    # commissions scheme used ("vip0_usdt" etc.)
    portfolio_log: pd.DataFrame         # portfolio log records
    executions_log: pd.DataFrame        # executed trades
    signals_log: pd.DataFrame           # signals generated by the strategy
    strategy_class: str                 # strategy fully qualified class name
    parameters: dict[str, Any]          # strategy parameters if provided
    is_simulation: bool
    creation_time: pd.Timestamp | None = None   # when the result was created
    author: str | None = None                   # who created the result
    qubx_version: str | None = None             # Qubx version used to create the result
    _metrics: dict[str, float] | None = None    # performance metrics
    variation_name: str | None = None           # variation name if this belongs to a variated set
    # fmt: on

    def __init__(
        self,
        id: int,
        name: str,
        start: str | pd.Timestamp,
        stop: str | pd.Timestamp,
        exchange: str,
        instruments: list[Instrument],
        capital: float,
        base_currency: str,
        commissions: str,
        portfolio_log: pd.DataFrame,
        executions_log: pd.DataFrame,
        signals_log: pd.DataFrame,
        strategy_class: str,
        parameters: dict[str, Any] | None = None,
        is_simulation=True,
        creation_time: str | pd.Timestamp | None = None,
        author: str | None = None,
        variation_name: str | None = None,
    ):
        self.id = id
        self.name = name
        self.start = start
        self.stop = stop
        self.exchange = exchange
        self.instruments = instruments
        self.capital = capital
        self.base_currency = base_currency
        self.commissions = commissions
        self.portfolio_log = portfolio_log
        self.executions_log = executions_log
        self.signals_log = signals_log
        self.strategy_class = strategy_class
        self.parameters = parameters if parameters else {}
        self.is_simulation = is_simulation
        self.creation_time = pd.Timestamp(creation_time) if creation_time else pd.Timestamp.now()
        self.author = author
        self.qubx_version = version()
        self.variation_name = variation_name
        self._metrics = None

    def performance(self) -> dict[str, float]:
        """
        Calculate performance metrics for the trading session.
        """
        if not self._metrics:
            # - calculate short statistics
            self._metrics = portfolio_metrics(
                self.portfolio_log,
                self.executions_log,
                self.capital,
                performance_statistics_period=DAILY_365,
                account_transactions=True,
                commission_factor=1,
            )
            # - convert timestamps to isoformat
            for k, v in self._metrics.items():
                match v:
                    case pd.Timestamp():
                        self._metrics[k] = v.isoformat()
                    case np.float64():
                        self._metrics[k] = float(v)
            # fmt: off
            for k in [
                "equity", "drawdown_usd", "drawdown_pct",
                "compound_returns", "returns_daily", "returns", "monthly_returns",
                "rolling_sharpe", "long_value", "short_value",
            ]:
                self._metrics.pop(k, None)
            # fmt: on

        return self._metrics

    @property
    def symbols(self) -> list[str]:
        """
        Extracts all traded symbols from the portfolio log.
        """
        if not self.portfolio_log.empty:
            return list(set(self.portfolio_log.columns.str.split("_").str.get(0).values))
        return []

    def config(self, short=True) -> str:
        """
        Return the configuration as a string: "test.strategies.Strategy1(parameter1=12345)"
        TODO: probably we need to return a recreated new object
        """
        _cfg = ""
        if self.strategy_class:
            _params = ", ".join([f"{k}={repr(v)}" for k, v in self.parameters.items()])
            _class = self.strategy_class.split(".")[-1] if short else self.strategy_class
            _cfg = f"{_class}({_params})"
            # _cfg = f"{{ {repr(self.name)}: {_class}({_params}) }}"
            # if instantiated: return eval(_cfg)
        return _cfg

    def info(self) -> dict[str, Any]:
        return {
            "id": self.id,
            "name": self.name,
            "start": pd.Timestamp(self.start).isoformat(),
            "stop": pd.Timestamp(self.stop).isoformat(),
            "exchange": self.exchange,
            "capital": self.capital,
            "base_currency": self.base_currency,
            "commissions": self.commissions,
            "strategy_class": self.strategy_class,
            "parameters": self.parameters,
            "is_simulation": self.is_simulation,
            "creation_time": pd.Timestamp(self.creation_time).isoformat(),
            "author": self.author,
            "qubx_version": self.qubx_version,
            "symbols": self.symbols,
            "performance": dict(self.performance()),
            "variation_name": self.variation_name,
        }

    def to_html(self, compound=True) -> HTML:
        table: pd.DataFrame = tearsheet(self, compound=compound, plot_equities=True, plot_leverage=True, no_title=True)  # type: ignore

        # - make it a bit more readable
        table.index = table.index.map(lambda x: "/".join(x.split(",")[:3]))
        _rep = table.round(3).to_html(classes="rep_table")
        _eqty = _plt_to_base64()

        _s = "Simulation Report" if self.is_simulation else "Live"
        _name = f"{_s} for (<font color='red'>{self.name}.{self.id}</font>) generated <font color='green'>{str(self.creation_time)}</font>"
        _cap = f"{self.capital} {self.base_currency} ({self.commissions} @ {self.exchange})"

        _tmpl = f"""
        <style>
        .report div {{
            font-family: 'Maven Pro', 'Roboto', 'JetBrains mono', 'Meslo LG S', 'Pragmata Pro Mono', 'hasklig semibold' !important;
            font-size: 12px; background-color: #000;
        }}
        .wrap_table th {{ text-align:center !important; font-weight: bold; font-size: 18px; color: #756732; }}
        .wrap_table td, .wrap_table tr {{ background: none !important; text-align:left !important; }}

        .rep_table table {{ width:100%;}}
        .rep_table th {{ text-align:center !important; font-weight: bold; font-size: 12px; color: #328032; }}
        .rep_table td, .rep_table tr {{ background: none !important; text-align:left !important; }}

        .flex-container {{ display: flex; align-items: flex-start; width: 100%; }}
        .table_block {{ width:100%; }}
        .wrap_table table, .wrap_table td, .wrap_table tr, .wrap_table th {{
            border: 1px solid #55554a85; border-collapse: collapse; color: #9eb9c3d9 !important; background-color: #000; padding-left: 5px;
        }}
        </style>
        <h1>{_name}</h1>
        <div class="report">
        <table class="wrap_table" width=100%>
            <tr><td width=15%>Strategy</td> <td>{self.config(False)}</td></tr>
            <tr><td width=15%>Period</td><td>{str(self.start)} : {str(self.stop)}</td></tr>
            <tr><td width=15%>Instruments</td> <td>{self.symbols}</td></tr>
            <tr><td width=15%>Capital</td> <td>{_cap}</td></tr>
            <tr><td width=15%>Author</td> <td>{self.author}</td></tr>
            <tr><td width=15%>Qubx version</td> <td>{self.qubx_version}</td></tr>
        </table>
        <div class="report">
            <table class="wrap_table" width=100%> <th>Performance</th> </table>
            {_rep}
            <table class="wrap_table" width=100%> <th>Equity</th> </table>
            <img src='{_eqty}' style="max-width:1000px; width:100%; height:450px;"/>
        </div>
        </div>
        """
        return HTML(_tmpl)

    def to_file(
        self,
        name: str,
        description: str | None = None,
        compound=True,
        archive=True,
        suffix: str | None = None,
        attachments: list[str] | None = None,
    ):
        """
        Save the trading session results to files.

        Args:
            name (str): Base name/path for saving the files
            description (str | None, optional): Description to include in the info file. Defaults to None.
            compound (bool, optional): Whether to use compound returns in the report. Defaults to True.
            archive (bool, optional): Whether to zip the output files. Defaults to True.
            suffix (str | None, optional): Optional suffix to append to the filename. Defaults to None.
            attachments (list[str] | None, optional): Additional files to include. Defaults to None.

        The following files are saved:
            - info.yml: Contains strategy configuration and metadata
            - portfolio.csv: Portfolio state log
            - executions.csv: Trade execution log
            - signals.csv: Strategy signals log
            - report.html: HTML performance report
            - Any provided attachment files

        If archive=True, all files are zipped into a single archive and the directory is removed.
        """
        import shutil

        if suffix is not None:
            name = f"{name}{suffix}"
        else:
            name = (name + self.creation_time.strftime("%Y%m%d%H%M%S")) if self.creation_time else name
        p = Path(makedirs(name))
        with open(p / "info.yml", "w") as f:
            info = self.info()
            if description:
                info["description"] = description
            yaml.safe_dump(info, f, sort_keys=False, indent=4)

        # - save logs
        self.portfolio_log.to_csv(p / "portfolio.csv")
        self.executions_log.to_csv(p / "executions.csv")
        self.signals_log.to_csv(p / "signals.csv")

        # - save report
        with open(p / "report.html", "w") as f:
            f.write(self.to_html(compound=compound).data)

        # - save attachments
        if attachments:
            for a in attachments:
                if (af := Path(a)).is_file():
                    shutil.copy(af, p / af.name)

        if archive:
            shutil.make_archive(name, "zip", p)  # type: ignore
            shutil.rmtree(p)  # type: ignore

    @staticmethod
    def from_file(path: str):
        import zipfile

        path = path + ".zip" if not path.endswith(".zip") else path
        if not os.path.exists(path):
            raise FileNotFoundError(f"File {path} not found")

        with zipfile.ZipFile(path, "r") as zip_ref:
            info = yaml.safe_load(zip_ref.read("info.yml"))
            portfolio = pd.read_csv(zip_ref.open("portfolio.csv"), index_col=["timestamp"], parse_dates=["timestamp"])
            executions = pd.read_csv(zip_ref.open("executions.csv"), index_col=["timestamp"], parse_dates=["timestamp"])
            signals = pd.read_csv(zip_ref.open("signals.csv"), index_col=["timestamp"], parse_dates=["timestamp"])

        # load the result
        _qbx_version = info.pop("qubx_version")
        _decr = info.pop("description", None)
        _perf = info.pop("performance", None)
        info["instruments"] = info.pop("symbols")
        tsr = TradingSessionResult(**info, portfolio_log=portfolio, executions_log=executions, signals_log=signals)
        tsr.qubx_version = _qbx_version
        tsr._metrics = _perf
        return tsr
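
    # Usage sketch of the save/load round trip ("results/run1" is a hypothetical path; an
    # empty suffix keeps the file name as given instead of appending the creation time):
    #   >>> result.to_file("results/run1", description="demo", archive=True, suffix="")
    #   >>> restored = TradingSessionResult.from_file("results/run1")   # reads results/run1.zip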

    def tearsheet(
        self,
        compound: bool = True,
        account_transactions=True,
        performance_statistics_period=365,
        timeframe: str | pd.Timedelta | None = None,
        sort_by: str | None = "Sharpe",
        sort_ascending: bool = False,
        plot_equities: bool = True,
        commission_factor: float = 1,
        plot_leverage: bool = False,
        use_plotly: bool = False,
        no_title: bool = False,
    ):
        return tearsheet(
            self,
            compound,
            account_transactions,
            performance_statistics_period,
            timeframe,
            sort_by,
            sort_ascending,
            plot_equities,
            commission_factor,
            plot_leverage,
            use_plotly,
            no_title,
        )

    def __repr__(self) -> str:
        _s = "Simulation" if self.is_simulation else "Live"
        _t = f"[{self.start} - {self.stop}]" if self.is_simulation else ""
        r = f"""::: {_s} {self.id} ({self.name}) {_t}
 : QUBX: {self.qubx_version}
 : Capital: {self.capital} {self.base_currency} ({self.commissions} @ {self.exchange})
 : Instruments: [{",".join(self.symbols)}]
 : Created: {self.creation_time} by {self.author}
 : Strategy: {self.config(False)}
 : Generated: {len(self.signals_log)} signals, {len(self.executions_log)} executions
"""
        _perf = pd.DataFrame.from_dict(self.performance(), orient="index").T.to_string(index=None)
        for _i, s in enumerate(_perf.split("\n")):
            r += f" : {s}\n" if _i > 0 else f" `----: {s}\n"
        return r


def portfolio_symbols(src: pd.DataFrame | TradingSessionResult) -> List[str]:
    """
    Get the list of symbols from a portfolio log.
    """
    df = src.portfolio_log if isinstance(src, TradingSessionResult) else src
    return list(df.columns[::5].str.split("_").str.get(0).values)


def pnl(
    src: pd.DataFrame | TradingSessionResult, c=1, cum=False, total=False, resample=None
) -> pd.Series | pd.DataFrame:
    """
    Extract PnL from a portfolio log.
    """
    x = src.portfolio_log if isinstance(src, TradingSessionResult) else src
    pl = x.filter(regex=".*_PnL").rename(lambda x: x.split("_")[0], axis=1)
    comms = x.filter(regex=".*_Commissions").rename(lambda x: x.split("_")[0], axis=1)
    r = pl - c * comms
    if resample:
        r = r.resample(resample).sum()
    r = r.cumsum() if cum else r
    return r.sum(axis=1) if total else r


def drop_symbols(src: pd.DataFrame | TradingSessionResult, *args, quoted="USDT") -> pd.DataFrame:
    """
    Drop symbols (in the quoted currency) from a portfolio log.
    """
    s = "|".join([f"{a}{quoted}" if not a.endswith(quoted) else a for a in args])
    df = src.portfolio_log if isinstance(src, TradingSessionResult) else src
    return df.filter(filter(lambda si: not re.match(f"^{s}_.*", si), df.columns))


def pick_symbols(src: pd.DataFrame | TradingSessionResult, *args, quoted="USDT") -> pd.DataFrame:
    """
    Select symbols (in the quoted currency) from a portfolio log.
    """
    df = src.portfolio_log if isinstance(src, TradingSessionResult) else src

    # - pick up from the execution report
    if "instrument" in df.columns and "quantity" in df.columns:
        rx = "|".join([f"{a}.*" for a in args])
        return df[df["instrument"].str.match(rx)]

    # - pick up from the PnL log report
    s = "|".join([f"{a}{quoted}" if not a.endswith(quoted) else a for a in args])
    return df.filter(filter(lambda si: re.match(f"^{s}_.*", si), df.columns))


def portfolio_metrics(
    portfolio_log: pd.DataFrame,
    executions_log: pd.DataFrame,
    init_cash: float,
    start: str | pd.Timestamp | None = None,
    end: str | pd.Timestamp | None = None,
    risk_free: float = 0.0,
    rolling_sharpe_window=12,
    account_transactions=True,
    performance_statistics_period=DAILY_365,
    **kwargs,
) -> dict:
    if len(portfolio_log) == 0:
        raise ValueError("Can't calculate statistics on an empty portfolio")

    sheet = dict()

    pft_total = calculate_total_pnl(portfolio_log, split_cumulative=False)
    pft_total["Total_PnL"] = pft_total["Total_PnL"].cumsum()
    pft_total["Total_Commissions"] = pft_total["Total_Commissions"].cumsum()

    # - if requested, account for transaction costs (commissions) in the equity
    pft_total["Total_Commissions"] *= kwargs.get("commission_factor", 1)
    if account_transactions:
        pft_total["Total_PnL"] -= pft_total["Total_Commissions"]

    # calculate returns
    returns = portfolio_returns(pft_total, init_cash=init_cash, method="pct")
    returns_on_init_bp = portfolio_returns(pft_total, init_cash=init_cash, method="fixed")

    if start:
        returns = returns[start:]
        returns_on_init_bp = returns_on_init_bp[start:]

    if end:
        returns = returns[:end]
        returns_on_init_bp = returns_on_init_bp[:end]

    # - aggregate returns to a higher timeframe
    try:
        _conversion = "daily"
        match _s_freq := infer_series_frequency(returns):
            case _ if _s_freq <= _D1.to_timedelta64():
                _conversion = "daily"
            case _ if _s_freq > _D1.to_timedelta64() and _s_freq <= _W1.to_timedelta64():
                _conversion = "weekly"
            case _:
                _conversion = "monthly"

        returns_daily = aggregate_returns(returns, _conversion)
        returns_on_init_bp = aggregate_returns(returns_on_init_bp, _conversion)
    except (ValueError, TypeError, AttributeError) as e:
        logger.warning(f"Failed to aggregate returns: {e}. Using raw returns.")
        returns_daily = returns

    # todo: add transaction_cost calculations
    equity = init_cash + pft_total["Total_PnL"]
    mdd, ddstart, ddpeak, ddrecover, dd_data = absmaxdd(equity)
    execs = len(executions_log)
    mdd_pct = 100 * dd_data / equity.cummax() if execs > 0 else pd.Series(0, index=equity.index)
    sheet["equity"] = equity
    sheet["gain"] = sheet["equity"].iloc[-1] - sheet["equity"].iloc[0]
    sheet["cagr"] = cagr(returns_daily, performance_statistics_period)
    sheet["sharpe"] = sharpe_ratio(returns_daily, risk_free, performance_statistics_period)
    sheet["qr"] = qr(equity) if execs > 0 else 0
    sheet["drawdown_usd"] = dd_data
    sheet["drawdown_pct"] = mdd_pct
    # 25-May-2019: MDE fixed Max DD pct calculations
    sheet["max_dd_pct"] = max(mdd_pct)
    # sheet["max_dd_pct_on_init"] = 100 * mdd / init_cash
    sheet["mdd_usd"] = mdd
    sheet["mdd_start"] = equity.index[ddstart]
    sheet["mdd_peak"] = equity.index[ddpeak]
    sheet["mdd_recover"] = equity.index[ddrecover]
    sheet["returns"] = returns
    sheet["returns_daily"] = returns_daily
    sheet["compound_returns"] = (returns + 1).cumprod(axis=0) - 1
    sheet["rolling_sharpe"] = rolling_sharpe_ratio(returns_daily, risk_free, periods=rolling_sharpe_window)
    sheet["sortino"] = sortino_ratio(
        returns_daily, risk_free, performance_statistics_period, _downside_risk=kwargs.pop("downside_risk", None)
    )
    sheet["calmar"] = calmar_ratio(returns_daily, performance_statistics_period)
    # sheet["ann_vol"] = annual_volatility(returns_daily)
    sheet["tail_ratio"] = tail_ratio(returns_daily)
    sheet["stability"] = stability_of_returns(returns_daily)
    sheet["monthly_returns"] = aggregate_returns(returns_daily, convert_to="mon")
    r_m = np.mean(returns_daily)
    r_s = np.std(returns_daily)
    sheet["var"] = var_cov_var(init_cash, r_m, r_s)
    sheet["avg_return"] = 100 * r_m

    # portfolio market values
    mkt_value = pft_total.filter(regex=".*_Value")
    sheet["long_value"] = mkt_value[mkt_value > 0].sum(axis=1).fillna(0)
    sheet["short_value"] = mkt_value[mkt_value < 0].sum(axis=1).fillna(0)

    # total commissions
    sheet["fees"] = pft_total["Total_Commissions"].iloc[-1]

    # executions metrics
    sheet["execs"] = execs

    return sheet


def tearsheet(
    session: TradingSessionResult | List[TradingSessionResult],
    compound: bool = True,
    account_transactions=True,
    performance_statistics_period=365,
    timeframe: str | pd.Timedelta | None = None,
    sort_by: str | None = "Sharpe",
    sort_ascending: bool = False,
    plot_equities: bool = True,
    commission_factor: float = 1,
    plot_leverage: bool = False,
    use_plotly: bool = False,
    no_title: bool = False,
):
    """
    Generate a tearsheet for one or multiple trading sessions.

    This function creates a performance report and visualization for trading session(s).
    It can handle both single and multiple sessions, providing different outputs accordingly.

    Parameters:
    -----------
    session : TradingSessionResult | List[TradingSessionResult]
        The trading session(s) to analyze. Can be a single session or a list of sessions.
    compound : bool, optional
        Whether to use compound returns for charting (default is True).
    account_transactions : bool, optional
        Whether to account for transactions in calculations (default is True).
    performance_statistics_period : int, optional
        The period for performance statistics calculations in days (default is 365).
    timeframe : str | pd.Timedelta, optional
        The timeframe for resampling data. If None, it will be estimated (default is None).
    sort_by : str, optional
        The metric to sort multiple sessions by (default is "Sharpe").
    sort_ascending : bool, optional
        Whether to sort in ascending order (default is False).
    plot_equities : bool, optional
        Whether to plot equity curves for multiple sessions (default is True).
    commission_factor : float, optional
        Factor to apply to commissions (default is 1).
    plot_leverage : bool, optional
        Whether to include a leverage plot for a single session (default is False).
    use_plotly : bool, optional
        Whether to use Plotly for visualizations instead of Matplotlib (default is False).
    no_title : bool, optional
        Whether to omit the report title (default is False).

    Returns:
    --------
    For a single session:
        A Plotly or Matplotlib visualization of the session's performance.
    For multiple sessions:
        A pandas DataFrame containing performance metrics for all sessions,
        optionally accompanied by a plot of equity curves.
    """
    if timeframe is None:
        timeframe = _estimate_timeframe(session)

    if isinstance(session, list):
        if len(session) == 1:
            return _tearsheet_single(
                session[0],
                compound,
                account_transactions,
                performance_statistics_period,
                timeframe=timeframe,
                commission_factor=commission_factor,
                use_plotly=use_plotly,
                plot_leverage=plot_leverage,
                no_title=no_title,
            )
        else:
            import matplotlib.pyplot as plt

            # multiple sessions - just show the table
            _rs = []
            # _eq = []
            for s in session:
                report, mtrx = _pfl_metrics_prepare(
                    s, account_transactions, performance_statistics_period, commission_factor=commission_factor
                )
                _rs.append(report)
                if plot_equities:
                    if compound:
                        # _eq.append(pd.Series(100 * mtrx["compound_returns"], name=s.trading_id))
                        compound_returns = mtrx["compound_returns"].resample(timeframe).ffill()
                        plt.plot(100 * compound_returns, label=s.name)
                    else:
                        # _eq.append(pd.Series(mtrx["equity"], name=s.trading_id))
                        equity = mtrx["equity"].resample(timeframe).ffill()
                        plt.plot(equity, label=s.name)

            if plot_equities:
                if len(session) <= 15:
                    plt.legend(ncol=max(1, len(session) // 5))
                plt.title("Comparison of Equity Curves")

            report = pd.concat(_rs, axis=1).T
            report["id"] = [s.id for s in session]
            report = report.set_index("id", append=True).swaplevel()
            if sort_by:
                report = report.sort_values(by=sort_by, ascending=sort_ascending)
            return report

    else:
        return _tearsheet_single(
            session,
            compound,
            account_transactions,
            performance_statistics_period,
            timeframe=timeframe,
            commission_factor=commission_factor,
            use_plotly=use_plotly,
            plot_leverage=plot_leverage,
            no_title=no_title,
        )
|
|
1184
|
+
|
|
1185
|
+
|
|
1186
|
+
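
# Example (hypothetical usage): `results` stands for a list of TradingSessionResult
# objects produced by the backtester (not shown here). Comparing several sessions
# returns a metrics DataFrame sorted by Sharpe (descending by default), so the best
# run comes first:
#
#   report = tearsheet(results, compound=True, sort_by="Sharpe")
#   best_run = report.index[0]
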
def get_cum_pnl(
    sessions: TradingSessionResult | list[TradingSessionResult],
    account_transactions: bool = True,
    timeframe: str | None = None,
) -> pd.DataFrame | pd.Series:
    """
    Cumulative compounded return per session (as a fraction of initial capital),
    resampled to `timeframe`.
    """
    if timeframe is None:
        timeframe = _estimate_timeframe(sessions)

    def _get_single_equity(session: TradingSessionResult) -> pd.Series:
        pnl = calculate_total_pnl(session.portfolio_log, split_cumulative=False)
        pnl["Total_PnL"] = pnl["Total_PnL"].cumsum()
        if account_transactions:
            pnl["Total_PnL"] -= pnl["Total_Commissions"].cumsum()
        returns = portfolio_returns(pnl, init_cash=session.capital)
        return ((returns + 1).cumprod(axis=0) - 1).resample(timeframe).ffill().rename(session.name)

    if isinstance(sessions, list):
        return pd.concat([_get_single_equity(s) for s in sessions], axis=1, names=[s.name for s in sessions])
    else:
        return _get_single_equity(sessions)
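
# Example (hypothetical usage): cumulative compounded returns of several sessions as
# one DataFrame, ready for plotting or correlation analysis. `results` is assumed to
# be a list of TradingSessionResult objects:
#
#   curves = get_cum_pnl(results, timeframe="1h")
#   correlations = curves.corr()
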
def _estimate_timeframe(
    session: TradingSessionResult | list[TradingSessionResult], start: str | None = None, stop: str | None = None
) -> str:
    # - pick a resampling timeframe proportional to the session length
    session = session[0] if isinstance(session, list) else session
    start, end = pd.Timestamp(start or session.start), pd.Timestamp(stop or session.stop)
    diff = end - start
    if diff > pd.Timedelta("360d"):
        return "1d"
    elif diff > pd.Timedelta("30d"):
        return "1h"
    elif diff > pd.Timedelta("7d"):
        return "15min"
    elif diff > pd.Timedelta("1d"):
        return "5min"
    else:
        return "1min"
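
# Note: the thresholds above are heuristics: for instance, a two-week backtest
# resamples to "15min" while a multi-year one resamples to "1d", which keeps chart
# sizes manageable regardless of the simulated period.
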
def _pfl_metrics_prepare(
    session: TradingSessionResult,
    account_transactions: bool,
    performance_statistics_period: int,
    commission_factor: float = 1,
) -> Tuple[pd.Series, dict]:
    mtrx = portfolio_metrics(
        session.portfolio_log,
        session.executions_log,
        session.capital,
        performance_statistics_period=performance_statistics_period,
        account_transactions=account_transactions,
        commission_factor=commission_factor,
    )
    rpt = {}
    for k, v in mtrx.items():
        if isinstance(v, (float, int, str)):
            n = (k[0].upper() + k[1:]).replace("_", " ")
            # - np.isfinite raises TypeError for strings, so only sanitize numeric values
            rpt[n] = v if isinstance(v, str) or np.isfinite(v) else 0
    return pd.Series(rpt, name=session.name), mtrx


def _tearsheet_single(
    session: TradingSessionResult,
    compound: bool = True,
    account_transactions=True,
    performance_statistics_period=365,
    timeframe: str | pd.Timedelta = "1h",
    commission_factor: float = 1,
    use_plotly: bool = True,
    plot_leverage: bool = False,
    no_title=False,
):
    report, mtrx = _pfl_metrics_prepare(
        session, account_transactions, performance_statistics_period, commission_factor=commission_factor
    )
    eqty = 100 * mtrx["compound_returns"] if compound else mtrx["equity"] - mtrx["equity"].iloc[0]
    eqty = eqty.resample(timeframe).ffill()
    _eqty = ["area", "green", eqty]
    dd = mtrx["drawdown_pct"] if compound else mtrx["drawdown_usd"]
    dd = dd.resample(timeframe).ffill()

    # - make plotly charts
    if use_plotly:
        _dd = ["area", -dd, "lim", [-dd, 0]]
        tbl = go.Table(
            columnwidth=[130, 130, 130, 130, 200],
            header=dict(
                values=report.index,
                line_color="darkslategray",
                fill_color="#303030",
                font=dict(color="white", size=11),
            ),
            cells=dict(
                values=round(report, 3).values.tolist(),
                line_color="darkslategray",
                fill_color="#101010",
                align=["center", "left"],
                font=dict(size=10),
            ),
        )
        chart = (
            LookingGlass(
                _eqty,
                {
                    "Drawdown": _dd,
                },
                study_plot_height=75,
            )
            .look(title=("Simulation: " if session.is_simulation else "") + session.name)
            .hover(h=500)
        )
        table = go.FigureWidget(tbl).update_layout(margin=dict(r=5, l=5, t=0, b=1), height=80)
        chart.show()
        table.show()
    # - make matplotlib charts
    else:
        _n = 51 if plot_leverage else 41
        ax = sbp(_n, 1, r=3)
        plt.plot(eqty, lw=2, c="g", label="Equity")
        plt.fill_between(eqty.index, eqty.values, 0, color="#003000", alpha=0.8)
        if not no_title:
            from textwrap import wrap

            _titl_txt = ("Simulation: " if session.is_simulation else "") + session.name
            plt.title("\n".join(wrap(_titl_txt, 60)), fontsize=18)
        plt.legend()
        ay = sbp(_n, 4)
        plt.plot(-dd, c="r", lw=1.5, label="Drawdown")
        plt.fill_between(dd.index, -dd.values, 0, color="#800000", alpha=0.8)
        if not compound:
            # - absolute (USD) scale: show thousands on both axes
            ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda y, p: str(y / 1000) + " K"))
            ay.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda y, p: str(y / 1000) + " K"))
        if plot_leverage:
            lev = calculate_leverage(session.portfolio_log, session.capital, session.start)
            ay = sbp(_n, 5)
            plt.plot(lev, c="c", lw=1.5, label="Leverage")
        plt.subplots_adjust(hspace=0)
    return pd.DataFrame(report).T.round(3)


def calculate_leverage(
    portfolio: pd.DataFrame, init_capital: float, start: str | pd.Timestamp, symbol=".*"
) -> pd.Series:
    """
    Gross position value as a percentage of running capital
    (initial capital plus cumulative PnL net of commissions).
    """
    total_pnl = calculate_total_pnl(portfolio, split_cumulative=False).loc[start:]
    capital = init_capital + total_pnl["Total_PnL"].cumsum() - total_pnl["Total_Commissions"].cumsum()
    value = portfolio.filter(regex=f"{symbol}_Value").loc[start:].sum(axis=1)
    return (value.squeeze() / capital).mul(100).rename("Leverage")  # type: ignore
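
# Example (hypothetical usage): leverage of a single session in percent, where values
# above 100 mean gross exposure exceeds the running capital. `result` is assumed to be
# a TradingSessionResult:
#
#   lev = calculate_leverage(result.portfolio_log, result.capital, result.start)
#   max_leverage = lev.max()
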
def chart_signals(
    result: TradingSessionResult,
    symbol: str,
    ohlc: dict | pd.DataFrame,
    timeframe: str | None = None,
    start=None,
    end=None,
    apply_commissions: bool = True,
    indicators={},
    overlay=[],
    info=True,
    show_trades: bool = True,
    show_signals: bool = False,
    show_quantity: bool = False,
    show_value: bool = False,
    show_leverage: bool = True,
    show_table: bool = False,
    show_portfolio: bool = True,
    height: int = 800,
    # - plugins are invoked as plugin(bars, start, end, figure=chart)
    plugins: list[Callable[..., LookingGlass]] | None = None,
):
    """
    Show trading signals and executions for `symbol` on an interactive chart.
    """
    # - copy to avoid mutating the (shared) default dict
    indicators = indicators | {}
    if timeframe is None:
        timeframe = _estimate_timeframe(result, start, end)

    executions = result.executions_log.rename(
        columns={"instrument_id": "instrument", "filled_qty": "quantity", "price": "exec_price"}
    )
    portfolio = result.portfolio_log

    if start is None:
        start = executions.index[0]

    if end is None:
        end = executions.index[-1]

    if portfolio is not None and show_portfolio:
        if show_quantity:
            pos = portfolio.filter(regex=f"{symbol}_Pos").loc[start:]
            indicators["Pos"] = ["area", "cyan", pos]
        if show_value:
            value = portfolio.filter(regex=f"{symbol}_Value").loc[start:]
            indicators["Value"] = ["area", "cyan", value]
        if show_leverage:
            leverage = calculate_leverage(portfolio, result.capital, start, symbol)
            indicators["Leverage"] = ["area", "cyan", leverage]
        symbol_count = len(portfolio.filter(like="_PnL").columns)
        pnl = portfolio.filter(regex=f"{symbol}_PnL").cumsum() + result.capital / symbol_count
        pnl = pnl.loc[start:]
        if apply_commissions:
            comm = portfolio.filter(regex=f"{symbol}_Commissions").loc[start:].cumsum()
            pnl -= comm.values
        pnl = (pnl / pnl.iloc[0] - 1) * 100
        indicators["PnL"] = ["area", "green", pnl]

    if isinstance(ohlc, dict):
        bars = ohlc[symbol]
        if isinstance(bars, OHLCV):
            bars = bars.pd()
    elif isinstance(ohlc, pd.DataFrame):
        bars = ohlc
    elif isinstance(ohlc, OHLCV):
        bars = ohlc.pd()
    else:
        raise ValueError(f"Invalid data type {type(ohlc)}")
    bars = ohlc_resample(bars, timeframe) if timeframe else bars

    if timeframe:

        def __resample(ind):
            if isinstance(ind, list):
                return [__resample(i) for i in ind]
            elif isinstance(ind, (pd.Series, pd.DataFrame)):
                return ind.resample(timeframe).ffill()
            else:
                return ind

        indicators = {k: __resample(v) for k, v in indicators.items()}

    # - executions for this symbol (always computed: also needed for the trades table below)
    excs = executions[executions["instrument"] == symbol][
        ["quantity", "exec_price", "commissions", "commissions_quoted", "order_id"]
    ]
    if show_trades:
        overlay = list(overlay) + [excs]

    if show_signals:
        sigs = result.signals_log[result.signals_log["instrument_id"] == symbol]
        overlay = list(overlay) + [sigs]

    chart = LookingGlass([bars, *overlay], indicators).look(start, end, title=symbol).hover(show_info=info, h=height)

    # - run plugins
    if plugins is not None:
        for plugin in plugins if isinstance(plugins, list) else [plugins]:
            chart = plugin(bars, start, end, figure=chart)

    if not show_table:
        return chart

    q_pos = excs["quantity"].cumsum()[start:end]
    excs = excs[start:end]
    # - color a fill red when it flattens the position, green otherwise
    colors = ["red" if t == 0 else "green" for t in q_pos]

    tbl = go.Table(
        columnwidth=[200, 150, 150, 100, 100],
        header=dict(
            values=["time"] + list(excs.columns),
            line_color="darkslategray",
            fill_color="#303030",
            font=dict(color="white", size=11),
        ),
        cells=dict(
            values=[excs.index.strftime("%Y-%m-%d %H:%M:%S")] + list(excs.T.values),
            line_color="darkslategray",
            fill_color="#101010",
            align=["center", "left"],
            font=dict(color=[colors], size=10),
        ),
    )
    table = go.FigureWidget(tbl).update_layout(margin=dict(r=5, l=5, t=5, b=5), height=200)
    return chart.show(), table.show()
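
# Example (hypothetical usage): chart BTCUSDT fills on top of externally loaded OHLC
# bars (`ohlc_frame` is assumed to be a pandas OHLC DataFrame), with the executions
# table underneath:
#
#   chart_signals(result, "BTCUSDT", ohlc_frame, timeframe="1h", show_table=True)
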
def get_symbol_pnls(
    session: TradingSessionResult | List[TradingSessionResult],
) -> pd.DataFrame:
    if isinstance(session, TradingSessionResult):
        session = [session]

    pnls = []
    for s in session:
        pnls.append(s.portfolio_log.filter(like="_PnL").cumsum().iloc[-1])

    return pd.DataFrame(pnls, index=[s.name for s in session])
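
# Example (hypothetical usage): final cumulative PnL per symbol, one row per session,
# which makes it easy to spot instruments that drag down the portfolio:
#
#   symbol_pnls = get_symbol_pnls(results)
#   worst = symbol_pnls.sum().idxmin()
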
def combine_sessions(sessions: list[TradingSessionResult], name: str = "Portfolio") -> TradingSessionResult:
    session = copy(sessions[0])
    session.name = name
    session.instruments = list(set(chain.from_iterable([e.instruments for e in sessions])))
    session.portfolio_log = pd.concat(
        [e.portfolio_log.loc[:, (e.portfolio_log != 0).any(axis=0)] for e in sessions], axis=1
    )
    # - remove duplicated columns, keep first
    session.portfolio_log = session.portfolio_log.loc[:, ~session.portfolio_log.columns.duplicated()]
    session.executions_log = pd.concat([s.executions_log for s in sessions], axis=0).sort_index()
    session.signals_log = pd.concat([s.signals_log for s in sessions], axis=0).sort_index()
    # - remove duplicated rows
    session.executions_log = (
        session.executions_log.set_index("instrument_id", append=True).drop_duplicates().reset_index("instrument_id")
    )
    session.signals_log = (
        session.signals_log.set_index("instrument_id", append=True).drop_duplicates().reset_index("instrument_id")
    )
    return session
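
# Example (hypothetical usage): merge per-strategy runs into one synthetic session so
# the combined portfolio can be fed back into tearsheet():
#
#   combined = combine_sessions(results, name="Combined")
#   tearsheet(combined)
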
def _plt_to_base64() -> str:
    # - render the current matplotlib figure into a base64-encoded PNG data URI
    fig = plt.gcf()

    imgdata = BytesIO()
    plt.subplots_adjust(hspace=0)
    fig.savefig(imgdata, format="png", transparent=True, bbox_inches="tight")
    imgdata.seek(0)
    uri = "data:image/png;base64," + base64.b64encode(imgdata.getvalue()).decode("utf8")
    plt.clf()
    plt.close()

    return uri