quantmllibrary 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantml/__init__.py +74 -0
- quantml/autograd.py +154 -0
- quantml/cli/__init__.py +10 -0
- quantml/cli/run_experiment.py +385 -0
- quantml/config/__init__.py +28 -0
- quantml/config/config.py +259 -0
- quantml/data/__init__.py +33 -0
- quantml/data/cache.py +149 -0
- quantml/data/feature_store.py +234 -0
- quantml/data/futures.py +254 -0
- quantml/data/loaders.py +236 -0
- quantml/data/memory_optimizer.py +234 -0
- quantml/data/validators.py +390 -0
- quantml/experiments/__init__.py +23 -0
- quantml/experiments/logger.py +208 -0
- quantml/experiments/results.py +158 -0
- quantml/experiments/tracker.py +223 -0
- quantml/features/__init__.py +25 -0
- quantml/features/base.py +104 -0
- quantml/features/gap_features.py +124 -0
- quantml/features/registry.py +138 -0
- quantml/features/volatility_features.py +140 -0
- quantml/features/volume_features.py +142 -0
- quantml/functional.py +37 -0
- quantml/models/__init__.py +27 -0
- quantml/models/attention.py +258 -0
- quantml/models/dropout.py +130 -0
- quantml/models/gru.py +319 -0
- quantml/models/linear.py +112 -0
- quantml/models/lstm.py +353 -0
- quantml/models/mlp.py +286 -0
- quantml/models/normalization.py +289 -0
- quantml/models/rnn.py +154 -0
- quantml/models/tcn.py +238 -0
- quantml/online.py +209 -0
- quantml/ops.py +1707 -0
- quantml/optim/__init__.py +42 -0
- quantml/optim/adafactor.py +206 -0
- quantml/optim/adagrad.py +157 -0
- quantml/optim/adam.py +267 -0
- quantml/optim/lookahead.py +97 -0
- quantml/optim/quant_optimizer.py +228 -0
- quantml/optim/radam.py +192 -0
- quantml/optim/rmsprop.py +203 -0
- quantml/optim/schedulers.py +286 -0
- quantml/optim/sgd.py +181 -0
- quantml/py.typed +0 -0
- quantml/streaming.py +175 -0
- quantml/tensor.py +462 -0
- quantml/time_series.py +447 -0
- quantml/training/__init__.py +135 -0
- quantml/training/alpha_eval.py +203 -0
- quantml/training/backtest.py +280 -0
- quantml/training/backtest_analysis.py +168 -0
- quantml/training/cv.py +106 -0
- quantml/training/data_loader.py +177 -0
- quantml/training/ensemble.py +84 -0
- quantml/training/feature_importance.py +135 -0
- quantml/training/features.py +364 -0
- quantml/training/futures_backtest.py +266 -0
- quantml/training/gradient_clipping.py +206 -0
- quantml/training/losses.py +248 -0
- quantml/training/lr_finder.py +127 -0
- quantml/training/metrics.py +376 -0
- quantml/training/regularization.py +89 -0
- quantml/training/trainer.py +239 -0
- quantml/training/walk_forward.py +190 -0
- quantml/utils/__init__.py +51 -0
- quantml/utils/gradient_check.py +274 -0
- quantml/utils/logging.py +181 -0
- quantml/utils/ops_cpu.py +231 -0
- quantml/utils/profiling.py +364 -0
- quantml/utils/reproducibility.py +220 -0
- quantml/utils/serialization.py +335 -0
- quantmllibrary-0.1.0.dist-info/METADATA +536 -0
- quantmllibrary-0.1.0.dist-info/RECORD +79 -0
- quantmllibrary-0.1.0.dist-info/WHEEL +5 -0
- quantmllibrary-0.1.0.dist-info/licenses/LICENSE +22 -0
- quantmllibrary-0.1.0.dist-info/top_level.txt +1 -0
quantml/time_series.py
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quantitative trading specific time-series operations.
|
|
3
|
+
|
|
4
|
+
This module provides operations commonly used in quantitative trading,
|
|
5
|
+
including moving averages, volatility measures, order flow indicators,
|
|
6
|
+
and price transformations.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from typing import Union, Optional, List
|
|
10
|
+
from quantml.tensor import Tensor
|
|
11
|
+
from quantml import ops
|
|
12
|
+
|
|
13
|
+
# Try to import NumPy
|
|
14
|
+
try:
|
|
15
|
+
import numpy as np
|
|
16
|
+
HAS_NUMPY = True
|
|
17
|
+
except ImportError:
|
|
18
|
+
HAS_NUMPY = False
|
|
19
|
+
np = None
|
|
20
|
+
|
|
21
|
+
# Helper to convert to tensor
|
|
22
|
+
def _to_tensor(x):
    """Return *x* unchanged when it is already a Tensor, else wrap it in one."""
    return x if isinstance(x, Tensor) else Tensor(x)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def ema(t: Tensor, n: int, alpha: Optional[float] = None) -> Tensor:
    """
    Exponential Moving Average (EMA).

    Recurrence: EMA_t = alpha * price_t + (1 - alpha) * EMA_{t-1},
    seeded with the first observation; alpha defaults to 2 / (n + 1).

    Args:
        t: Input tensor (price series)
        n: Period for EMA
        alpha: Smoothing factor (default: 2/(n+1))

    Returns:
        Tensor with EMA values

    Examples:
        >>> prices = Tensor([100.0, 101.0, 102.0, 103.0, 104.0])
        >>> ema_5 = ema(prices, n=5)
    """
    smoothing = 2.0 / (n + 1.0) if alpha is None else alpha

    t = _to_tensor(t)
    series = t.data[0] if isinstance(t.data[0], list) else t.data

    # Fewer observations than the period: hand the input back unchanged.
    if len(series) < n:
        return t

    if HAS_NUMPY:
        try:
            arr = np.array(series, dtype=np.float64)
            out = np.zeros_like(arr)
            out[0] = arr[0]

            # NOTE: the EMA recurrence is inherently sequential, so this
            # remains an explicit loop even on the NumPy path.
            for idx in range(1, len(arr)):
                out[idx] = smoothing * arr[idx] + (1.0 - smoothing) * out[idx - 1]

            return Tensor([out.tolist()], requires_grad=t.requires_grad)
        except (ValueError, TypeError):
            pass  # fall through to the pure-Python path

    # Pure Python fallback: same recurrence without NumPy.
    prev = float(series[0])
    result = [prev]

    for idx in range(1, len(series)):
        prev = smoothing * float(series[idx]) + (1.0 - smoothing) * prev
        result.append(prev)

    return Tensor([result], requires_grad=t.requires_grad)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def wma(t: Tensor, n: int) -> Tensor:
    """
    Weighted Moving Average (WMA).

    Recent observations receive linearly larger weights n, n-1, ..., 1
    (the most recent sample is weighted n).

    Args:
        t: Input tensor
        n: Period for WMA

    Returns:
        Tensor with WMA values; the first n-1 positions are padded with
        the first observation so the output matches the input length.
    """
    t = _to_tensor(t)
    series = t.data[0] if isinstance(t.data[0], list) else t.data

    if len(series) < n:
        return t

    denom = sum(range(1, n + 1))  # total of weights 1..n
    windowed = []

    for end in range(n - 1, len(series)):
        acc = 0.0
        for j in range(n):
            # j = 0 is the newest sample and gets the heaviest weight n.
            acc += float(series[end - j]) * (n - j)
        windowed.append(acc / denom)

    # Left-pad with the first observation to preserve length.
    padded = [float(series[0])] * (n - 1) + windowed

    return Tensor([padded], requires_grad=t.requires_grad)
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def rolling_mean(t: Tensor, n: int) -> Tensor:
    """
    Trailing rolling mean over a window of size n.

    Positions before the first complete window are padded with the first
    observation, so the output has the same length as the input.

    Args:
        t: Input tensor
        n: Window size

    Returns:
        Tensor with rolling mean values
    """
    t = _to_tensor(t)
    data = t.data if not isinstance(t.data[0], list) else t.data[0]

    if len(data) < n:
        return t

    # Use NumPy if available
    if HAS_NUMPY:
        try:
            data_arr = np.array(data, dtype=np.float64)
            # BUG FIX: mode='same' centered the window, which disagreed with
            # the trailing-window pure-Python fallback below. mode='valid'
            # yields the mean of every full trailing window; we then front-pad
            # with the first observation exactly like the fallback.
            valid_means = np.convolve(data_arr, np.ones(n) / n, mode='valid')
            rolling_means = np.concatenate(
                [np.full(n - 1, data_arr[0]), valid_means]
            )
            return Tensor([rolling_means.tolist()], requires_grad=t.requires_grad)
        except (ValueError, TypeError):
            pass

    # Pure Python fallback: trailing windows, front-padded with data[0].
    rolling_means = [float(data[0])] * (n - 1)

    for i in range(n - 1, len(data)):
        window_sum = sum(float(data[j]) for j in range(i - n + 1, i + 1))
        rolling_means.append(window_sum / n)

    return Tensor([rolling_means], requires_grad=t.requires_grad)
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def rolling_std(t: Tensor, n: int) -> Tensor:
    """
    Rolling standard deviation over a trailing window of size n.

    Positions before the first complete window are filled with 0.0; if
    the whole series is shorter than n, an all-zero series is returned.

    Args:
        t: Input tensor
        n: Window size

    Returns:
        Tensor with rolling std values (population std, ddof=0)
    """
    t = _to_tensor(t)
    series = t.data[0] if isinstance(t.data[0], list) else t.data

    if len(series) < n:
        return Tensor([[0.0] * len(series)], requires_grad=t.requires_grad)

    if HAS_NUMPY:
        try:
            arr = np.array(series, dtype=np.float64)
            out = np.zeros_like(arr)

            for end in range(n - 1, len(arr)):
                out[end] = np.std(arr[end - n + 1:end + 1])

            return Tensor([out.tolist()], requires_grad=t.requires_grad)
        except (ValueError, TypeError):
            pass  # fall through to the pure-Python path

    # Pure Python fallback: population std to match np.std's default ddof=0.
    out = [0.0] * (n - 1)

    for end in range(n - 1, len(series)):
        window = [float(series[k]) for k in range(end - n + 1, end + 1)]
        mu = sum(window) / n
        variance = sum((v - mu) ** 2 for v in window) / n
        out.append(variance ** 0.5)

    return Tensor([out], requires_grad=t.requires_grad)
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def zscore(t: Tensor, n: int) -> Tensor:
    """
    Rolling z-score: (x - rolling_mean) / rolling_std over a window of n.

    Args:
        t: Input tensor
        n: Window size for mean and std calculation

    Returns:
        Tensor with z-scores
    """
    t = _to_tensor(t)
    center = rolling_mean(t, n)
    spread = rolling_std(t, n)

    # A tiny epsilon keeps the division defined when the window is constant.
    denom = ops.add(spread, Tensor([[1e-8]]))
    return ops.div(ops.sub(t, center), denom)
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def volatility(t: Tensor, n: int, annualize: bool = True) -> Tensor:
    """
    Rolling volatility: standard deviation of simple returns over a window.

    Args:
        t: Price tensor
        n: Window size
        annualize: If True, scale by sqrt(252) (trading days per year)

    Returns:
        Tensor with volatility values
    """
    vol = rolling_std(returns(t), n)

    if not annualize:
        return vol

    import math
    return ops.mul(vol, math.sqrt(252.0))
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def returns(t: Tensor) -> Tensor:
    """
    Simple returns: (p_t - p_{t-1}) / p_{t-1}.

    The first element is 0.0 by convention; a zero previous price also
    yields 0.0 instead of dividing by zero.

    Args:
        t: Price tensor

    Returns:
        Tensor with returns (same length as input)
    """
    t = _to_tensor(t)
    series = t.data[0] if isinstance(t.data[0], list) else t.data

    if len(series) < 2:
        return Tensor([[0.0] * len(series)], requires_grad=t.requires_grad)

    out = [0.0]  # first return is 0 by convention
    for idx in range(1, len(series)):
        prev = float(series[idx - 1])
        out.append((float(series[idx]) - prev) / prev if prev != 0 else 0.0)

    return Tensor([out], requires_grad=t.requires_grad)
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
def log_returns(t: Tensor) -> Tensor:
    """
    Log returns: log(p_t / p_{t-1}).

    The first element is 0.0 by convention; any non-positive price in a
    pair also yields 0.0 since the log would be undefined.

    Args:
        t: Price tensor

    Returns:
        Tensor with log returns (same length as input)
    """
    import math
    t = _to_tensor(t)
    series = t.data[0] if isinstance(t.data[0], list) else t.data

    if len(series) < 2:
        return Tensor([[0.0] * len(series)], requires_grad=t.requires_grad)

    out = [0.0]
    for idx in range(1, len(series)):
        prev = float(series[idx - 1])
        curr = float(series[idx])
        out.append(math.log(curr / prev) if prev > 0 and curr > 0 else 0.0)

    return Tensor([out], requires_grad=t.requires_grad)
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
def vwap(price: Tensor, volume: Tensor) -> Tensor:
    """
    Cumulative Volume-Weighted Average Price (VWAP).

    VWAP_t = sum_{i<=t}(price_i * volume_i) / sum_{i<=t}(volume_i).
    While cumulative volume is zero, the current price is used instead.

    Args:
        price: Price tensor
        volume: Volume tensor (same length as price)

    Returns:
        Tensor with VWAP values

    Raises:
        ValueError: If price and volume differ in length.
    """
    price = _to_tensor(price)
    volume = _to_tensor(volume)

    p_series = price.data[0] if isinstance(price.data[0], list) else price.data
    v_series = volume.data[0] if isinstance(volume.data[0], list) else volume.data

    if len(p_series) != len(v_series):
        raise ValueError("Price and volume must have same length")

    grad = price.requires_grad or volume.requires_grad

    if HAS_NUMPY:
        try:
            p_arr = np.array(p_series, dtype=np.float64)
            v_arr = np.array(v_series, dtype=np.float64)

            cum_pv = np.cumsum(p_arr * v_arr)
            cum_v = np.cumsum(v_arr)

            # Fall back to the raw price wherever no volume has accumulated.
            vwap_arr = np.where(cum_v > 0, cum_pv / cum_v, p_arr)

            return Tensor([vwap_arr.tolist()], requires_grad=grad)
        except (ValueError, TypeError):
            pass  # fall through to the pure-Python path

    # Pure Python fallback using running sums.
    out = []
    cum_pv = 0.0
    cum_v = 0.0

    for p_raw, v_raw in zip(p_series, v_series):
        p = float(p_raw)
        v = float(v_raw)
        cum_pv += p * v
        cum_v += v
        out.append(cum_pv / cum_v if cum_v > 0 else p)

    return Tensor([out], requires_grad=grad)
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
def orderflow_imbalance(bid: Tensor, ask: Tensor) -> Tensor:
    """
    Order Flow Imbalance (OFI): (bid_size - ask_size) / (bid_size + ask_size).

    Values range from -1 (all ask) to +1 (all bid); a zero total maps to 0.

    Args:
        bid: Bid size tensor
        ask: Ask size tensor (same length as bid)

    Returns:
        Tensor with OFI values (ranges from -1 to 1)

    Raises:
        ValueError: If bid and ask differ in length.
    """
    bid = _to_tensor(bid)
    ask = _to_tensor(ask)

    b_series = bid.data[0] if isinstance(bid.data[0], list) else bid.data
    a_series = ask.data[0] if isinstance(ask.data[0], list) else ask.data

    if len(b_series) != len(a_series):
        raise ValueError("Bid and ask must have same length")

    out = []
    for b_raw, a_raw in zip(b_series, a_series):
        b = float(b_raw)
        a = float(a_raw)
        depth = b + a
        out.append((b - a) / depth if depth > 0 else 0.0)

    return Tensor([out], requires_grad=bid.requires_grad or ask.requires_grad)
|
|
393
|
+
|
|
394
|
+
|
|
395
|
+
def microprice(bid: Tensor, ask: Tensor, bid_size: Optional[Tensor] = None,
               ask_size: Optional[Tensor] = None) -> Tensor:
    """
    Microprice: size-weighted average of bid and ask prices.

    With sizes: (bid * ask_size + ask * bid_size) / (bid_size + ask_size),
    i.e. each side is weighted by the opposite side's size.
    Without sizes — or at ticks where total size is zero — the plain mid
    price (bid + ask) / 2 is used.

    Args:
        bid: Bid price tensor
        ask: Ask price tensor
        bid_size: Bid size tensor (optional)
        ask_size: Ask size tensor (optional)

    Returns:
        Tensor with microprice values

    Raises:
        ValueError: If bid and ask differ in length.
    """
    bid = _to_tensor(bid)
    ask = _to_tensor(ask)

    b_px = bid.data[0] if isinstance(bid.data[0], list) else bid.data
    a_px = ask.data[0] if isinstance(ask.data[0], list) else ask.data

    if len(b_px) != len(a_px):
        raise ValueError("Bid and ask must have same length")

    if bid_size is None or ask_size is None:
        # No depth information: plain mid price.
        values = [(float(b) + float(a)) / 2.0 for b, a in zip(b_px, a_px)]
    else:
        bid_size = _to_tensor(bid_size)
        ask_size = _to_tensor(ask_size)

        b_sz = bid_size.data[0] if isinstance(bid_size.data[0], list) else bid_size.data
        a_sz = ask_size.data[0] if isinstance(ask_size.data[0], list) else ask_size.data

        values = []
        for i in range(len(b_px)):
            b = float(b_px[i])
            a = float(a_px[i])
            bs = float(b_sz[i])
            asz = float(a_sz[i])
            depth = bs + asz
            # Degrade gracefully to the mid price on an empty book.
            values.append((b * asz + a * bs) / depth if depth > 0 else (b + a) / 2.0)

    return Tensor([values], requires_grad=bid.requires_grad or ask.requires_grad)
|
|
447
|
+
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
"""
|
|
2
|
+
QuantML Training Utilities
|
|
3
|
+
|
|
4
|
+
This module provides training utilities for quant model development,
|
|
5
|
+
including metrics, loss functions, data loaders, and training loops.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from quantml.training.metrics import (
|
|
9
|
+
sharpe_ratio,
|
|
10
|
+
sortino_ratio,
|
|
11
|
+
calmar_ratio,
|
|
12
|
+
max_drawdown,
|
|
13
|
+
information_coefficient,
|
|
14
|
+
rank_ic,
|
|
15
|
+
hit_rate,
|
|
16
|
+
var,
|
|
17
|
+
cvar,
|
|
18
|
+
turnover
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
from quantml.training.losses import (
|
|
22
|
+
mse_loss,
|
|
23
|
+
mae_loss,
|
|
24
|
+
quantile_loss,
|
|
25
|
+
sharpe_loss,
|
|
26
|
+
information_ratio_loss,
|
|
27
|
+
huber_loss,
|
|
28
|
+
asymmetric_loss,
|
|
29
|
+
max_drawdown_loss,
|
|
30
|
+
combined_quant_loss
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
from quantml.training.walk_forward import (
|
|
34
|
+
WalkForwardOptimizer,
|
|
35
|
+
WindowType,
|
|
36
|
+
walk_forward_validation
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
from quantml.training.backtest import BacktestEngine
|
|
40
|
+
|
|
41
|
+
from quantml.training.alpha_eval import (
|
|
42
|
+
AlphaEvaluator,
|
|
43
|
+
evaluate_alpha_signals
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
from quantml.training.features import (
|
|
47
|
+
FeaturePipeline,
|
|
48
|
+
create_lagged_features,
|
|
49
|
+
normalize_features
|
|
50
|
+
)
|
|
51
|
+
|
|
52
|
+
from quantml.training.data_loader import (
|
|
53
|
+
QuantDataLoader,
|
|
54
|
+
TimeSeriesDataLoader
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
from quantml.training.trainer import QuantTrainer
|
|
58
|
+
|
|
59
|
+
from quantml.training.gradient_clipping import (
|
|
60
|
+
GradientNormClipper,
|
|
61
|
+
GradientValueClipper,
|
|
62
|
+
AdaptiveClipper,
|
|
63
|
+
clip_grad_norm,
|
|
64
|
+
clip_grad_value
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
from quantml.training.lr_finder import LRFinder
|
|
68
|
+
|
|
69
|
+
from quantml.training.ensemble import EnsembleModel
|
|
70
|
+
|
|
71
|
+
from quantml.training.feature_importance import FeatureImportanceTracker
|
|
72
|
+
|
|
73
|
+
from quantml.training.cv import TimeSeriesSplit, PurgedKFold
|
|
74
|
+
|
|
75
|
+
from quantml.training.regularization import Dropout
|
|
76
|
+
|
|
77
|
+
# Public, re-exported API of quantml.training.
__all__ = [
    # Metrics
    'sharpe_ratio',
    'sortino_ratio',
    'calmar_ratio',
    'max_drawdown',
    'information_coefficient',
    'rank_ic',
    'hit_rate',
    'var',
    'cvar',
    'turnover',
    # Losses
    'mse_loss',
    'mae_loss',
    'quantile_loss',
    'sharpe_loss',
    'information_ratio_loss',
    'huber_loss',
    'asymmetric_loss',
    'max_drawdown_loss',
    'combined_quant_loss',
    # Walk-forward
    'WalkForwardOptimizer',
    'WindowType',
    'walk_forward_validation',
    # Backtesting
    'BacktestEngine',
    # Alpha evaluation
    'AlphaEvaluator',
    'evaluate_alpha_signals',
    # Features
    'FeaturePipeline',
    'create_lagged_features',
    'normalize_features',
    # Data loaders
    'QuantDataLoader',
    'TimeSeriesDataLoader',
    # Trainer
    'QuantTrainer',
    # Gradient clipping
    'GradientNormClipper',
    'GradientValueClipper',
    'AdaptiveClipper',
    'clip_grad_norm',
    'clip_grad_value',
    # LR finder
    'LRFinder',
    # Ensembling
    'EnsembleModel',
    # Feature importance
    'FeatureImportanceTracker',
    # Cross-validation
    'TimeSeriesSplit',
    'PurgedKFold',
    # Regularization
    'Dropout'
]
|
|
135
|
+
|