quantmllibrary 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantml/__init__.py +74 -0
- quantml/autograd.py +154 -0
- quantml/cli/__init__.py +10 -0
- quantml/cli/run_experiment.py +385 -0
- quantml/config/__init__.py +28 -0
- quantml/config/config.py +259 -0
- quantml/data/__init__.py +33 -0
- quantml/data/cache.py +149 -0
- quantml/data/feature_store.py +234 -0
- quantml/data/futures.py +254 -0
- quantml/data/loaders.py +236 -0
- quantml/data/memory_optimizer.py +234 -0
- quantml/data/validators.py +390 -0
- quantml/experiments/__init__.py +23 -0
- quantml/experiments/logger.py +208 -0
- quantml/experiments/results.py +158 -0
- quantml/experiments/tracker.py +223 -0
- quantml/features/__init__.py +25 -0
- quantml/features/base.py +104 -0
- quantml/features/gap_features.py +124 -0
- quantml/features/registry.py +138 -0
- quantml/features/volatility_features.py +140 -0
- quantml/features/volume_features.py +142 -0
- quantml/functional.py +37 -0
- quantml/models/__init__.py +27 -0
- quantml/models/attention.py +258 -0
- quantml/models/dropout.py +130 -0
- quantml/models/gru.py +319 -0
- quantml/models/linear.py +112 -0
- quantml/models/lstm.py +353 -0
- quantml/models/mlp.py +286 -0
- quantml/models/normalization.py +289 -0
- quantml/models/rnn.py +154 -0
- quantml/models/tcn.py +238 -0
- quantml/online.py +209 -0
- quantml/ops.py +1707 -0
- quantml/optim/__init__.py +42 -0
- quantml/optim/adafactor.py +206 -0
- quantml/optim/adagrad.py +157 -0
- quantml/optim/adam.py +267 -0
- quantml/optim/lookahead.py +97 -0
- quantml/optim/quant_optimizer.py +228 -0
- quantml/optim/radam.py +192 -0
- quantml/optim/rmsprop.py +203 -0
- quantml/optim/schedulers.py +286 -0
- quantml/optim/sgd.py +181 -0
- quantml/py.typed +0 -0
- quantml/streaming.py +175 -0
- quantml/tensor.py +462 -0
- quantml/time_series.py +447 -0
- quantml/training/__init__.py +135 -0
- quantml/training/alpha_eval.py +203 -0
- quantml/training/backtest.py +280 -0
- quantml/training/backtest_analysis.py +168 -0
- quantml/training/cv.py +106 -0
- quantml/training/data_loader.py +177 -0
- quantml/training/ensemble.py +84 -0
- quantml/training/feature_importance.py +135 -0
- quantml/training/features.py +364 -0
- quantml/training/futures_backtest.py +266 -0
- quantml/training/gradient_clipping.py +206 -0
- quantml/training/losses.py +248 -0
- quantml/training/lr_finder.py +127 -0
- quantml/training/metrics.py +376 -0
- quantml/training/regularization.py +89 -0
- quantml/training/trainer.py +239 -0
- quantml/training/walk_forward.py +190 -0
- quantml/utils/__init__.py +51 -0
- quantml/utils/gradient_check.py +274 -0
- quantml/utils/logging.py +181 -0
- quantml/utils/ops_cpu.py +231 -0
- quantml/utils/profiling.py +364 -0
- quantml/utils/reproducibility.py +220 -0
- quantml/utils/serialization.py +335 -0
- quantmllibrary-0.1.0.dist-info/METADATA +536 -0
- quantmllibrary-0.1.0.dist-info/RECORD +79 -0
- quantmllibrary-0.1.0.dist-info/WHEEL +5 -0
- quantmllibrary-0.1.0.dist-info/licenses/LICENSE +22 -0
- quantmllibrary-0.1.0.dist-info/top_level.txt +1 -0
quantml/online.py
ADDED
@@ -0,0 +1,209 @@
"""
Online learning support for incremental model updates.

This module provides utilities for online learning scenarios where models
are updated incrementally as new data arrives, as is typical in HFT and
quantitative trading.
"""

from typing import List, Optional, Callable, Any
from quantml.tensor import Tensor
from quantml.streaming import StreamingTensor


class OnlineOptimizer:
    """
    Base class for online optimizers that support incremental updates.

    Online optimizers update model parameters one sample at a time,
    which is essential for per-tick training in quantitative trading.
    """

    def __init__(self, learning_rate: float = 0.01):
        """
        Initialize online optimizer.

        Args:
            learning_rate: Learning rate for updates
        """
        self.learning_rate = learning_rate
        self.step_count = 0

    def step(self, grad: Tensor, param: Tensor):
        """
        Perform a single parameter update step.

        Args:
            grad: Gradient for the parameter
            param: Parameter tensor to update

        Note:
            The base class only tracks the step count and performs no
            update, because in-place parameter mutation is deliberately
            avoided in the base design. Subclasses implement the actual
            update rule.
        """
        self.step_count += 1

    def zero_grad(self):
        """Reset optimizer state."""
        self.step_count = 0


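Since the base `step` deliberately performs no update, a concrete rule has to come from a subclass. A minimal sketch of one, assuming `Tensor` exposes its values through a writable `.data` attribute (exactly the in-place write the base class avoids; the real internals may differ):

from quantml import ops
from quantml.tensor import Tensor

class OnlineSGD(OnlineOptimizer):
    """Plain SGD: param <- param - lr * grad, written back in place."""

    def step(self, grad: Tensor, param: Tensor):
        # Compute the new value functionally with the same ops used
        # elsewhere in this module, then write it back.
        # NOTE: assumes Tensor keeps its values in a writable `.data`
        # attribute.
        new_param = ops.sub(param, ops.mul(grad, self.learning_rate))
        param.data = new_param.data
        self.step_count += 1
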
def incremental_update(
    model_params: List[Tensor],
    gradients: List[Tensor],
    learning_rate: float = 0.01
) -> List[Tensor]:
    """
    Perform incremental parameter update for online learning.

    This function updates model parameters using gradients computed from
    a single sample or small batch, suitable for per-tick training.

    Args:
        model_params: List of parameter tensors
        gradients: List of corresponding gradients
        learning_rate: Learning rate for the update

    Returns:
        Updated parameter tensors (new instances)

    Examples:
        >>> weights = [Tensor([[1.0, 2.0]], requires_grad=True)]
        >>> grads = [Tensor([[0.1, 0.2]])]
        >>> updated = incremental_update(weights, grads, learning_rate=0.01)
    """
    from quantml import ops

    if len(model_params) != len(gradients):
        raise ValueError("Number of parameters and gradients must match")

    updated_params = []
    for param, grad in zip(model_params, gradients):
        # Simple gradient descent: param_new = param - lr * grad
        update = ops.mul(grad, learning_rate)
        new_param = ops.sub(param, update)
        updated_params.append(new_param)

    return updated_params


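With the doctest values above, each weight moves one small step against its gradient: [[1.0, 2.0]] - 0.01 * [[0.1, 0.2]] = [[0.999, 1.998]], and the original tensors are left untouched since the update returns new instances.
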
def per_tick_training_step(
    model: Any,
    x: Tensor,
    y: Tensor,
    loss_fn: Callable,
    optimizer: Optional[OnlineOptimizer] = None,
    learning_rate: float = 0.01
) -> float:
    """
    Perform a single training step on one tick of data.

    This is the core function for online learning in quant trading,
    where models are updated as each new tick arrives.

    Args:
        model: Model with forward() method and parameters
        x: Input tensor (single sample)
        y: Target tensor (single sample)
        loss_fn: Loss function (pred, target) -> loss
        optimizer: Optional online optimizer
        learning_rate: Learning rate if no optimizer provided

    Returns:
        Loss value for this step

    Examples:
        >>> from quantml.models import Linear
        >>> model = Linear(10, 1)
        >>> x = Tensor([[1.0] * 10])
        >>> y = Tensor([[0.5]])
        >>> loss = per_tick_training_step(model, x, y, lambda p, t: (p - t) ** 2)
    """
    # Forward pass
    pred = model.forward(x)

    # Compute loss
    loss = loss_fn(pred, y)

    # Backward pass
    if loss.requires_grad:
        loss.backward()

    # Update parameters
    if hasattr(model, 'parameters'):
        # Filter parameters and gradients together so the lists stay aligned.
        params = [p for p in model.parameters() if p.grad is not None]
        grads = [p.grad for p in params]

        if optimizer is not None:
            for param, grad in zip(params, grads):
                optimizer.step(grad, param)
        else:
            # Simple SGD update, written back so the step takes effect
            updated = incremental_update(params, grads, learning_rate)
            for param, new_param in zip(params, updated):
                param.data = new_param.data

    # Get loss value
    if isinstance(loss.data, list):
        if isinstance(loss.data[0], list):
            loss_val = loss.data[0][0]
        else:
            loss_val = loss.data[0]
    else:
        loss_val = float(loss.data)

    return loss_val


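In a live setting this function is simply invoked once per incoming quote. A minimal sketch of such a loop, reusing the doctest's `Linear(10, 1)` model and squared-error loss together with the `OnlineSGD` sketch above; `ticks` is a hypothetical stand-in for a real feed of `(features, target)` `Tensor` pairs:

from quantml.models import Linear

model = Linear(10, 1)
optimizer = OnlineSGD(learning_rate=0.001)

for x_t, y_t in ticks:  # hypothetical feed: one (features, target) pair per tick
    loss = per_tick_training_step(
        model, x_t, y_t,
        loss_fn=lambda p, t: (p - t) ** 2,
        optimizer=optimizer,
    )
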
class StreamingDataset:
    """
    Dataset wrapper for streaming data.

    This class provides an interface for training models on streaming
    market data, where new samples arrive continuously.
    """

    def __init__(self, x_stream: StreamingTensor, y_stream: Optional[StreamingTensor] = None):
        """
        Initialize streaming dataset.

        Args:
            x_stream: StreamingTensor for input features
            y_stream: Optional StreamingTensor for targets
        """
        self.x_stream = x_stream
        self.y_stream = y_stream

    def get_batch(self, size: int = 1) -> tuple:
        """
        Get a batch of recent samples.

        Args:
            size: Number of samples to return

        Returns:
            Tuple of (x_batch, y_batch); y_batch is None when no target
            stream was provided
        """
        x_batch = self.x_stream.get_window(size)

        if self.y_stream is not None:
            y_batch = self.y_stream.get_window(size)
            return x_batch, y_batch
        else:
            return x_batch, None

    def append(self, x: float, y: Optional[float] = None):
        """
        Append a new sample to the dataset.

        Args:
            x: Input value
            y: Optional target value
        """
        self.x_stream.append(x)
        if y is not None and self.y_stream is not None:
            self.y_stream.append(y)
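
`StreamingDataset` slots into the same loop: append each tick, then train on the freshest window. A sketch under stated assumptions: `make_stream()` is a hypothetical constructor for the `StreamingTensor` buffers (their real signature is not shown in this file), `get_window` is assumed to return batches that `per_tick_training_step` can consume, and `model` is the one from the loop sketch above:

# make_stream() is a hypothetical stand-in for however StreamingTensor
# buffers are actually constructed in quantml.streaming.
dataset = StreamingDataset(x_stream=make_stream(), y_stream=make_stream())

def on_tick(price: float, target: float) -> float:
    """Ingest one tick, then train on the most recent 32 samples."""
    dataset.append(price, target)
    x_batch, y_batch = dataset.get_batch(size=32)
    return per_tick_training_step(
        model, x_batch, y_batch,
        loss_fn=lambda p, t: (p - t) ** 2,
    )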