quantmllibrary-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. quantml/__init__.py +74 -0
  2. quantml/autograd.py +154 -0
  3. quantml/cli/__init__.py +10 -0
  4. quantml/cli/run_experiment.py +385 -0
  5. quantml/config/__init__.py +28 -0
  6. quantml/config/config.py +259 -0
  7. quantml/data/__init__.py +33 -0
  8. quantml/data/cache.py +149 -0
  9. quantml/data/feature_store.py +234 -0
  10. quantml/data/futures.py +254 -0
  11. quantml/data/loaders.py +236 -0
  12. quantml/data/memory_optimizer.py +234 -0
  13. quantml/data/validators.py +390 -0
  14. quantml/experiments/__init__.py +23 -0
  15. quantml/experiments/logger.py +208 -0
  16. quantml/experiments/results.py +158 -0
  17. quantml/experiments/tracker.py +223 -0
  18. quantml/features/__init__.py +25 -0
  19. quantml/features/base.py +104 -0
  20. quantml/features/gap_features.py +124 -0
  21. quantml/features/registry.py +138 -0
  22. quantml/features/volatility_features.py +140 -0
  23. quantml/features/volume_features.py +142 -0
  24. quantml/functional.py +37 -0
  25. quantml/models/__init__.py +27 -0
  26. quantml/models/attention.py +258 -0
  27. quantml/models/dropout.py +130 -0
  28. quantml/models/gru.py +319 -0
  29. quantml/models/linear.py +112 -0
  30. quantml/models/lstm.py +353 -0
  31. quantml/models/mlp.py +286 -0
  32. quantml/models/normalization.py +289 -0
  33. quantml/models/rnn.py +154 -0
  34. quantml/models/tcn.py +238 -0
  35. quantml/online.py +209 -0
  36. quantml/ops.py +1707 -0
  37. quantml/optim/__init__.py +42 -0
  38. quantml/optim/adafactor.py +206 -0
  39. quantml/optim/adagrad.py +157 -0
  40. quantml/optim/adam.py +267 -0
  41. quantml/optim/lookahead.py +97 -0
  42. quantml/optim/quant_optimizer.py +228 -0
  43. quantml/optim/radam.py +192 -0
  44. quantml/optim/rmsprop.py +203 -0
  45. quantml/optim/schedulers.py +286 -0
  46. quantml/optim/sgd.py +181 -0
  47. quantml/py.typed +0 -0
  48. quantml/streaming.py +175 -0
  49. quantml/tensor.py +462 -0
  50. quantml/time_series.py +447 -0
  51. quantml/training/__init__.py +135 -0
  52. quantml/training/alpha_eval.py +203 -0
  53. quantml/training/backtest.py +280 -0
  54. quantml/training/backtest_analysis.py +168 -0
  55. quantml/training/cv.py +106 -0
  56. quantml/training/data_loader.py +177 -0
  57. quantml/training/ensemble.py +84 -0
  58. quantml/training/feature_importance.py +135 -0
  59. quantml/training/features.py +364 -0
  60. quantml/training/futures_backtest.py +266 -0
  61. quantml/training/gradient_clipping.py +206 -0
  62. quantml/training/losses.py +248 -0
  63. quantml/training/lr_finder.py +127 -0
  64. quantml/training/metrics.py +376 -0
  65. quantml/training/regularization.py +89 -0
  66. quantml/training/trainer.py +239 -0
  67. quantml/training/walk_forward.py +190 -0
  68. quantml/utils/__init__.py +51 -0
  69. quantml/utils/gradient_check.py +274 -0
  70. quantml/utils/logging.py +181 -0
  71. quantml/utils/ops_cpu.py +231 -0
  72. quantml/utils/profiling.py +364 -0
  73. quantml/utils/reproducibility.py +220 -0
  74. quantml/utils/serialization.py +335 -0
  75. quantmllibrary-0.1.0.dist-info/METADATA +536 -0
  76. quantmllibrary-0.1.0.dist-info/RECORD +79 -0
  77. quantmllibrary-0.1.0.dist-info/WHEEL +5 -0
  78. quantmllibrary-0.1.0.dist-info/licenses/LICENSE +22 -0
  79. quantmllibrary-0.1.0.dist-info/top_level.txt +1 -0
quantml/online.py ADDED
@@ -0,0 +1,209 @@
+ """
+ Online learning support for incremental model updates.
+
+ This module provides utilities for online learning scenarios where models
+ are updated incrementally as new data arrives, as is typical in HFT and
+ quantitative trading.
+ """
+
+ from typing import List, Optional, Callable, Any
+ from quantml.tensor import Tensor
+ from quantml.streaming import StreamingTensor
+
+
+ class OnlineOptimizer:
+     """
+     Base class for online optimizers that support incremental updates.
+
+     Online optimizers update model parameters one sample at a time,
+     which is essential for per-tick training in quantitative trading.
+     """
+
+     def __init__(self, learning_rate: float = 0.01):
+         """
+         Initialize the online optimizer.
+
+         Args:
+             learning_rate: Learning rate for updates
+         """
+         self.learning_rate = learning_rate
+         self.step_count = 0
+
+     def step(self, grad: Tensor, param: Tensor):
+         """
+         Perform a single parameter update step.
+
+         Args:
+             grad: Gradient for the parameter
+             param: Parameter tensor to update
+
+         Note:
+             This base implementation only tracks the step count; it does
+             not modify the parameter. Subclasses should override it with
+             a concrete update rule.
+         """
+         # The intended rule is simple gradient descent on param.data, but
+         # the base design avoids in-place operations, so the actual update
+         # is deferred to subclasses; here we only count steps.
+         self.step_count += 1
+
+     def zero_grad(self):
+         """Reset optimizer state."""
+         self.step_count = 0
+
+
+ def incremental_update(
+     model_params: List[Tensor],
+     gradients: List[Tensor],
+     learning_rate: float = 0.01
+ ) -> List[Tensor]:
+     """
+     Perform an incremental parameter update for online learning.
+
+     This function updates model parameters using gradients computed from
+     a single sample or small batch, suitable for per-tick training.
+
+     Args:
+         model_params: List of parameter tensors
+         gradients: List of corresponding gradients
+         learning_rate: Learning rate for the update
+
+     Returns:
+         Updated parameter tensors (new instances)
+
+     Examples:
+         >>> weights = [Tensor([[1.0, 2.0]], requires_grad=True)]
+         >>> grads = [Tensor([[0.1, 0.2]])]
+         >>> updated = incremental_update(weights, grads, learning_rate=0.01)
+     """
+     if len(model_params) != len(gradients):
+         raise ValueError("Number of parameters and gradients must match")
+
+     # Kept as a local import (hoisted out of the loop) to avoid a
+     # potential circular import with the ops module.
+     from quantml import ops
+
+     updated_params = []
+     for param, grad in zip(model_params, gradients):
+         # Plain gradient descent: param_new = param - lr * grad
+         update = ops.mul(grad, learning_rate)
+         new_param = ops.sub(param, update)
+         updated_params.append(new_param)
+
+     return updated_params
+
+
+ def per_tick_training_step(
+     model: Any,
+     x: Tensor,
+     y: Tensor,
+     loss_fn: Callable,
+     optimizer: Optional[OnlineOptimizer] = None,
+     learning_rate: float = 0.01
+ ) -> float:
+     """
+     Perform a single training step on one tick of data.
+
+     This is the core function for online learning in quant trading,
+     where models are updated as each new tick arrives.
+
+     Args:
+         model: Model with forward() method and parameters
+         x: Input tensor (single sample)
+         y: Target tensor (single sample)
+         loss_fn: Loss function (pred, target) -> loss
+         optimizer: Optional online optimizer
+         learning_rate: Learning rate used when no optimizer is provided
+
+     Returns:
+         Loss value for this step
+
+     Examples:
+         >>> from quantml.models import Linear
+         >>> model = Linear(10, 1)
+         >>> x = Tensor([[1.0] * 10])
+         >>> y = Tensor([[0.5]])
+         >>> loss = per_tick_training_step(model, x, y, lambda p, t: (p - t) ** 2)
+     """
+     # Forward pass
+     pred = model.forward(x)
+
+     # Compute loss
+     loss = loss_fn(pred, y)
+
+     # Backward pass
+     if loss.requires_grad:
+         loss.backward()
+
+     # Update parameters
+     if hasattr(model, 'parameters'):
+         # Pair each parameter with its gradient, skipping parameters that
+         # received no gradient, so the two sequences stay aligned.
+         pairs = [(p, p.grad) for p in model.parameters() if p.grad is not None]
+
+         if optimizer is not None:
+             for param, grad in pairs:
+                 optimizer.step(grad, param)
+         elif pairs:
+             # Simple SGD update. incremental_update returns new tensors,
+             # so a full implementation would write them back into the
+             # model; this call demonstrates the pattern.
+             params, grads = zip(*pairs)
+             incremental_update(list(params), list(grads), learning_rate)
+
+     # Extract a scalar loss value from the (possibly nested) data payload
+     if isinstance(loss.data, list):
+         if isinstance(loss.data[0], list):
+             loss_val = loss.data[0][0]
+         else:
+             loss_val = loss.data[0]
+     else:
+         loss_val = loss.data
+
+     return float(loss_val)
+
+
+ class StreamingDataset:
+     """
+     Dataset wrapper for streaming data.
+
+     This class provides an interface for training models on streaming
+     market data, where new samples arrive continuously.
+     """
+
+     def __init__(self, x_stream: StreamingTensor, y_stream: Optional[StreamingTensor] = None):
+         """
+         Initialize streaming dataset.
+
+         Args:
+             x_stream: StreamingTensor for input features
+             y_stream: Optional StreamingTensor for targets
+         """
+         self.x_stream = x_stream
+         self.y_stream = y_stream
+
+     def get_batch(self, size: int = 1) -> tuple:
+         """
+         Get a batch of recent samples.
+
+         Args:
+             size: Number of samples to return
+
+         Returns:
+             Tuple of (x_batch, y_batch) tensors
+         """
+         x_batch = self.x_stream.get_window(size)
+
+         if self.y_stream is not None:
+             y_batch = self.y_stream.get_window(size)
+             return x_batch, y_batch
+         else:
+             return x_batch, None
+
+     def append(self, x: float, y: Optional[float] = None):
+         """
+         Append a new sample to the dataset.
+
+         Args:
+             x: Input value
+             y: Optional target value
+         """
+         self.x_stream.append(x)
+         if y is not None and self.y_stream is not None:
+             self.y_stream.append(y)
+
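
Note: since OnlineOptimizer.step() defers the actual update rule to subclasses, a minimal concrete subclass might look like the sketch below. This is not part of the published package: it assumes Tensor.data is a mutable scalar-or-nested-list payload (consistent with the loss handling in per_tick_training_step) and that writing back to param.data is acceptable; the class name OnlineSGD is hypothetical.

from quantml.online import OnlineOptimizer
from quantml.tensor import Tensor


class OnlineSGD(OnlineOptimizer):
    """Hypothetical subclass: plain SGD, param <- param - lr * grad."""

    def step(self, grad: Tensor, param: Tensor):
        def descend(p, g):
            # Recurse through nested lists; update leaves element-wise.
            if isinstance(p, list):
                return [descend(pi, gi) for pi, gi in zip(p, g)]
            return p - self.learning_rate * g

        # Assumed: Tensor.data is writable (the base design avoids
        # in-place updates, so verify this against quantml/tensor.py).
        param.data = descend(param.data, grad.data)
        self.step_count += 1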
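A hedged sketch of how per_tick_training_step might sit inside a live loop follows. The synthetic tick feed and squared-error loss are illustrative only; the loss mirrors the lambda in the module's docstring example, and real code would pull features from a market feed.

from quantml.tensor import Tensor
from quantml.models import Linear
from quantml.online import per_tick_training_step


def squared_error(pred: Tensor, target: Tensor) -> Tensor:
    # Same form as the docstring example: (p - t) ** 2
    return (pred - target) ** 2


model = Linear(10, 1)
for tick in range(100):                    # stand-in for a live data feed
    x = Tensor([[float(tick % 10)] * 10])  # illustrative feature vector
    y = Tensor([[0.5]])                    # illustrative target
    loss = per_tick_training_step(model, x, y, squared_error, learning_rate=0.01)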
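Finally, StreamingDataset's append/get_batch cycle, again as a sketch: the StreamingTensor constructor argument shown is an assumption (this diff only exhibits its append() and get_window() methods), so adapt it to the actual signature in quantml/streaming.py.

from quantml.streaming import StreamingTensor
from quantml.online import StreamingDataset

# Assumed constructor: window_size=100 is a hypothetical argument;
# check quantml/streaming.py for the real signature.
xs = StreamingTensor(window_size=100)
ys = StreamingTensor(window_size=100)
dataset = StreamingDataset(xs, ys)

dataset.append(101.25, y=0.5)                 # push one tick (input, target)
x_batch, y_batch = dataset.get_batch(size=1)  # most recent sample(s)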