quantmllibrary 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. quantml/__init__.py +74 -0
  2. quantml/autograd.py +154 -0
  3. quantml/cli/__init__.py +10 -0
  4. quantml/cli/run_experiment.py +385 -0
  5. quantml/config/__init__.py +28 -0
  6. quantml/config/config.py +259 -0
  7. quantml/data/__init__.py +33 -0
  8. quantml/data/cache.py +149 -0
  9. quantml/data/feature_store.py +234 -0
  10. quantml/data/futures.py +254 -0
  11. quantml/data/loaders.py +236 -0
  12. quantml/data/memory_optimizer.py +234 -0
  13. quantml/data/validators.py +390 -0
  14. quantml/experiments/__init__.py +23 -0
  15. quantml/experiments/logger.py +208 -0
  16. quantml/experiments/results.py +158 -0
  17. quantml/experiments/tracker.py +223 -0
  18. quantml/features/__init__.py +25 -0
  19. quantml/features/base.py +104 -0
  20. quantml/features/gap_features.py +124 -0
  21. quantml/features/registry.py +138 -0
  22. quantml/features/volatility_features.py +140 -0
  23. quantml/features/volume_features.py +142 -0
  24. quantml/functional.py +37 -0
  25. quantml/models/__init__.py +27 -0
  26. quantml/models/attention.py +258 -0
  27. quantml/models/dropout.py +130 -0
  28. quantml/models/gru.py +319 -0
  29. quantml/models/linear.py +112 -0
  30. quantml/models/lstm.py +353 -0
  31. quantml/models/mlp.py +286 -0
  32. quantml/models/normalization.py +289 -0
  33. quantml/models/rnn.py +154 -0
  34. quantml/models/tcn.py +238 -0
  35. quantml/online.py +209 -0
  36. quantml/ops.py +1707 -0
  37. quantml/optim/__init__.py +42 -0
  38. quantml/optim/adafactor.py +206 -0
  39. quantml/optim/adagrad.py +157 -0
  40. quantml/optim/adam.py +267 -0
  41. quantml/optim/lookahead.py +97 -0
  42. quantml/optim/quant_optimizer.py +228 -0
  43. quantml/optim/radam.py +192 -0
  44. quantml/optim/rmsprop.py +203 -0
  45. quantml/optim/schedulers.py +286 -0
  46. quantml/optim/sgd.py +181 -0
  47. quantml/py.typed +0 -0
  48. quantml/streaming.py +175 -0
  49. quantml/tensor.py +462 -0
  50. quantml/time_series.py +447 -0
  51. quantml/training/__init__.py +135 -0
  52. quantml/training/alpha_eval.py +203 -0
  53. quantml/training/backtest.py +280 -0
  54. quantml/training/backtest_analysis.py +168 -0
  55. quantml/training/cv.py +106 -0
  56. quantml/training/data_loader.py +177 -0
  57. quantml/training/ensemble.py +84 -0
  58. quantml/training/feature_importance.py +135 -0
  59. quantml/training/features.py +364 -0
  60. quantml/training/futures_backtest.py +266 -0
  61. quantml/training/gradient_clipping.py +206 -0
  62. quantml/training/losses.py +248 -0
  63. quantml/training/lr_finder.py +127 -0
  64. quantml/training/metrics.py +376 -0
  65. quantml/training/regularization.py +89 -0
  66. quantml/training/trainer.py +239 -0
  67. quantml/training/walk_forward.py +190 -0
  68. quantml/utils/__init__.py +51 -0
  69. quantml/utils/gradient_check.py +274 -0
  70. quantml/utils/logging.py +181 -0
  71. quantml/utils/ops_cpu.py +231 -0
  72. quantml/utils/profiling.py +364 -0
  73. quantml/utils/reproducibility.py +220 -0
  74. quantml/utils/serialization.py +335 -0
  75. quantmllibrary-0.1.0.dist-info/METADATA +536 -0
  76. quantmllibrary-0.1.0.dist-info/RECORD +79 -0
  77. quantmllibrary-0.1.0.dist-info/WHEEL +5 -0
  78. quantmllibrary-0.1.0.dist-info/licenses/LICENSE +22 -0
  79. quantmllibrary-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,536 @@
1
+ Metadata-Version: 2.4
2
+ Name: quantmllibrary
3
+ Version: 0.1.0
4
+ Summary: A clean, minimal ML library optimized for quantitative trading, streaming data, and online learning
5
+ Author: QuantML Contributors
6
+ Author-email: Sritej Bommaraju <bommaraju@berkeley.edu>
7
+ License: MIT
8
+ Project-URL: Homepage, https://github.com/SritejBommaraju/quantmllibrary
9
+ Project-URL: Documentation, https://github.com/SritejBommaraju/quantmllibrary#readme
10
+ Project-URL: Repository, https://github.com/SritejBommaraju/quantmllibrary
11
+ Project-URL: Issues, https://github.com/SritejBommaraju/quantmllibrary/issues
12
+ Keywords: machine-learning,quantitative-trading,streaming-data,online-learning,autograd,tensor,hft,quant
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Financial and Insurance Industry
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
17
+ Classifier: Topic :: Office/Business :: Financial :: Investment
18
+ Classifier: License :: OSI Approved :: MIT License
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3.12
25
+ Requires-Python: >=3.8
26
+ Description-Content-Type: text/markdown
27
+ License-File: LICENSE
28
+ Provides-Extra: numpy
29
+ Requires-Dist: numpy>=1.20.0; extra == "numpy"
30
+ Provides-Extra: dev
31
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
32
+ Requires-Dist: black>=22.0.0; extra == "dev"
33
+ Requires-Dist: mypy>=0.950; extra == "dev"
34
+ Dynamic: license-file
35
+
36
+ # QuantML
37
+
38
+ A clean, minimal, hackable machine learning library optimized specifically for quantitative trading, streaming data, online learning, and low-latency CPU inference.
39
+
40
+ ## Table of Contents
41
+
42
+ - [Installation](#installation)
43
+ - [Dependencies](#dependencies)
44
+ - [Quick Start](#quick-start)
45
+ - [Directory Structure](#directory-structure)
46
+ - [Usage for Research](#usage-for-research)
47
+ - [Features](#features)
48
+ - [Architecture](#architecture)
49
+ - [Performance](#performance)
50
+ - [Examples](#examples)
51
+ - [Documentation](#documentation)
52
+ - [Contributing](#contributing)
53
+ - [License](#license)
54
+
55
+ ## Installation
56
+
57
+ ### From PyPI (when published)
58
+ ```bash
59
+ pip install quantmllibrary
60
+ ```
61
+
62
+ ### From Source
63
+
64
+ ```bash
65
+ git clone https://github.com/SritejBommaraju/quantmllibrary.git
66
+ cd quantmllibrary
67
+ pip install -e .
68
+ ```
69
+
70
+ ### With Optional Dependencies
71
+
72
+ ```bash
73
+ # With NumPy for performance
74
+ pip install quantmllibrary[numpy]
75
+
76
+ # Development dependencies (from a source checkout)
77
+ pip install -r requirements-dev.txt
78
+ ```
79
+
80
+ ### Using Conda
81
+
82
+ ```bash
83
+ conda env create -f environment.yml
84
+ conda activate quantml
85
+ ```
86
+
87
+ ## Dependencies
88
+
89
+ ### Core Dependencies
90
+
91
+ - **Python**: >=3.8, <3.13
92
+ - **NumPy**: >=1.20.0, <2.0.0 (optional, but recommended for performance)
93
+
94
+ ### Optional Dependencies
95
+
96
+ - **pandas**: >=1.3.0 (for data loading and manipulation)
97
+ - **pyyaml**: >=6.0 (for YAML config support)
98
+ - **pyarrow** (for Parquet feature caching)
99
+
100
+ ### Development Dependencies
101
+
102
+ See `requirements-dev.txt` for testing, linting, and documentation tools.
103
+
104
+ **Note**: The library works without NumPy, but performance is significantly better (2-5x faster) with NumPy installed.
105
+
106
+ ## Quick Start
107
+
108
+ ### Complete Working Example
109
+
110
+ ```python
111
+ from quantml import Tensor
112
+ from quantml.models import Linear
113
+ from quantml.optim import Adam
114
+ from quantml.training import QuantTrainer, FeaturePipeline
115
+ from quantml.training.losses import mse_loss
116
+ from quantml.training.features import normalize_features
117
+
118
+ # 1. Load your data (replace with your data source; this toy series is shorter than the lags and 20-bar window used below)
119
+ prices = [100.0, 101.0, 102.0, 103.0, 104.0, 105.0]
120
+ volumes = [100.0, 110.0, 105.0, 120.0, 115.0, 125.0]
121
+
122
+ # 2. Create features
123
+ pipeline = FeaturePipeline()
124
+ pipeline.add_lagged_feature('price', lags=[1, 5, 10])
125
+ pipeline.add_rolling_feature('price', window=20, func='mean')
126
+ pipeline.add_time_series_feature('price', 'returns')
127
+
128
+ features = pipeline.transform({'price': prices})
129
+ features = normalize_features(features, method='zscore')
130
+
131
+ # 3. Create targets (forward returns)
132
+ targets = [(prices[i+1] - prices[i]) / prices[i] for i in range(len(prices)-1)]
133
+ features = features[:-1] # Align
134
+
135
+ # 4. Train model
136
+ model = Linear(in_features=len(features[0]), out_features=1, bias=True)
137
+ optimizer = Adam(model.parameters(), lr=0.001)
138
+ trainer = QuantTrainer(model, optimizer, mse_loss)
139
+
140
+ # Train
141
+ for i in range(len(features)):
142
+ x = Tensor([features[i]])
143
+ y = Tensor([[targets[i]]])
144
+ trainer.train_step(x, y)
145
+
146
+ # 5. Generate predictions
147
+ predictions = []
148
+ for feat in features:
149
+ x = Tensor([feat])
150
+ pred = model.forward(x)
151
+ pred_val = pred.data[0][0] if isinstance(pred.data[0], list) else pred.data[0]
152
+ predictions.append(pred_val)
153
+
154
+ print(f"Generated {len(predictions)} predictions")
155
+ ```
156
+
157
+ ### Basic Tensor Operations
158
+
159
+ ```python
160
+ from quantml import Tensor
161
+
162
+ # Create tensors
163
+ x = Tensor([1.0, 2.0, 3.0], requires_grad=True)
164
+ y = Tensor([4.0, 5.0, 6.0], requires_grad=True)
165
+
166
+ # Operations
167
+ z = x + y
168
+ z.backward()
169
+
170
+ print(x.grad) # [1.0, 1.0, 1.0]
171
+ ```
172
+
173
+ ### Quant Operations
174
+
175
+ ```python
176
+ from quantml import Tensor
177
+ from quantml import time_series
178
+
179
+ # Price data
180
+ prices = Tensor([100.0, 101.0, 102.0, 103.0, 104.0])
181
+
182
+ # Exponential Moving Average
183
+ ema_20 = time_series.ema(prices, n=20)
184
+
185
+ # Rolling volatility
186
+ vol = time_series.volatility(prices, n=20)
187
+
188
+ # Returns
189
+ rets = time_series.returns(prices)
190
+
191
+ # VWAP
192
+ volumes = Tensor([100.0, 110.0, 105.0, 120.0, 115.0])
193
+ vwap = time_series.vwap(prices, volumes)
194
+ ```
195
+
196
+ ## Directory Structure
197
+
198
+ ```
199
+ quantmllibrary/
200
+ ├── quantml/ # Main library package
201
+ │ ├── __init__.py # Package initialization
202
+ │ ├── tensor.py # Core Tensor class
203
+ │ ├── autograd.py # Automatic differentiation
204
+ │ ├── ops.py # Operations (NumPy-optimized)
205
+ │ ├── functional.py # Functional API
206
+ │ ├── time_series.py # Quant-specific operations
207
+ │ ├── streaming.py # Streaming tensors
208
+ │ ├── online.py # Online learning
209
+ │ ├── config/ # Configuration management
210
+ │ │ ├── __init__.py
211
+ │ │ └── config.py # YAML/JSON config support
212
+ │ ├── data/ # Data management
213
+ │ │ ├── __init__.py
214
+ │ │ ├── validators.py # Data validation
215
+ │ │ ├── loaders.py # Data loaders
216
+ │ │ ├── feature_store.py # Feature caching
217
+ │ │ └── memory_optimizer.py # Memory optimization
218
+ │ ├── models/ # Neural network models
219
+ │ │ ├── linear.py
220
+ │ │ ├── rnn.py
221
+ │ │ └── tcn.py
222
+ │ ├── optim/ # Optimizers and schedulers
223
+ │ │ ├── sgd.py
224
+ │ │ ├── adam.py
225
+ │ │ ├── rmsprop.py
226
+ │ │ ├── adagrad.py
227
+ │ │ ├── adafactor.py
228
+ │ │ ├── lookahead.py
229
+ │ │ ├── radam.py
230
+ │ │ ├── quant_optimizer.py
231
+ │ │ └── schedulers.py
232
+ │ ├── training/ # Training utilities
233
+ │ │ ├── trainer.py
234
+ │ │ ├── losses.py
235
+ │ │ ├── metrics.py
236
+ │ │ ├── features.py
237
+ │ │ ├── walk_forward.py
238
+ │ │ ├── backtest.py
239
+ │ │ ├── alpha_eval.py
240
+ │ │ └── ...
241
+ │ ├── experiments/ # Experiment tracking
242
+ │ │ └── ...
243
+ │ └── utils/ # Utilities
244
+ │ ├── logging.py
245
+ │ ├── reproducibility.py
246
+ │ └── profiling.py
247
+ ├── examples/ # Example scripts
248
+ │ ├── quick_alpha.py # Quick alpha generation
249
+ │ ├── production_alpha.py # Production pipeline
250
+ │ ├── alpha_training.py # Alpha training example
251
+ │ └── ...
252
+ ├── tests/ # Test suite
253
+ │ ├── test_tensor.py
254
+ │ ├── test_ops.py
255
+ │ ├── test_models.py
256
+ │ └── integration/
257
+ ├── configs/ # Configuration files
258
+ │ ├── base.yaml
259
+ │ └── experiments/
260
+ ├── docs/ # Documentation
261
+ │ └── ...
262
+ ├── benchmarks/ # Performance benchmarks
263
+ ├── requirements.txt # Core dependencies
264
+ ├── requirements-dev.txt # Dev dependencies
265
+ ├── environment.yml # Conda environment
266
+ ├── pyproject.toml # Package configuration
267
+ └── README.md # This file
268
+ ```
269
+
270
+ ## Usage for Research
271
+
272
+ ### Overnight Gap Prediction
273
+
274
+ ```python
275
+ from quantml.config import load_config, ExperimentConfig
276
+ from quantml.data import load_csv_data, validate_price_data
277
+ from quantml.training import FeaturePipeline, QuantTrainer
278
+ from quantml.models import Linear
279
+ from quantml.optim import Adam
280
+
281
+ # Load configuration
282
+ config = load_config('configs/experiments/overnight_gap.yaml')
283
+
284
+ # Load and validate data
285
+ data = load_csv_data(
286
+ config.data.data_path,
287
+ price_column='close',
288
+ volume_column='volume'
289
+ )
290
+
291
+ is_valid, errors = validate_price_data(data['prices'], data['volumes'])
292
+ if not is_valid:
293
+ print(f"Data validation errors: {errors}")
294
+
295
+ # Create features for gap prediction
296
+ pipeline = FeaturePipeline()
297
+ pipeline.add_lagged_feature('price', lags=[1, 5, 10, 20])
298
+ pipeline.add_rolling_feature('price', window=20, func='mean')
299
+ pipeline.add_time_series_feature('price', 'volatility', n=20)
300
+
301
+ features = pipeline.transform({'price': data['prices']})
302
+
303
+ # Train model (see examples/alpha_training.py for full example)
304
+ ```
305
+
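+ Continuing the snippet above (see `examples/alpha_training.py` for the full pipeline), here is a minimal training sketch that reuses the Quick Start API; the next-bar-return targets are an illustrative stand-in for the overnight gap label, and `features` is assumed to stay row-aligned with `data['prices']`:
+
+ ```python
+ from quantml import Tensor
+ from quantml.training.losses import mse_loss
+
+ # Illustrative targets: next-bar simple returns as a stand-in for the gap label.
+ prices = data['prices']
+ targets = [(prices[i + 1] - prices[i]) / prices[i] for i in range(len(prices) - 1)]
+ features = features[:-1]  # align features with the forward return
+
+ model = Linear(in_features=len(features[0]), out_features=1, bias=True)
+ optimizer = Adam(model.parameters(), lr=0.001)
+ trainer = QuantTrainer(model, optimizer, mse_loss)
+
+ for feat, target in zip(features, targets):
+     trainer.train_step(Tensor([feat]), Tensor([[target]]))
+ ```
+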
306
+ ### Multi-Instrument Support (ES, MES, NQ, MNQ)
307
+
308
+ ```python
309
+ from quantml.config import load_config
310
+
311
+ # Load instrument-specific config
312
+ es_config = load_config('configs/instruments/ES.yaml')
313
+ mes_config = load_config('configs/instruments/MES.yaml')
314
+
315
+ # Run experiments for each instrument
316
+ for instrument, config in [('ES', es_config), ('MES', mes_config)]:
317
+ # Load data for instrument
318
+ # Create features
319
+ # Train model
320
+ # Evaluate
321
+ pass
322
+ ```
323
+
324
+ ### Walk-Forward Optimization
325
+
326
+ ```python
327
+ from quantml.training import WalkForwardOptimizer, WindowType
328
+
329
+ wfo = WalkForwardOptimizer(
330
+ window_type=WindowType.EXPANDING,
331
+ train_size=500, # 500 days training
332
+ test_size=100 # 100 days testing
333
+ )
334
+
335
+ for train_idx, test_idx in wfo.split(features, n_splits=5):
336
+ # Train on train_idx (past data only)
337
+ # Test on test_idx (future data)
338
+ # No lookahead bias!
339
+ pass
340
+ ```
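+ What goes inside each split is the usual fit-then-evaluate pattern; below is a sketch using the Quick Start training API, where `targets` (forward returns aligned with `features`) and the per-row `Tensor` conversion are illustrative assumptions:
+
+ ```python
+ from quantml import Tensor
+ from quantml.models import Linear
+ from quantml.optim import Adam
+ from quantml.training import QuantTrainer
+ from quantml.training.losses import mse_loss
+
+ for train_idx, test_idx in wfo.split(features, n_splits=5):
+     model = Linear(in_features=len(features[0]), out_features=1, bias=True)
+     trainer = QuantTrainer(model, Adam(model.parameters(), lr=0.001), mse_loss)
+
+     for i in train_idx:  # fit on the past window only
+         trainer.train_step(Tensor([features[i]]), Tensor([[targets[i]]]))
+
+     preds = [model.forward(Tensor([features[i]])) for i in test_idx]  # out-of-sample predictions (Tensor outputs)
+ ```
+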
341
+
342
+ ## Features
343
+
344
+ ### Core Components
345
+ - **Tensor Operations**: Full-featured tensor class with automatic differentiation
346
+ - **Autograd Engine**: Dynamic computation graphs with backpropagation
347
+ - **NumPy-Optimized**: 2-5x faster operations with optional NumPy acceleration
348
+ - **Zero Required Dependencies**: Pure Python core with optional NumPy for performance
349
+
350
+ ### Quant-Specific Operations
351
+ - **Time-Series Ops**: EMA, WMA, rolling mean/std, volatility, z-score
352
+ - **Market Data**: VWAP, order flow imbalance, microprice
353
+ - **Returns**: Simple and log return calculations
354
+ - **Streaming Support**: Ring buffer tensors for tick-level market data
355
+
356
+ ### Neural Network Models
357
+ - **Linear**: Fully connected layer
358
+ - **SimpleRNN**: Recurrent neural network
359
+ - **TCN**: Temporal Convolutional Network
360
+
361
+ ### Optimizers (8 Total)
362
+ - **SGD**: Stochastic Gradient Descent with momentum and weight decay
363
+ - **Adam**: Adaptive Moment Estimation
364
+ - **RMSProp**: Root Mean Square Propagation
365
+ - **AdaGrad**: Adaptive Gradient
366
+ - **AdaFactor**: Memory-efficient Adam variant
367
+ - **Lookahead**: Wrapper optimizer for training stability
368
+ - **RAdam**: Rectified Adam with variance rectification
369
+ - **QuantOptimizer**: Custom optimizer with volatility-aware learning rates
370
+
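+ All optimizers follow the constructor pattern shown in the Quick Start (`Adam(model.parameters(), lr=...)`), so swapping one for another is a one-line change. A minimal sketch, assuming `SGD` is exported from `quantml.optim` alongside `Adam`; the `momentum`/`weight_decay` keyword names are common-convention guesses that should be checked against `quantml/optim/sgd.py`:
+
+ ```python
+ from quantml.models import Linear
+ from quantml.optim import Adam, SGD  # SGD export assumed
+
+ model = Linear(in_features=8, out_features=1, bias=True)
+
+ # Constructor pattern shown in the Quick Start.
+ optimizer = Adam(model.parameters(), lr=0.001)
+
+ # Swap in SGD instead; keyword names below are assumptions, not confirmed API:
+ # optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
+ ```
+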
371
+ ### Learning Rate Schedulers (6 Types)
372
+ - **StepLR**: Step decay by factor
373
+ - **CosineAnnealingLR**: Cosine annealing schedule
374
+ - **WarmupLR**: Linear/cosine warmup
375
+ - **ReduceLROnPlateau**: Reduce on metric plateau
376
+ - **CyclicLR**: Cyclic learning rates
377
+ - **OneCycleLR**: One cycle policy
378
+
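+ A sketch of wiring a scheduler to an optimizer, assuming a PyTorch-style interface (construct with the optimizer, call `step()` once per epoch). The class lives in `quantml/optim/schedulers.py`, but the constructor arguments and the `step()` call below are assumptions to verify against the module docstrings:
+
+ ```python
+ from quantml.models import Linear
+ from quantml.optim import Adam
+ from quantml.optim.schedulers import StepLR  # constructor signature assumed below
+
+ model = Linear(in_features=8, out_features=1, bias=True)
+ optimizer = Adam(model.parameters(), lr=0.01)
+ scheduler = StepLR(optimizer, step_size=10, gamma=0.5)  # assumed: halve LR every 10 epochs
+
+ for epoch in range(30):
+     # ... run training steps for the epoch ...
+     scheduler.step()  # assumed per-epoch update, as in PyTorch
+ ```
+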
379
+ ### Training Utilities
380
+ - **QuantTrainer**: Training loop with early stopping, checkpointing, metrics tracking
381
+ - **Gradient Clipping**: Norm clipping, value clipping, adaptive clipping
382
+ - **Gradient Accumulation**: Simulate larger effective batch sizes
383
+ - **Learning Rate Finder**: Automatic optimal LR discovery
384
+ - **Model Ensembling**: Weighted averaging, voting, stacking
385
+ - **Feature Importance**: Gradient-based and permutation-based importance
386
+ - **Cross-Validation**: TimeSeriesSplit, PurgedKFold for time-series data
387
+ - **Regularization**: Dropout layer
388
+
389
+ ### Quant-Specific Loss Functions
390
+ - **Sharpe Loss**: Optimize for Sharpe ratio
391
+ - **Quantile Loss**: For quantile regression
392
+ - **Information Ratio Loss**: Optimize for information ratio
393
+ - **Huber Loss**: Robust to outliers
394
+ - **Asymmetric Loss**: Different penalties for over/under-prediction
395
+ - **Max Drawdown Loss**: Optimize to reduce drawdowns
396
+ - **Combined Quant Loss**: Multi-objective quant loss
397
+
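+ The implementations live in `quantml/training/losses.py`; only `mse_loss` appears elsewhere in this README, so the snippet below is a standalone, dependency-free illustration of what a Sharpe-style loss computes (negative mean-over-std of strategy returns), not the library's own function:
+
+ ```python
+ import math
+
+ def negative_sharpe(predictions, realized_returns, eps=1e-8):
+     """Illustrative Sharpe-style loss: the sign of each prediction is the position."""
+     strat = [math.copysign(1.0, p) * r for p, r in zip(predictions, realized_returns)]
+     mean = sum(strat) / len(strat)
+     var = sum((x - mean) ** 2 for x in strat) / len(strat)
+     return -mean / (math.sqrt(var) + eps)  # minimizing this maximizes the Sharpe ratio
+
+ print(negative_sharpe([0.2, -0.1, 0.4], [0.01, -0.02, 0.005]))
+ ```
+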
398
+ ### Financial Metrics
399
+ - **Performance**: Sharpe, Sortino, Calmar ratios, max drawdown
400
+ - **Alpha Metrics**: Information Coefficient (IC), Rank IC, hit rate
401
+ - **Risk Metrics**: VaR, CVaR, turnover
402
+ - **Backtesting**: Complete backtesting engine with transaction costs
403
+
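+ As a concrete reference for the IC metric, the dependency-free sketch below computes the Pearson correlation between signals and next-period returns; the library's own implementations (including Rank IC) are in `quantml/training/metrics.py`:
+
+ ```python
+ import math
+
+ def information_coefficient(predictions, forward_returns):
+     """Pearson correlation between predictions and forward returns (illustrative)."""
+     n = len(predictions)
+     mp, mr = sum(predictions) / n, sum(forward_returns) / n
+     cov = sum((p - mp) * (r - mr) for p, r in zip(predictions, forward_returns))
+     sp = math.sqrt(sum((p - mp) ** 2 for p in predictions))
+     sr = math.sqrt(sum((r - mr) ** 2 for r in forward_returns))
+     return cov / (sp * sr)
+
+ print(information_coefficient([0.2, -0.1, 0.4, 0.0], [0.01, -0.02, 0.03, 0.005]))
+ ```
+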
404
+ ### Feature Engineering
405
+ - **FeaturePipeline**: Reproducible feature creation
406
+ - **Lagged Features**: Multiple lag periods
407
+ - **Rolling Windows**: Mean, std, min, max over windows
408
+ - **Normalization**: Z-score, min-max, robust scaling
409
+
410
+ ### Walk-Forward & Backtesting
411
+ - **WalkForwardOptimizer**: Time-series aware train/test splits
412
+ - **BacktestEngine**: Strategy backtesting with P&L tracking
413
+ - **AlphaEvaluator**: Comprehensive alpha signal evaluation
414
+
415
+ ## Architecture
416
+
417
+ ```
418
+ quantml/
419
+ ├── tensor.py # Core Tensor class with NumPy optimization
420
+ ├── autograd.py # Automatic differentiation
421
+ ├── ops.py # Operations (NumPy-optimized)
422
+ ├── functional.py # Functional API
423
+ ├── time_series.py # Quant-specific operations
424
+ ├── streaming.py # Streaming tensors with ring buffer
425
+ ├── online.py # Online learning utilities
426
+ ├── config/ # Configuration management
427
+ │ └── config.py # YAML/JSON config support
428
+ ├── data/ # Data management
429
+ │ ├── validators.py # Data validation
430
+ │ ├── loaders.py # Data loaders
431
+ │ ├── feature_store.py # Feature caching (Parquet)
432
+ │ └── memory_optimizer.py # Memory optimization
433
+ ├── models/ # Neural network models
434
+ │ ├── linear.py
435
+ │ ├── rnn.py
436
+ │ └── tcn.py
437
+ ├── optim/ # Optimizers and schedulers
438
+ │ ├── sgd.py
439
+ │ ├── adam.py
440
+ │ └── ... (8 optimizers total)
441
+ ├── training/ # Training utilities
442
+ │ ├── trainer.py
443
+ │ ├── losses.py
444
+ │ ├── metrics.py
445
+ │ ├── features.py
446
+ │ ├── walk_forward.py
447
+ │ ├── backtest.py
448
+ │ └── ... (15+ modules)
449
+ ├── experiments/ # Experiment tracking
450
+ │ └── ...
451
+ └── utils/ # Utilities
452
+ ├── logging.py
453
+ ├── reproducibility.py
454
+ └── profiling.py
455
+ ```
456
+
457
+ ## Performance
458
+
459
+ **Measured Benchmarks** (see `docs/benchmark_results.md` for details):
460
+
461
+ - **Inference Latency**: 0.02-0.07ms average (sub-millisecond, suitable for HFT)
462
+ - **Training Throughput**: 7,000-14,000 steps/second
463
+ - **Tensor Operations**: 250,000+ ops/sec (100x100 tensors)
464
+ - **Matrix Multiplication**: 2,000-14,000 ops/sec (depending on size)
465
+
466
+ **Optimization Benefits**:
467
+ - **NumPy-optimized**: Direct array operations eliminate conversion overhead
468
+ - **Memory Efficient**: NumPy arrays reduce memory footprint vs Python lists
469
+ - **Low Latency**: Sub-millisecond inference enables real-time trading
470
+ - **High Throughput**: Optimized training loops for rapid iteration
471
+
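+ The figures above come from the repository benchmarks; a quick way to sanity-check inference latency on your own hardware, reusing only the API already shown in the Quick Start (the feature width of 16 is arbitrary), is sketched below:
+
+ ```python
+ import time
+ from quantml import Tensor
+ from quantml.models import Linear
+
+ model = Linear(in_features=16, out_features=1, bias=True)
+ x = Tensor([[0.1] * 16])
+
+ for _ in range(100):      # warm-up passes
+     model.forward(x)
+
+ n = 10_000
+ start = time.perf_counter()
+ for _ in range(n):
+     model.forward(x)
+ elapsed = time.perf_counter() - start
+ print(f"avg forward latency: {elapsed / n * 1e3:.4f} ms")
+ ```
+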
472
+ ## Examples
473
+
474
+ See the `examples/` directory for complete examples:
475
+
476
+ - **`quick_alpha.py`**: Generate alpha signals immediately
477
+ - **`alpha_training.py`**: Complete alpha generation pipeline
478
+ - **`production_alpha.py`**: Production-ready pipeline with walk-forward
479
+ - **`walk_forward_training.py`**: Walk-forward optimization example
480
+ - **`backtest_strategy.py`**: Strategy backtesting
481
+ - **`futures_model.py`**: Complete futures trading pipeline
482
+ - **`online_regression.py`**: Online learning with streaming data
483
+ - **`streaming_training.py`**: Per-tick model updates
484
+ - **`live_alpha_generator.py`**: Real-time alpha generation
485
+
486
+ ## Documentation
487
+
488
+ - **`ALPHA_GUIDE.md`**: Complete guide to generating alpha
489
+ - **`examples/config_example.yaml`**: Configuration file example
490
+ - **API Documentation**: See docstrings in each module
491
+
492
+ ## Design Philosophy
493
+
494
+ 1. **Clean & Hackable**: Code is readable and easy to modify (micrograd-level clarity)
495
+ 2. **Minimal Dependencies**: Pure Python with optional NumPy
496
+ 3. **Quant-First**: Operations designed for trading workflows
497
+ 4. **Streaming Native**: Built for tick-level data from the ground up
498
+ 5. **Low Latency**: CPU-optimized for fast inference
499
+ 6. **Performance**: NumPy-optimized operations throughout
500
+ 7. **Reproducible**: Random seed management, experiment tracking, version control
501
+
502
+ ## Comparison to PyTorch/JAX
503
+
504
+ QuantML is designed specifically for quant trading use cases:
505
+
506
+ - **Smaller footprint**: No CUDA, no complex backends
507
+ - **Streaming-first**: Native support for ring buffers and tick data
508
+ - **Quant ops**: Built-in EMA, VWAP, order flow, etc.
509
+ - **Online learning**: Per-tick updates out of the box
510
+ - **Simpler API**: Easier to understand and modify
511
+ - **Quant-specific**: Optimizers, losses, and metrics for trading
512
+ - **Walk-forward**: Built-in time-series cross-validation
513
+ - **Backtesting**: Integrated backtesting engine
514
+
515
+ ## Key Use Cases
516
+
517
+ 1. **Alpha Generation**: Train models to predict returns
518
+ 2. **Signal Processing**: Real-time feature engineering on streaming data
519
+ 3. **Online Learning**: Update models on every tick
520
+ 4. **Strategy Backtesting**: Test trading strategies with realistic costs
521
+ 5. **Feature Engineering**: Create reproducible quant features
522
+ 6. **Model Ensembling**: Combine multiple models for robust predictions
523
+ 7. **Futures Trading**: Overnight gap prediction, multi-instrument support
524
+ 8. **Research**: Reproducible experiments for academic papers
525
+
526
+ ## Contributing
527
+
528
+ Contributions are welcome! Please feel free to submit a Pull Request.
529
+
530
+ ## License
531
+
532
+ MIT License - see LICENSE file for details.
533
+
534
+ ## Status
535
+
536
+ This is an alpha release. The API may change in future versions.
@@ -0,0 +1,79 @@
1
+ quantml/__init__.py,sha256=lILQXXzEURJiVU7hexV1uMlnQn9acnkMWvVrxlSAOH0,1508
2
+ quantml/autograd.py,sha256=o8SRJCg3FBXjm1Y9--VznUBuGU2KBR1dLCmyUASetlU,5006
3
+ quantml/functional.py,sha256=6iV5kIaGfPG1_-TaV7Q2mMnVxVPxYuRW0gIDIjsxvkY,742
4
+ quantml/online.py,sha256=nqkIJWmQsVwtwjhM6YXrb5Iiu4Kag-PDlQYnLkFbf1w,6337
5
+ quantml/ops.py,sha256=Be2Pero4PeNvWi9SYjjtlkxoqj6pfOUeVC6p6iLgJEs,63780
6
+ quantml/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ quantml/streaming.py,sha256=EShD2WEgmZxNa5HZ1NIb5C81bEGtwS0FyjMEPZdW0pc,5384
8
+ quantml/tensor.py,sha256=bonoOlwUigZopDMUWlgmAl4LAfZDOAelgQ_t3CqsyPU,17727
9
+ quantml/time_series.py,sha256=ORoFgxv_Ih4BFdrl4DqSCWZhzuma4nYdpoe410iVZcY,12840
10
+ quantml/cli/__init__.py,sha256=XsRVZ7IhpF_DvRGCOoA5LyHgrSm2vY9uRwoQu7Ktn20,176
11
+ quantml/cli/run_experiment.py,sha256=uFBwqf_w3CcrBVVSTJ5qRi_V415Ms_a5ujH2o8we14M,13955
12
+ quantml/config/__init__.py,sha256=RbeUHar3kK9_szE0djWB6jmADP1cKX2ska4j7bX74m8,482
13
+ quantml/config/config.py,sha256=cb6bYrJcs2jGS1YWXzBRv-7m73va73h8N3oSjVYR4Bo,9100
14
+ quantml/data/__init__.py,sha256=lPPaLFod54S0XL4i3zoUAfaoveBpHAsA4VDUMKmAaSU,633
15
+ quantml/data/cache.py,sha256=ernv2iRuvgl7Zf5FU1YJMs4BL_1LL4N_OrIvLUPSnXw,4425
16
+ quantml/data/feature_store.py,sha256=WXrx5Ryl15mIcYKxizjOyaAu0EUyqQRF_HEZqJlWOVU,7222
17
+ quantml/data/futures.py,sha256=mvfiKE7KBkrrf2xrUqqqV9drgXdHSyxa2DfTmnxt7Vo,7972
18
+ quantml/data/loaders.py,sha256=T4F0Mwg-AXV4xZlUhletA-Xpp-mjwYOUl97ADfPDP8c,7111
19
+ quantml/data/memory_optimizer.py,sha256=4xuRlfC-C3vdZxZ10GV6M71ZhwtR8FboMTQeYuTkjf0,6307
20
+ quantml/data/validators.py,sha256=rXvomR4zMlDVwTuwzDBelqskwP0RrZHHhhtImfLbEjo,11708
21
+ quantml/experiments/__init__.py,sha256=4sEYP3qdsQOdDcwLfBavoWlUAriaChanVmsqn0lU8eg,623
22
+ quantml/experiments/logger.py,sha256=CmiNKOvujaX1fRUtQJxSGxGi5hRX-NxDlGKRrcNY7mo,6389
23
+ quantml/experiments/results.py,sha256=nCenrQZAZ_gyKeDLKGwjXI358ZOLiyz4GFmxTfTM1o0,5024
24
+ quantml/experiments/tracker.py,sha256=wCVS61_xcdJ3t0ZzV_mQDmGu1wI-SzOoRf3G53oqNFs,7564
25
+ quantml/features/__init__.py,sha256=mlojJt0vLT2DHYmvGPd6c4gXclicfRF9LUjnB-8C744,760
26
+ quantml/features/base.py,sha256=NCcgrBznG0x9he4URAffPrwmubX7fAkstaE56o-W9H8,2757
27
+ quantml/features/gap_features.py,sha256=5fFgluu3YwBwl_2VsFYQnEpmaPes9HX0HRuMHviSZ4w,4126
28
+ quantml/features/registry.py,sha256=6Nqk65DxNQQAsTup6O6LKbGU5dXqIvWIkIhgmeh-vHI,3475
29
+ quantml/features/volatility_features.py,sha256=bTxu1cuzw1OY2ZLTQAalHV96OJIo-3VRPOQRc5IceEw,4722
30
+ quantml/features/volume_features.py,sha256=kNgHvIxj4RXxG9WMpWXV03gl6FiLmD-pepWyf3cgrgg,4774
31
+ quantml/models/__init__.py,sha256=BAbJlq0QWgLuiBXxGadMb4iSXQ5N_90H56Nn0b3YCas,807
32
+ quantml/models/attention.py,sha256=9DLluZmQLPhcUwew6EJAIVLX-yJWT39oyt_mj1Htzks,9653
33
+ quantml/models/dropout.py,sha256=Hb5TgeEU-_LoUowXqLNSwRfQOYJ2JD2XivA9TsTJ_fA,4168
34
+ quantml/models/gru.py,sha256=ii-nftZMXF9A_FaGwZXar1te9lTFJtwPBv1UFOTDQhY,10778
35
+ quantml/models/linear.py,sha256=HPi6-t8AcEr2xBqCEAxn_Ug5-BTXPzZOqSYZjKC5DXU,3484
36
+ quantml/models/lstm.py,sha256=KgALKjd3vsPelzU-ZMFD3RG3t4W_OBJAs5kLtCyu-V0,12420
37
+ quantml/models/mlp.py,sha256=OySHwKp782rdAo2u3qcDS8V1ig4WrX0S5aLIFAcm13c,9440
38
+ quantml/models/normalization.py,sha256=ZuT3kRUo6_b_z3r2AmFGe0nfxC5_nv8Yp9E29mne7nQ,10055
39
+ quantml/models/rnn.py,sha256=FB9wFVv12j0tJ6DOnrBtPwGRRbHNgoj49phxE15Av1Y,5029
40
+ quantml/models/tcn.py,sha256=c9kLOUfuwyObCIbYtP2nxwLJbveH1VoeUUw4Qa0DFb0,7696
41
+ quantml/optim/__init__.py,sha256=d7HKTseqYUOYzkMINtkpwrBiEszOrR4RUq3v1HPieBw,872
42
+ quantml/optim/adafactor.py,sha256=pwyMFFE9_7O3-uTGvJT8w4XgIvBduxu8ePZ2oPvPpNA,7714
43
+ quantml/optim/adagrad.py,sha256=jazAIu05Yta3mdfCntoqR0J0mi4khQp5EOugZpHtyno,5093
44
+ quantml/optim/adam.py,sha256=F6H46nfNix5hW8gxf0OUrOFEiqFlXq_bTbpddsOjvx0,9281
45
+ quantml/optim/lookahead.py,sha256=mr7-9VtyYrBohgXd5ucs1HsRbdHvWsWM4U9HHEObAvE,3479
46
+ quantml/optim/quant_optimizer.py,sha256=O5m24IMg5MXLkz-JwzHq_jIPwYXZLtJ20GrSHcy9Bkc,8609
47
+ quantml/optim/radam.py,sha256=bcEYYQt_deVp6ms-GyDaC656tPtMqbYEIzflHL2j7I4,6816
48
+ quantml/optim/rmsprop.py,sha256=KPX8hcZDWsblMb8uXyo15QGxNHPW8ah-Ipydkhc-BOA,7144
49
+ quantml/optim/schedulers.py,sha256=oCKKu-UKAI_H98HpYlnx9xIyxWAm_gHiUmHnq4KgdPw,10548
50
+ quantml/optim/sgd.py,sha256=kI_QjOLlxsTb7WP65GvbBMPO8c_piFX-sBddTnvR9qA,5974
51
+ quantml/training/__init__.py,sha256=j6cuBz3VOdEDwYBvLSL46NSF_-hUNwoA1Z1cYpAWVyo,2773
52
+ quantml/training/alpha_eval.py,sha256=Ggo7jeELBogTA-deLBB2Hdkn0oboob7tahQlqQphtQk,6417
53
+ quantml/training/backtest.py,sha256=PuVqjh3NBWPI46WF4nVe00YDxlLFey8HBg0YBe3Iq0I,11146
54
+ quantml/training/backtest_analysis.py,sha256=JVBlq2iqnm6vNRI3c2DVEk1xixt5v4y8EE2dGWvFBmE,5080
55
+ quantml/training/cv.py,sha256=Si-XD1hhAMVEkroyMc-HiK7oJBDlivcQCuPZGVpnWF8,3092
56
+ quantml/training/data_loader.py,sha256=l2rQnDMOLZ_jsWCPyaizknfZ6kAnGiOcVQB_AoK9PjM,5231
57
+ quantml/training/ensemble.py,sha256=QqAtq6RxnDu1NaBepA_x4jKUWPVaZBlnJ9L5Dcsi8PA,2837
58
+ quantml/training/feature_importance.py,sha256=iMmCGO1s7JWCmCYwaeA7ccZTIHYTLU8dumzkZC9AAzI,4297
59
+ quantml/training/features.py,sha256=m6louCh__tvDpo1td37MrFxVmvTwAiGZdkIFRfhmdLc,12417
60
+ quantml/training/futures_backtest.py,sha256=PtBjA4vGlZKVVy0fNjVfZ-pN-PAyrGV87WYEuWFovBg,10167
61
+ quantml/training/gradient_clipping.py,sha256=_GebZM5bulKQKL1Sgn0nE-NAqHMYkYKKJp4ARa4x3ts,7239
62
+ quantml/training/losses.py,sha256=90pxsoNGXHESM4kbM1MH11EYWGU8IQG8lJpMKxmVY8o,6690
63
+ quantml/training/lr_finder.py,sha256=m20J0ICAA417nycBd_vtBy3tWkDQvmYOzChonjN6rRU,3653
64
+ quantml/training/metrics.py,sha256=W4u5x_tVO9Htty0NI5niKeccW6_G5oFdP2a4-xs0i90,10678
65
+ quantml/training/regularization.py,sha256=uJ5F7qLUw7q-AUoF1RS5dDyEloi4XCQ6D683U-R_GLc,2607
66
+ quantml/training/trainer.py,sha256=Ivb1Mme8872Tf8rfKARKtkCT36vXxWy4wUv9P8ZBntk,8086
67
+ quantml/training/walk_forward.py,sha256=0UHvnO3kEBF79qAepWlBUMt-2MnzYQawLetJj_2UaDQ,6257
68
+ quantml/utils/__init__.py,sha256=Z9IVACjqz4WPW2ol0aCzO8xwBKx0u8shmRHEsGHlYAQ,1065
69
+ quantml/utils/gradient_check.py,sha256=JF2CvNkMUdyDN6nFxPpTIQMzdreT38PNtqRS060A5wo,8316
70
+ quantml/utils/logging.py,sha256=EE6pP-9tkn_Iguwy1SB0OgAgcsL5x2OZW99JcuhA63E,4759
71
+ quantml/utils/ops_cpu.py,sha256=l0iztlXiYHfanIAG7K-8G_g80N76dGiwvFNy0dBKFJ0,7131
72
+ quantml/utils/profiling.py,sha256=KlwnHKZR8UhF6c0CbDuXHeZ57b9YWvFBJEY1H2pZOek,10802
73
+ quantml/utils/reproducibility.py,sha256=EVO2cyNI7RTjj7FRhS22CPyw9HIGsI4P6BIdE1zfIks,5509
74
+ quantml/utils/serialization.py,sha256=puzfy1Z5JRrKfbLFldKalAOkkAPFKMl7ATbuV2cBsfs,9791
75
+ quantmllibrary-0.1.0.dist-info/licenses/LICENSE,sha256=O72pW8FNah_i6YbFFNTb_dzlAS1Lvwce_dPxFDc4Dnw,1078
76
+ quantmllibrary-0.1.0.dist-info/METADATA,sha256=UgkYdf7yE8UTAA5rnO8uoeahVx26CQwUcWrCpKXPqPQ,18685
77
+ quantmllibrary-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
78
+ quantmllibrary-0.1.0.dist-info/top_level.txt,sha256=Bw_CMshUABCzP3PzMZdKw9NHNTbHG79iZXpG2uborOo,8
79
+ quantmllibrary-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,22 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 QuantML Contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
@@ -0,0 +1 @@
1
+ quantml