@ebowwa/quant-rust 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +161 -0
- package/bun-ffi.d.ts +54 -0
- package/dist/index.js +576 -0
- package/dist/src/index.d.ts +324 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/types/index.d.ts +403 -0
- package/dist/types/index.d.ts.map +1 -0
- package/native/README.md +62 -0
- package/native/darwin-arm64/libquant_rust.dylib +0 -0
- package/package.json +70 -0
- package/scripts/postinstall.cjs +85 -0
- package/src/ffi.rs +496 -0
- package/src/index.ts +1073 -0
- package/src/indicators/ma.rs +222 -0
- package/src/indicators/mod.rs +18 -0
- package/src/indicators/momentum.rs +353 -0
- package/src/indicators/sr.rs +195 -0
- package/src/indicators/trend.rs +351 -0
- package/src/indicators/volatility.rs +270 -0
- package/src/indicators/volume.rs +213 -0
- package/src/lib.rs +130 -0
- package/src/patterns/breakout.rs +431 -0
- package/src/patterns/chart.rs +772 -0
- package/src/patterns/mod.rs +394 -0
- package/src/patterns/sr.rs +423 -0
- package/src/prediction/amm.rs +338 -0
- package/src/prediction/arbitrage.rs +230 -0
- package/src/prediction/calibration.rs +317 -0
- package/src/prediction/kelly.rs +232 -0
- package/src/prediction/lmsr.rs +194 -0
- package/src/prediction/mod.rs +59 -0
- package/src/prediction/odds.rs +229 -0
- package/src/prediction/pnl.rs +254 -0
- package/src/prediction/risk.rs +228 -0
- package/src/risk/beta.rs +257 -0
- package/src/risk/drawdown.rs +256 -0
- package/src/risk/leverage.rs +201 -0
- package/src/risk/mod.rs +388 -0
- package/src/risk/portfolio.rs +287 -0
- package/src/risk/ratios.rs +290 -0
- package/src/risk/sizing.rs +194 -0
- package/src/risk/var.rs +222 -0
- package/src/stats/cdf.rs +257 -0
- package/src/stats/correlation.rs +225 -0
- package/src/stats/distribution.rs +194 -0
- package/src/stats/hypothesis.rs +177 -0
- package/src/stats/matrix.rs +346 -0
- package/src/stats/mod.rs +257 -0
- package/src/stats/regression.rs +239 -0
- package/src/stats/rolling.rs +193 -0
- package/src/stats/timeseries.rs +263 -0
- package/src/types.rs +224 -0
- package/src/utils/mod.rs +215 -0
- package/src/utils/normalize.rs +192 -0
- package/src/utils/price.rs +167 -0
- package/src/utils/quantiles.rs +177 -0
- package/src/utils/returns.rs +158 -0
- package/src/utils/rolling.rs +97 -0
- package/src/utils/stats.rs +154 -0
- package/types/index.ts +513 -0
package/src/stats/timeseries.rs
ADDED

@@ -0,0 +1,263 @@
//! Time Series Analysis Module
//!
//! Autocorrelation, partial autocorrelation, and exponential smoothing

use crate::utils::mean;

/// Calculate autocorrelation function (ACF)
pub fn autocorrelation(data: &[f64], max_lag: usize) -> Vec<f64> {
    if data.is_empty() || max_lag == 0 {
        return Vec::new();
    }

    let n = data.len();
    let avg = mean(data);
    let variance_val: f64 = data.iter()
        .map(|x| (x - avg).powi(2))
        .sum::<f64>() / n as f64;

    if variance_val == 0.0 {
        return vec![0.0; max_lag + 1];
    }

    let max_lag = max_lag.min(n - 1);
    let mut acf = Vec::with_capacity(max_lag + 1);

    for lag in 0..=max_lag {
        if lag == 0 {
            acf.push(1.0);
        } else {
            let mut sum = 0.0;
            for i in 0..(n - lag) {
                sum += (data[i] - avg) * (data[i + lag] - avg);
            }
            acf.push(sum / ((n - lag) as f64 * variance_val));
        }
    }

    acf
}
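
A quick sanity check on the estimator above: it normalizes the lag-k sum by (n - k) times the variance, so a perfectly alternating series comes out at exactly -1 at lag 1. A test-style sketch that could sit in the tests module at the bottom of this file:

#[test]
fn acf_alternating_series() {
    // Deviations alternate ±0.5 around the mean 1.5, so every adjacent
    // product is -0.25; the normalized lag-1 sum is -1.25 / (5 * 0.25) = -1.0.
    let data = [1.0, 2.0, 1.0, 2.0, 1.0, 2.0];
    let acf = autocorrelation(&data, 1);
    assert!((acf[1] + 1.0).abs() < 1e-12);
}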

/// Calculate partial autocorrelation function (PACF)
/// Using Levinson-Durbin recursion
pub fn partial_autocorrelation(data: &[f64], max_lag: usize) -> Vec<f64> {
    if data.is_empty() || max_lag == 0 {
        return Vec::new();
    }

    let acf = autocorrelation(data, max_lag);
    if acf.is_empty() {
        return Vec::new();
    }

    let mut pacf = vec![acf[0]]; // Lag 0 is always 1

    // Levinson-Durbin recursion for lags 1 to max_lag
    for k in 1..=max_lag.min(acf.len() - 1) {
        // Simplified Yule-Walker approach
        let mut phi = acf[k];
        for j in 1..k {
            if j - 1 < pacf.len() {
                phi -= pacf[j] * acf[k - j];
            }
        }
        pacf.push(phi);
    }

    pacf
}
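
Note that for k = 1 the correction loop is empty, so pacf[1] always equals acf[1]. This simplified recursion only subtracts previous PACF terms; textbook Levinson-Durbin also maintains and updates the full set of AR coefficients at each order and divides by a normalizing denominator. A test-style sketch of the k = 1 identity:

#[test]
fn pacf_first_lag_matches_acf() {
    let data: Vec<f64> = (1..=10).map(|x| x as f64).collect();
    let acf = autocorrelation(&data, 3);
    let pacf = partial_autocorrelation(&data, 3);
    assert!((pacf[1] - acf[1]).abs() < 1e-12);
}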

/// Simple exponential smoothing (SES)
pub fn exponential_smoothing(data: &[f64], alpha: f64) -> Vec<f64> {
    if data.is_empty() {
        return Vec::new();
    }

    let alpha = alpha.clamp(0.0, 1.0);
    let mut result = Vec::with_capacity(data.len());
    result.push(data[0]);

    for i in 1..data.len() {
        let smoothed = alpha * data[i] + (1.0 - alpha) * result[i - 1];
        result.push(smoothed);
    }

    result
}
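
The recurrence is s_0 = x_0 and s_t = alpha * x_t + (1 - alpha) * s_{t-1}. Worked by hand for alpha = 0.5, as a test-style sketch consistent with the test further down:

#[test]
fn ses_worked_example() {
    // s1 = 0.5 * 12 + 0.5 * 10 = 11, then s2 = 0.5 * 14 + 0.5 * 11 = 12.5
    let smoothed = exponential_smoothing(&[10.0, 12.0, 14.0], 0.5);
    assert_eq!(smoothed, vec![10.0, 11.0, 12.5]);
}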

/// Double exponential smoothing (Holt's method)
pub fn double_exponential_smoothing(
    data: &[f64],
    alpha: f64,
    beta: f64,
) -> DoubleExponentialResult {
    if data.len() < 2 {
        return DoubleExponentialResult {
            level: Vec::new(),
            trend: Vec::new(),
            forecast: Vec::new(),
        };
    }

    let alpha = alpha.clamp(0.0, 1.0);
    let beta = beta.clamp(0.0, 1.0);

    let mut level = Vec::with_capacity(data.len());
    let mut trend = Vec::with_capacity(data.len());
    let mut forecast = Vec::with_capacity(data.len());

    // Initial values
    level.push(data[0]);
    trend.push(data[1] - data[0]);
    forecast.push(data[0]);

    for i in 1..data.len() {
        let new_level = alpha * data[i] + (1.0 - alpha) * (level[i - 1] + trend[i - 1]);
        let new_trend = beta * (new_level - level[i - 1]) + (1.0 - beta) * trend[i - 1];

        level.push(new_level);
        trend.push(new_trend);
        forecast.push(new_level + new_trend);
    }

    DoubleExponentialResult {
        level,
        trend,
        forecast,
    }
}

/// Double exponential smoothing result
#[derive(Debug, Clone)]
pub struct DoubleExponentialResult {
    /// Level component
    pub level: Vec<f64>,
    /// Trend component
    pub trend: Vec<f64>,
    /// Forecast (level + trend)
    pub forecast: Vec<f64>,
}
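
One Holt step by hand, with alpha = beta = 0.5 and data [10, 12, 14]: the initial state is l0 = 10, b0 = 12 - 10 = 2; then l1 = 0.5*12 + 0.5*(10 + 2) = 12 and b1 = 0.5*(12 - 10) + 0.5*2 = 2, so forecast[1] = l1 + b1 = 14. As a test-style sketch:

#[test]
fn holt_worked_example() {
    let r = double_exponential_smoothing(&[10.0, 12.0, 14.0], 0.5, 0.5);
    assert_eq!(r.level[1], 12.0);
    assert_eq!(r.trend[1], 2.0);
    assert_eq!(r.forecast[1], 14.0);
}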

/// Triple exponential smoothing (Holt-Winters)
pub fn triple_exponential_smoothing(
    data: &[f64],
    alpha: f64,
    beta: f64,
    gamma: f64,
    period: usize,
) -> TripleExponentialResult {
    if data.len() < period * 2 {
        return TripleExponentialResult {
            level: Vec::new(),
            trend: Vec::new(),
            seasonal: Vec::new(),
            forecast: Vec::new(),
        };
    }

    let alpha = alpha.clamp(0.0, 1.0);
    let beta = beta.clamp(0.0, 1.0);
    let gamma = gamma.clamp(0.0, 1.0);

    let n = data.len();
    let mut level = vec![0.0; n];
    let mut trend = vec![0.0; n];
    let mut seasonal = vec![0.0; n + period];
    let mut forecast = vec![0.0; n];

    // Initialize seasonal factors
    let mut sum_seasonal = 0.0;
    for i in 0..period {
        seasonal[i] = data[i];
        sum_seasonal += data[i];
    }
    let avg_seasonal = sum_seasonal / period as f64;

    for i in 0..period {
        seasonal[i] /= avg_seasonal;
    }

    // Seasonal factors repeat each cycle; seed the second cycle so the
    // smoothing loop below reads initialized factors on its first pass
    // (otherwise seasonal[period..2*period] would still be 0.0 and the
    // level update would divide by zero).
    for i in 0..period {
        seasonal[i + period] = seasonal[i];
    }

    // Initialize level and trend
    level[period - 1] = data[period - 1] / seasonal[period - 1];
    trend[period - 1] = (data[period] / seasonal[0] - data[period - 1] / seasonal[period - 1]) / period as f64;

    // Calculate smoothed values
    for i in period..n {
        level[i] = alpha * data[i] / seasonal[i] + (1.0 - alpha) * (level[i - 1] + trend[i - 1]);
        trend[i] = beta * (level[i] - level[i - 1]) + (1.0 - beta) * trend[i - 1];
        seasonal[i + period] = gamma * data[i] / level[i] + (1.0 - gamma) * seasonal[i];
        forecast[i] = (level[i - 1] + trend[i - 1]) * seasonal[i];
    }

    // Set initial forecasts to actuals
    for i in 0..period {
        forecast[i] = data[i];
    }

    TripleExponentialResult {
        level,
        trend,
        seasonal: seasonal[..n].to_vec(),
        forecast,
    }
}

/// Triple exponential smoothing result
#[derive(Debug, Clone)]
pub struct TripleExponentialResult {
    /// Level component
    pub level: Vec<f64>,
    /// Trend component
    pub trend: Vec<f64>,
    /// Seasonal component
    pub seasonal: Vec<f64>,
    /// Forecast
    pub forecast: Vec<f64>,
}
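
The seasonal factors are multiplicative: the first period's observations are divided by their own average, so the initial factors average to exactly 1. A small arithmetic check of that normalization (illustrative only):

#[test]
fn seasonal_factors_average_to_one() {
    // period = 2, first period [10, 20]: avg = 15, factors [2/3, 4/3]
    let (a, b) = (10.0 / 15.0, 20.0 / 15.0);
    assert!(((a + b) / 2.0 - 1.0).abs() < 1e-12);
}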

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_autocorrelation() {
        let data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
        let acf = autocorrelation(&data, 3);

        assert_eq!(acf.len(), 4);
        assert!((acf[0] - 1.0).abs() < 1e-10); // Lag 0 is always 1
    }

    #[test]
    fn test_partial_autocorrelation() {
        let data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
        let pacf = partial_autocorrelation(&data, 3);

        assert_eq!(pacf.len(), 4);
        assert!((pacf[0] - 1.0).abs() < 1e-10); // Lag 0 is always 1
    }

    #[test]
    fn test_exponential_smoothing() {
        let data = [10.0, 12.0, 14.0, 16.0, 18.0];
        let smoothed = exponential_smoothing(&data, 0.5);

        assert_eq!(smoothed.len(), 5);
        assert!((smoothed[0] - 10.0).abs() < 1e-10);
        // Second value: 0.5 * 12 + 0.5 * 10 = 11
        assert!((smoothed[1] - 11.0).abs() < 1e-10);
    }

    #[test]
    fn test_double_exponential_smoothing() {
        let data = [10.0, 12.0, 14.0, 16.0, 18.0];
        let result = double_exponential_smoothing(&data, 0.5, 0.5);

        assert_eq!(result.level.len(), 5);
        assert_eq!(result.trend.len(), 5);
        assert_eq!(result.forecast.len(), 5);

        // Initial trend should be 2 (12 - 10)
        assert!((result.trend[0] - 2.0).abs() < 1e-10);
    }
}

package/src/types.rs
ADDED

@@ -0,0 +1,224 @@
//! Core types for quantitative finance computations

use serde::{Deserialize, Serialize};

/// OHLCV candlestick data
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct OHLCV {
    /// Unix timestamp in milliseconds
    pub timestamp: i64,
    /// Opening price
    pub open: f64,
    /// High price
    pub high: f64,
    /// Low price
    pub low: f64,
    /// Closing price
    pub close: f64,
    /// Volume
    pub volume: f64,
}

impl OHLCV {
    pub fn new(timestamp: i64, open: f64, high: f64, low: f64, close: f64, volume: f64) -> Self {
        Self { timestamp, open, high, low, close, volume }
    }

    /// Calculate the typical price (HLC/3)
    pub fn typical_price(&self) -> f64 {
        (self.high + self.low + self.close) / 3.0
    }

    /// Calculate the range (high - low)
    pub fn range(&self) -> f64 {
        self.high - self.low
    }

    /// Calculate the body size |close - open|
    pub fn body_size(&self) -> f64 {
        (self.close - self.open).abs()
    }

    /// Check if bullish (close > open)
    pub fn is_bullish(&self) -> bool {
        self.close > self.open
    }

    /// Check if bearish (close < open)
    pub fn is_bearish(&self) -> bool {
        self.close < self.open
    }
}
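
A usage sketch for the helpers (this file ships no tests module; values are illustrative):

#[test]
fn ohlcv_helpers() {
    let bar = OHLCV::new(0, 9.0, 12.0, 8.0, 10.0, 1_000.0);
    assert_eq!(bar.typical_price(), 10.0); // (12 + 8 + 10) / 3
    assert_eq!(bar.range(), 4.0);          // 12 - 8
    assert!(bar.is_bullish());             // close 10 > open 9
}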

/// AMM (Automated Market Maker) state for constant-product AMMs
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct AMMState {
    /// YES pool reserves
    pub pool_yes: f64,
    /// NO pool reserves
    pub pool_no: f64,
    /// Constant product k = pool_yes * pool_no
    pub k: f64,
    /// LP token total supply
    pub lp_token_supply: f64,
    /// Fee as decimal (e.g., 0.003 for 0.3%)
    pub fee: f64,
}

impl AMMState {
    pub fn new(pool_yes: f64, pool_no: f64, fee: f64) -> Self {
        let k = pool_yes * pool_no;
        Self { pool_yes, pool_no, k, lp_token_supply: 0.0, fee }
    }

    /// Get the current YES price
    pub fn yes_price(&self) -> f64 {
        let total = self.pool_yes + self.pool_no;
        if total == 0.0 { return 0.5; }
        self.pool_no / total
    }

    /// Get the current NO price
    pub fn no_price(&self) -> f64 {
        1.0 - self.yes_price()
    }
}
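
Under this constant-product convention the YES price is the NO pool's share of total reserves, so scarcer YES reserves mean a pricier YES side, and the two prices always sum to 1. A quick test-style check:

#[test]
fn amm_prices_sum_to_one() {
    let amm = AMMState::new(50.0, 150.0, 0.003);
    assert_eq!(amm.yes_price(), 0.75); // 150 / (50 + 150)
    assert_eq!(amm.yes_price() + amm.no_price(), 1.0);
}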

/// LMSR (Logarithmic Market Scoring Rule) state
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct LMSRState {
    /// YES shares outstanding
    pub yes_shares: f64,
    /// NO shares outstanding
    pub no_shares: f64,
    /// Liquidity parameter (controls price sensitivity)
    pub b: f64,
}

impl LMSRState {
    pub fn new(yes_shares: f64, no_shares: f64, b: f64) -> Self {
        Self { yes_shares, no_shares, b }
    }

    /// Calculate the current YES price using LMSR formula
    pub fn yes_price(&self) -> f64 {
        use std::f64::consts::E;
        let exp_yes = E.powf(self.yes_shares / self.b);
        let exp_no = E.powf(self.no_shares / self.b);
        exp_yes / (exp_yes + exp_no)
    }

    /// Calculate the current NO price
    pub fn no_price(&self) -> f64 {
        1.0 - self.yes_price()
    }
}
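
The LMSR price is a softmax over outstanding shares: p_yes = e^(q_yes/b) / (e^(q_yes/b) + e^(q_no/b)). Equal shares give 0.5 regardless of the liquidity parameter b, and more YES shares outstanding pushes the YES price up:

#[test]
fn lmsr_price_behaviour() {
    let m = LMSRState::new(100.0, 100.0, 50.0);
    assert!((m.yes_price() - 0.5).abs() < 1e-12);
    let m2 = LMSRState::new(120.0, 100.0, 50.0);
    assert!(m2.yes_price() > 0.5);
}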

/// Result of Kelly criterion calculation
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct KellyResult {
    /// Full Kelly fraction (0-1)
    pub kelly_fraction: f64,
    /// Half Kelly fraction (more conservative)
    pub half_kelly: f64,
    /// Quarter Kelly fraction (most conservative)
    pub quarter_kelly: f64,
    /// Full bet size in currency units
    pub full_bet_size: f64,
    /// Half bet size in currency units
    pub half_bet_size: f64,
    /// Quarter bet size in currency units
    pub quarter_bet_size: f64,
    /// Expected edge (your_prob - market_price)
    pub edge: f64,
    /// Decimal odds
    pub odds: f64,
}
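
For reference, the standard binary Kelly fraction with win probability p and decimal odds o is f* = (p*o - 1) / (o - 1); whether kelly.rs computes exactly this is not shown in this hunk, so treat the following as an assumed formulation. Worked: p = 0.6 at even money (o = 2.0) gives f* = 0.2, so the half and quarter fractions above would be 0.10 and 0.05.

// Assumed standard formulation, not necessarily what kelly.rs implements.
fn kelly_fraction(p: f64, o: f64) -> f64 {
    (p * o - 1.0) / (o - 1.0)
}

#[test]
fn kelly_worked_example() {
    assert!((kelly_fraction(0.6, 2.0) - 0.2).abs() < 1e-12);
}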

/// Result of VaR (Value at Risk) calculation
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct VaRResult {
    /// Value at Risk at the given confidence level
    pub var: f64,
    /// Conditional VaR (Expected Shortfall)
    pub cvar: f64,
    /// Confidence level used
    pub confidence_level: f64,
}
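
A minimal historical-simulation sketch of how such a result's fields might be produced (illustrative only; the package's var.rs is not shown in this hunk):

// Historical VaR/CVaR: the loss at the (1 - confidence) quantile of the
// return sample, and the mean loss in the tail beyond it. Losses are
// reported as positive numbers; assumes a non-empty return series.
fn historical_var(returns: &[f64], confidence_level: f64) -> (f64, f64) {
    let mut sorted = returns.to_vec();
    sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
    let idx = (((1.0 - confidence_level) * sorted.len() as f64).floor() as usize)
        .min(sorted.len() - 1);
    let var = -sorted[idx];
    let tail = &sorted[..=idx];
    let cvar = -(tail.iter().sum::<f64>() / tail.len() as f64);
    (var, cvar)
}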

/// Result of Brier score calculation
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct BrierScore {
    /// The Brier score (lower is better, 0 = perfect)
    pub score: f64,
    /// Number of predictions evaluated
    pub count: usize,
}
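
The Brier score is the mean squared error between predicted probabilities and realized binary outcomes; a perfect forecaster scores 0 and always predicting 0.5 scores 0.25:

#[test]
fn brier_worked_example() {
    let preds = [0.9_f64, 0.2, 0.7];
    let outcomes = [1.0_f64, 0.0, 1.0];
    let score = preds.iter().zip(&outcomes)
        .map(|(p, o)| (p - o).powi(2))
        .sum::<f64>() / preds.len() as f64;
    // (0.01 + 0.04 + 0.09) / 3 ≈ 0.0467
    assert!((score - 0.14 / 3.0).abs() < 1e-12);
}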

/// Trading signal
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum Signal {
    StrongBuy = 2,
    Buy = 1,
    Neutral = 0,
    Sell = -1,
    StrongSell = -2,
}

impl Default for Signal {
    fn default() -> Self {
        Signal::Neutral
    }
}

/// Indicator result with value and signal
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub struct IndicatorResult {
    /// The indicator value
    pub value: f64,
    /// Trading signal derived from the indicator
    pub signal: Signal,
    /// Optional upper band (for Bollinger, etc.)
    pub upper_band: Option<f64>,
    /// Optional lower band
    pub lower_band: Option<f64>,
}

impl IndicatorResult {
    pub fn new(value: f64, signal: Signal) -> Self {
        Self { value, signal, upper_band: None, lower_band: None }
    }

    pub fn with_bands(value: f64, signal: Signal, upper: f64, lower: f64) -> Self {
        Self { value, signal, upper_band: Some(upper), lower_band: Some(lower) }
    }
}

/// Candlestick pattern type
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum CandlePattern {
    Doji,
    Hammer,
    InvertedHammer,
    BullishEngulfing,
    BearishEngulfing,
    MorningStar,
    EveningStar,
    ThreeWhiteSoldiers,
    ThreeBlackCrows,
    ShootingStar,
    HangingMan,
}

/// Pattern recognition result
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PatternResult {
    /// The detected pattern
    pub pattern: CandlePattern,
    /// Confidence level (0-1)
    pub confidence: f64,
    /// Index in the data where pattern was found
    pub index: usize,
    /// Whether the pattern is bullish
    pub is_bullish: bool,
}

package/src/utils/mod.rs
ADDED

@@ -0,0 +1,215 @@
//! Utility Functions Module
//!
//! Common utility functions for quantitative analysis.
//! This module provides statistical functions, rolling operations,
//! price extraction, return calculations, normalization, and quantile functions.

pub mod normalize;
pub mod price;
pub mod quantiles;
pub mod returns;
pub mod rolling;
pub mod stats;

// Re-export all public functions for convenient access
pub use normalize::{
    decimal_scale,
    normalize_min_max,
    normalize_to_range,
    normalize_z_score,
    percent_change,
    robust_scale,
    softmax,
};

pub use price::{
    extract_close,
    extract_high,
    extract_low,
    extract_ohlcv,
    extract_open,
    extract_timestamps,
    extract_typical_price,
    extract_volume,
    extract_weighted_close,
};

pub use quantiles::{
    deciles,
    detect_outliers_iqr,
    five_number_summary,
    iqr,
    percentile,
    quantile,
    quartiles,
    remove_outliers_iqr,
};

pub use returns::{
    absolute_returns,
    annualize_return,
    cumulative_returns,
    deannualize_return,
    log_returns,
    returns_to_prices,
    simple_returns,
};

pub use rolling::{
    rolling,
    rolling_max,
    rolling_mean,
    rolling_min,
    rolling_std_dev,
    rolling_sum,
    rolling_variance,
};

pub use stats::{
    max,
    mean,
    median,
    min,
    mode,
    range,
    std_dev,
    sum,
    variance,
};

// Additional utility functions from TypeScript source

/// Pad array at the beginning (for indicator alignment)
pub fn pad_front<T: Clone>(arr: &[T], padding_length: usize, fill_value: T) -> Vec<T> {
    let mut result = Vec::with_capacity(arr.len() + padding_length);
    for _ in 0..padding_length {
        result.push(fill_value.clone());
    }
    result.extend_from_slice(arr);
    result
}

/// Linear interpolation
pub fn lerp(a: f64, b: f64, t: f64) -> f64 {
    a + (b - a) * t
}

/// Interpolate missing values (NaN) in array
pub fn interpolate_nan(arr: &[f64]) -> Vec<f64> {
    let mut result = arr.to_vec();

    for i in 0..result.len() {
        if result[i].is_nan() {
            let prev_index = (0..i).rev().find(|&j| !result[j].is_nan());
            let next_index = (i + 1..result.len()).find(|&j| !result[j].is_nan());

            match (prev_index, next_index) {
                (Some(prev), Some(next)) => {
                    let t = (i - prev) as f64 / (next - prev) as f64;
                    result[i] = lerp(result[prev], result[next], t);
                }
                (Some(prev), None) => {
                    result[i] = result[prev];
                }
                (None, Some(next)) => {
                    result[i] = result[next];
                }
                (None, None) => {
                    // No valid values to interpolate from
                }
            }
        }
    }

    result
}

/// Check if array contains only finite numbers (no NaN or infinities)
pub fn is_valid_array(arr: &[f64]) -> bool {
    !arr.is_empty() && arr.iter().all(|&v| !v.is_nan() && !v.is_infinite())
}

/// Round to specific decimal places
pub fn round_to(value: f64, decimals: u32) -> f64 {
    let factor = 10_f64.powi(decimals as i32);
    (value * factor).round() / factor
}

/// Clamp value within range
pub fn clamp(value: f64, min_val: f64, max_val: f64) -> f64 {
    value.clamp(min_val, max_val)
}

/// Check if two numbers are approximately equal
pub fn approx_equal(a: f64, b: f64, epsilon: f64) -> bool {
    (a - b).abs() < epsilon
}

/// Calculate percentage difference
pub fn percent_diff(a: f64, b: f64) -> f64 {
    if a == 0.0 && b == 0.0 {
        return 0.0;
    }
    if a == 0.0 {
        return if b > 0.0 { f64::INFINITY } else { f64::NEG_INFINITY };
    }
    ((b - a) / a.abs()) * 100.0
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pad_front() {
        let arr = [1.0, 2.0, 3.0];
        let padded = pad_front(&arr, 2, 0.0);
        assert_eq!(padded, vec![0.0, 0.0, 1.0, 2.0, 3.0]);
    }

    #[test]
    fn test_lerp() {
        assert!((lerp(0.0, 10.0, 0.5) - 5.0).abs() < 1e-10);
        assert!((lerp(0.0, 10.0, 0.0) - 0.0).abs() < 1e-10);
        assert!((lerp(0.0, 10.0, 1.0) - 10.0).abs() < 1e-10);
    }

    #[test]
    fn test_interpolate_nan() {
        let arr = [1.0, f64::NAN, 3.0];
        let result = interpolate_nan(&arr);
        assert!((result[1] - 2.0).abs() < 1e-10);
    }

    #[test]
    fn test_is_valid_array() {
        assert!(is_valid_array(&[1.0, 2.0, 3.0]));
        assert!(!is_valid_array(&[1.0, f64::NAN, 3.0]));
        assert!(!is_valid_array(&[]));
    }

    #[test]
    fn test_round_to() {
        assert!((round_to(3.14159, 2) - 3.14).abs() < 1e-10);
        assert!((round_to(3.14159, 4) - 3.1416).abs() < 1e-10);
    }

    #[test]
    fn test_clamp() {
        assert_eq!(clamp(5.0, 0.0, 10.0), 5.0);
        assert_eq!(clamp(-5.0, 0.0, 10.0), 0.0);
        assert_eq!(clamp(15.0, 0.0, 10.0), 10.0);
    }

    #[test]
    fn test_approx_equal() {
        assert!(approx_equal(1.0, 1.0000000001, 1e-9));
        assert!(!approx_equal(1.0, 1.1, 1e-9));
    }

    #[test]
    fn test_percent_diff() {
        assert!((percent_diff(100.0, 110.0) - 10.0).abs() < 1e-10);
        assert!((percent_diff(100.0, 90.0) - (-10.0)).abs() < 1e-10);
    }
}