@ebowwa/quant-rust 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/README.md +161 -0
  2. package/bun-ffi.d.ts +54 -0
  3. package/dist/index.js +576 -0
  4. package/dist/src/index.d.ts +324 -0
  5. package/dist/src/index.d.ts.map +1 -0
  6. package/dist/types/index.d.ts +403 -0
  7. package/dist/types/index.d.ts.map +1 -0
  8. package/native/README.md +62 -0
  9. package/native/darwin-arm64/libquant_rust.dylib +0 -0
  10. package/package.json +70 -0
  11. package/scripts/postinstall.cjs +85 -0
  12. package/src/ffi.rs +496 -0
  13. package/src/index.ts +1073 -0
  14. package/src/indicators/ma.rs +222 -0
  15. package/src/indicators/mod.rs +18 -0
  16. package/src/indicators/momentum.rs +353 -0
  17. package/src/indicators/sr.rs +195 -0
  18. package/src/indicators/trend.rs +351 -0
  19. package/src/indicators/volatility.rs +270 -0
  20. package/src/indicators/volume.rs +213 -0
  21. package/src/lib.rs +130 -0
  22. package/src/patterns/breakout.rs +431 -0
  23. package/src/patterns/chart.rs +772 -0
  24. package/src/patterns/mod.rs +394 -0
  25. package/src/patterns/sr.rs +423 -0
  26. package/src/prediction/amm.rs +338 -0
  27. package/src/prediction/arbitrage.rs +230 -0
  28. package/src/prediction/calibration.rs +317 -0
  29. package/src/prediction/kelly.rs +232 -0
  30. package/src/prediction/lmsr.rs +194 -0
  31. package/src/prediction/mod.rs +59 -0
  32. package/src/prediction/odds.rs +229 -0
  33. package/src/prediction/pnl.rs +254 -0
  34. package/src/prediction/risk.rs +228 -0
  35. package/src/risk/beta.rs +257 -0
  36. package/src/risk/drawdown.rs +256 -0
  37. package/src/risk/leverage.rs +201 -0
  38. package/src/risk/mod.rs +388 -0
  39. package/src/risk/portfolio.rs +287 -0
  40. package/src/risk/ratios.rs +290 -0
  41. package/src/risk/sizing.rs +194 -0
  42. package/src/risk/var.rs +222 -0
  43. package/src/stats/cdf.rs +257 -0
  44. package/src/stats/correlation.rs +225 -0
  45. package/src/stats/distribution.rs +194 -0
  46. package/src/stats/hypothesis.rs +177 -0
  47. package/src/stats/matrix.rs +346 -0
  48. package/src/stats/mod.rs +257 -0
  49. package/src/stats/regression.rs +239 -0
  50. package/src/stats/rolling.rs +193 -0
  51. package/src/stats/timeseries.rs +263 -0
  52. package/src/types.rs +224 -0
  53. package/src/utils/mod.rs +215 -0
  54. package/src/utils/normalize.rs +192 -0
  55. package/src/utils/price.rs +167 -0
  56. package/src/utils/quantiles.rs +177 -0
  57. package/src/utils/returns.rs +158 -0
  58. package/src/utils/rolling.rs +97 -0
  59. package/src/utils/stats.rs +154 -0
  60. package/types/index.ts +513 -0
package/src/stats/hypothesis.rs
@@ -0,0 +1,177 @@
+ //! Hypothesis Testing Module
+ //!
+ //! Statistical hypothesis testing including t-tests and ADF test
+
+ use serde::{Deserialize, Serialize};
+ use crate::utils::{mean, variance, std_dev};
+ use super::regression::linear_regression;
+ use super::cdf::{normal_cdf, t_cdf};
+
+ /// T-test result
+ #[derive(Debug, Clone, Serialize, Deserialize)]
+ pub struct TTestResult {
+     /// T-statistic
+     pub t_statistic: f64,
+     /// P-value (two-tailed)
+     pub p_value: f64,
+     /// Degrees of freedom
+     pub df: f64,
+ }
+
+ /// Augmented Dickey-Fuller test result
+ #[derive(Debug, Clone, Serialize, Deserialize)]
+ pub struct ADFResult {
+     /// Test statistic
+     pub t_statistic: f64,
+     /// Critical value (5% level)
+     pub critical_value: f64,
+     /// Whether the series is stationary
+     pub is_stationary: bool,
+ }
+
+ /// One-sample t-test
+ pub fn one_sample_t_test(sample: &[f64], mu: f64) -> TTestResult {
+     let n = sample.len();
+
+     if n < 2 {
+         return TTestResult {
+             t_statistic: 0.0,
+             p_value: 1.0,
+             df: 0.0,
+         };
+     }
+
+     let sample_mean = mean(sample);
+     let sample_sd = std_dev(sample, false);
+
+     let t = (sample_mean - mu) / (sample_sd / (n as f64).sqrt());
+     let p_value = 2.0 * (1.0 - normal_cdf(t.abs()));
+
+     TTestResult {
+         t_statistic: t,
+         p_value: p_value.clamp(0.0, 1.0),
+         df: (n - 1) as f64,
+     }
+ }
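As a sanity check on the statistic above, t = (x̄ − μ) / (s / √n), where s is taken to be the sample (n − 1) standard deviation, assuming that is what std_dev(sample, false) returns. For the sample [5.0, 5.5, 6.0, 6.5, 7.0] and μ = 5.0 used in the tests below, x̄ = 6.0 and s = √0.625 ≈ 0.7906, so t = 2√2 ≈ 2.828 with df = 4. Note the two-tailed p-value here comes from normal_cdf, i.e. a normal approximation rather than the exact t-distribution. A minimal standalone sketch of the arithmetic (it does not call the crate):

    fn main() {
        let sample = [5.0_f64, 5.5, 6.0, 6.5, 7.0];
        let mu = 5.0;
        let n = sample.len() as f64;

        // Sample mean and (n - 1)-denominator variance
        let mean = sample.iter().sum::<f64>() / n;
        let var = sample.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1.0);

        // t = (mean - mu) / (s / sqrt(n))
        let t = (mean - mu) / (var.sqrt() / n.sqrt());

        assert!((t - 2.0 * 2.0_f64.sqrt()).abs() < 1e-12); // 2 * sqrt(2) ~= 2.828
        println!("t = {t:.4}, df = {}", n - 1.0);
    }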
+
+ /// Two-sample t-test (independent, Welch's t-test)
+ pub fn two_sample_t_test(sample1: &[f64], sample2: &[f64]) -> TTestResult {
+     let n1 = sample1.len();
+     let n2 = sample2.len();
+
+     if n1 < 2 || n2 < 2 {
+         return TTestResult {
+             t_statistic: 0.0,
+             p_value: 1.0,
+             df: 0.0,
+         };
+     }
+
+     let mean1 = mean(sample1);
+     let mean2 = mean(sample2);
+     let var1 = variance(sample1, false);
+     let var2 = variance(sample2, false);
+
+     // Welch's t-test (unequal variances)
+     let se = (var1 / n1 as f64 + var2 / n2 as f64).sqrt();
+     let t = if se > 0.0 { (mean1 - mean2) / se } else { 0.0 };
+
+     // Degrees of freedom (Welch-Satterthwaite)
+     let numer = (var1 / n1 as f64 + var2 / n2 as f64).powi(2);
+     let denom = (var1 / n1 as f64).powi(2) / (n1 - 1) as f64
+         + (var2 / n2 as f64).powi(2) / (n2 - 1) as f64;
+     let df = if denom > 0.0 { numer / denom } else { 0.0 };
+
+     let p_value = 2.0 * (1.0 - t_cdf(t.abs(), df));
+
+     TTestResult {
+         t_statistic: t,
+         p_value: p_value.clamp(0.0, 1.0),
+         df,
+     }
+ }
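For the two samples used in the tests below ([1..5] vs [6..10]), both variances are 2.5 with n₁ = n₂ = 5, so the Welch standard error is √(2.5/5 + 2.5/5) = 1, the statistic is (3 − 8)/1 = −5, and the Welch-Satterthwaite degrees of freedom reduce to 1 / (0.0625 + 0.0625) = 8. A quick standalone check of that arithmetic:

    fn main() {
        // Both test samples have variance 2.5 with n = 5
        let (var1, var2) = (2.5_f64, 2.5_f64);
        let (n1, n2) = (5.0_f64, 5.0_f64);

        // Welch standard error and statistic for means 3.0 vs 8.0
        let se = (var1 / n1 + var2 / n2).sqrt();   // = 1.0
        let t = (3.0 - 8.0) / se;                  // = -5.0

        // Welch-Satterthwaite degrees of freedom
        let numer = (var1 / n1 + var2 / n2).powi(2);
        let denom = (var1 / n1).powi(2) / (n1 - 1.0) + (var2 / n2).powi(2) / (n2 - 1.0);
        let df = numer / denom;

        assert!((t + 5.0).abs() < 1e-12 && (df - 8.0).abs() < 1e-12);
        println!("t = {t}, df = {df}");
    }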
+
+ /// Augmented Dickey-Fuller test for stationarity (simplified)
+ pub fn adf_test(series: &[f64]) -> ADFResult {
+     if series.len() < 3 {
+         return ADFResult {
+             t_statistic: 0.0,
+             critical_value: -2.86,
+             is_stationary: false,
+         };
+     }
+
+     // Simplified ADF: regress diff(y) on lag(y)
+     let n = series.len();
+     let dy: Vec<f64> = (1..n).map(|i| series[i] - series[i - 1]).collect();
+     let lag_y: Vec<f64> = series[..n - 1].to_vec();
+
+     let reg = linear_regression(&lag_y, &dy);
+
+     // Critical value approximation (for large n, 5% level)
+     let critical_value = -2.86;
+     let is_stationary = reg.slope < critical_value;
+
+     ADFResult {
+         t_statistic: reg.slope,
+         critical_value,
+         is_stationary,
+     }
+ }
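A textbook ADF regresses Δy_t on y_{t−1} (plus a constant and lagged differences) and compares the t-statistic of that coefficient to the Dickey-Fuller critical value; the simplified version above compares the raw slope itself, which for a stationary AR(1) series with coefficient φ is roughly φ − 1 and therefore sits in (−2, 0), well above −2.86. A standalone sketch of that relationship, fitting the OLS slope of diff(y) on lag(y) for a deterministically decaying series with φ = 0.5 (it does not call the crate's linear_regression):

    fn main() {
        // Deterministic AR(1)-style decay: y_t = 0.5 * y_{t-1}, starting from 100.0
        let phi = 0.5_f64;
        let mut y = vec![100.0_f64];
        for _ in 1..50 {
            let prev = *y.last().unwrap();
            y.push(phi * prev);
        }

        // OLS slope of diff(y) on lag(y), computed directly from de-meaned sums
        let x: Vec<f64> = y[..y.len() - 1].to_vec();                        // lag(y)
        let d: Vec<f64> = (1..y.len()).map(|i| y[i] - y[i - 1]).collect();  // diff(y)
        let mx = x.iter().sum::<f64>() / x.len() as f64;
        let md = d.iter().sum::<f64>() / d.len() as f64;
        let sxd: f64 = x.iter().zip(&d).map(|(&a, &b)| (a - mx) * (b - md)).sum();
        let sxx: f64 = x.iter().map(|&a| (a - mx).powi(2)).sum();
        let slope = sxd / sxx;

        // The slope recovers phi - 1 = -0.5, nowhere near the -2.86 critical value
        assert!((slope - (phi - 1.0)).abs() < 1e-9);
        println!("slope = {slope:.3}");
    }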
+
+ #[cfg(test)]
+ mod tests {
+     use super::*;
+
+     #[test]
+     fn test_one_sample_t_test() {
+         let sample = [5.0, 5.5, 6.0, 6.5, 7.0];
+         let result = one_sample_t_test(&sample, 5.0);
+
+         assert!(result.t_statistic > 0.0);
+         assert!(result.p_value < 1.0);
+         assert_eq!(result.df as usize, 4);
+     }
+
+     #[test]
+     fn test_two_sample_t_test() {
+         let sample1 = [1.0, 2.0, 3.0, 4.0, 5.0];
+         let sample2 = [6.0, 7.0, 8.0, 9.0, 10.0];
+         let result = two_sample_t_test(&sample1, &sample2);
+
+         assert!(result.t_statistic < 0.0);
+         assert!(result.p_value < 0.05); // Should be significant
+     }
+
+     #[test]
+     fn test_two_sample_t_test_equal() {
+         let sample1 = [1.0, 2.0, 3.0, 4.0, 5.0];
+         let sample2 = [1.0, 2.0, 3.0, 4.0, 5.0];
+         let result = two_sample_t_test(&sample1, &sample2);
+
+         assert!(result.t_statistic.abs() < 1e-10);
+         assert!(result.p_value > 0.99); // Should not be significant
+     }
+
+     #[test]
+     fn test_adf_test() {
+         // Random walk (non-stationary)
+         let mut rw = vec![0.0];
+         for i in 1..100 {
+             rw.push(rw[i - 1] + (rand_random() - 0.5));
+         }
+
+         // This is a simplified test - results depend on the series
+         let _result = adf_test(&rw);
+     }
+
+     // Simple random function for testing
+     fn rand_random() -> f64 {
+         use std::time::{SystemTime, UNIX_EPOCH};
+         let nanos = SystemTime::now()
+             .duration_since(UNIX_EPOCH)
+             .unwrap()
+             .subsec_nanos();
+         (nanos as f64 / 1_000_000_000.0).fract()
+     }
+ }
package/src/stats/matrix.rs
@@ -0,0 +1,346 @@
+ //! Matrix Operations Module
+ //!
+ //! Internal matrix operations for regression and correlation analysis
+
+ /// Transpose a matrix
+ pub fn transpose(m: &[Vec<f64>]) -> Vec<Vec<f64>> {
+     if m.is_empty() {
+         return Vec::new();
+     }
+
+     let rows = m.len();
+     let cols = m[0].len();
+
+     if cols == 0 {
+         return vec![Vec::new(); rows];
+     }
+
+     (0..cols)
+         .map(|j| (0..rows).map(|i| m[i][j]).collect())
+         .collect()
+ }
+
+ /// Matrix multiplication (A * B)
+ pub fn matrix_multiply(a: &[Vec<f64>], b: &[Vec<f64>]) -> Vec<Vec<f64>> {
+     if a.is_empty() || b.is_empty() {
+         return Vec::new();
+     }
+
+     let a_rows = a.len();
+     let a_cols = a[0].len();
+     let b_rows = b.len();
+     let b_cols = b[0].len();
+
+     if a_cols != b_rows {
+         return Vec::new();
+     }
+
+     let mut result = vec![vec![0.0; b_cols]; a_rows];
+
+     for i in 0..a_rows {
+         for j in 0..b_cols {
+             let mut sum = 0.0;
+             for k in 0..a_cols {
+                 sum += a[i][k] * b[k][j];
+             }
+             result[i][j] = sum;
+         }
+     }
+
+     result
+ }
+
+ /// Matrix-vector multiplication (M * v)
+ pub fn matrix_vector_multiply(m: &[Vec<f64>], v: &[f64]) -> Vec<f64> {
+     if m.is_empty() || v.is_empty() {
+         return Vec::new();
+     }
+
+     let rows = m.len();
+     let cols = m[0].len();
+
+     if cols != v.len() {
+         return vec![0.0; rows];
+     }
+
+     (0..rows)
+         .map(|i| m[i].iter().zip(v.iter()).map(|(&m_val, &v_val)| m_val * v_val).sum())
+         .collect()
+ }
+
+ /// Matrix inverse using Gauss-Jordan elimination
+ pub fn matrix_inverse(m: &[Vec<f64>]) -> Option<Vec<Vec<f64>>> {
+     if m.is_empty() {
+         return None;
+     }
+
+     let n = m.len();
+
+     // Check if square
+     for row in m {
+         if row.len() != n {
+             return None;
+         }
+     }
+
+     // Create augmented matrix [M | I]
+     let mut augmented: Vec<Vec<f64>> = m.iter()
+         .enumerate()
+         .map(|(i, row)| {
+             let mut aug_row = row.clone();
+             aug_row.extend((0..n).map(|j| if i == j { 1.0 } else { 0.0 }));
+             aug_row
+         })
+         .collect();
+
+     // Forward elimination with partial pivoting
+     for i in 0..n {
+         // Find pivot
+         let mut max_row = i;
+         for k in (i + 1)..n {
+             if augmented[k][i].abs() > augmented[max_row][i].abs() {
+                 max_row = k;
+             }
+         }
+
+         // Swap rows
+         augmented.swap(i, max_row);
+
+         // Check for singularity
+         if augmented[i][i].abs() < 1e-10 {
+             return None;
+         }
+
+         // Scale pivot row
+         let pivot = augmented[i][i];
+         for j in 0..(2 * n) {
+             augmented[i][j] /= pivot;
+         }
+
+         // Eliminate column
+         for k in 0..n {
+             if k != i {
+                 let factor = augmented[k][i];
+                 for j in 0..(2 * n) {
+                     augmented[k][j] -= factor * augmented[i][j];
+                 }
+             }
+         }
+     }
+
+     // Extract inverse from augmented matrix
+     Some(
+         augmented
+             .iter()
+             .map(|row| row[n..].to_vec())
+             .collect()
+     )
+ }
+
140
+ /// Calculate determinant using LU decomposition
141
+ pub fn determinant(m: &[Vec<f64>]) -> f64 {
142
+ if m.is_empty() {
143
+ return 0.0;
144
+ }
145
+
146
+ let n = m.len();
147
+
148
+ // Check if square
149
+ for row in m {
150
+ if row.len() != n {
151
+ return 0.0;
152
+ }
153
+ }
154
+
155
+ // Create a copy
156
+ let mut lu: Vec<Vec<f64>> = m.to_vec();
157
+ let mut det = 1.0;
158
+
159
+ for i in 0..n {
160
+ // Find pivot
161
+ let mut max_row = i;
162
+ for k in (i + 1)..n {
163
+ if lu[k][i].abs() > lu[max_row][i].abs() {
164
+ max_row = k;
165
+ }
166
+ }
167
+
168
+ if max_row != i {
169
+ lu.swap(i, max_row);
170
+ det *= -1.0;
171
+ }
172
+
173
+ if lu[i][i].abs() < 1e-10 {
174
+ return 0.0;
175
+ }
176
+
177
+ det *= lu[i][i];
178
+
179
+ for k in (i + 1)..n {
180
+ let factor = lu[k][i] / lu[i][i];
181
+ for j in (i + 1)..n {
182
+ lu[k][j] -= factor * lu[i][j];
183
+ }
184
+ }
185
+ }
186
+
187
+ det
188
+ }
189
+
190
+ /// Solve linear system Ax = b using LU decomposition
191
+ pub fn solve_linear_system(a: &[Vec<f64>], b: &[f64]) -> Option<Vec<f64>> {
192
+ if a.is_empty() || b.is_empty() {
193
+ return None;
194
+ }
195
+
196
+ let n = a.len();
197
+ if n != b.len() {
198
+ return None;
199
+ }
200
+
201
+ // Check if square
202
+ for row in a {
203
+ if row.len() != n {
204
+ return None;
205
+ }
206
+ }
207
+
208
+ // Create augmented matrix [A | b]
209
+ let mut aug: Vec<Vec<f64>> = a.iter()
210
+ .zip(b.iter())
211
+ .map(|(row, &b_val)| {
212
+ let mut aug_row = row.clone();
213
+ aug_row.push(b_val);
214
+ aug_row
215
+ })
216
+ .collect();
217
+
218
+ // Forward elimination with partial pivoting
219
+ for i in 0..n {
220
+ // Find pivot
221
+ let mut max_row = i;
222
+ for k in (i + 1)..n {
223
+ if aug[k][i].abs() > aug[max_row][i].abs() {
224
+ max_row = k;
225
+ }
226
+ }
227
+
228
+ aug.swap(i, max_row);
229
+
230
+ if aug[i][i].abs() < 1e-10 {
231
+ return None;
232
+ }
233
+
234
+ // Eliminate
235
+ for k in (i + 1)..n {
236
+ let factor = aug[k][i] / aug[i][i];
237
+ for j in i..=n {
238
+ aug[k][j] -= factor * aug[i][j];
239
+ }
240
+ }
241
+ }
242
+
243
+ // Back substitution
244
+ let mut x = vec![0.0; n];
245
+ for i in (0..n).rev() {
246
+ let mut sum = aug[i][n];
247
+ for j in (i + 1)..n {
248
+ sum -= aug[i][j] * x[j];
249
+ }
250
+ x[i] = sum / aug[i][i];
251
+ }
252
+
253
+ Some(x)
254
+ }
255
+
256
+ #[cfg(test)]
257
+ mod tests {
258
+ use super::*;
259
+
260
+ #[test]
261
+ fn test_transpose() {
262
+ let m = vec![
263
+ vec![1.0, 2.0, 3.0],
264
+ vec![4.0, 5.0, 6.0],
265
+ ];
266
+ let t = transpose(&m);
267
+
268
+ assert_eq!(t.len(), 3);
269
+ assert_eq!(t[0], vec![1.0, 4.0]);
270
+ assert_eq!(t[1], vec![2.0, 5.0]);
271
+ assert_eq!(t[2], vec![3.0, 6.0]);
272
+ }
273
+
274
+ #[test]
275
+ fn test_matrix_multiply() {
276
+ let a = vec![
277
+ vec![1.0, 2.0],
278
+ vec![3.0, 4.0],
279
+ ];
280
+ let b = vec![
281
+ vec![5.0, 6.0],
282
+ vec![7.0, 8.0],
283
+ ];
284
+ let c = matrix_multiply(&a, &b);
285
+
286
+ assert_eq!(c, vec![
287
+ vec![19.0, 22.0],
288
+ vec![43.0, 50.0],
289
+ ]);
290
+ }
291
+
292
+ #[test]
293
+ fn test_matrix_vector_multiply() {
294
+ let m = vec![
295
+ vec![1.0, 2.0],
296
+ vec![3.0, 4.0],
297
+ ];
298
+ let v = vec![2.0, 3.0];
299
+ let result = matrix_vector_multiply(&m, &v);
300
+
301
+ assert_eq!(result, vec![8.0, 18.0]);
302
+ }
303
+
304
+ #[test]
305
+ fn test_matrix_inverse() {
306
+ let m = vec![
307
+ vec![4.0, 7.0],
308
+ vec![2.0, 6.0],
309
+ ];
310
+ let inv = matrix_inverse(&m).unwrap();
311
+
312
+ // Check that M * M^-1 = I
313
+ let product = matrix_multiply(&m, &inv);
314
+
315
+ assert!((product[0][0] - 1.0).abs() < 1e-10);
316
+ assert!(product[0][1].abs() < 1e-10);
317
+ assert!(product[1][0].abs() < 1e-10);
318
+ assert!((product[1][1] - 1.0).abs() < 1e-10);
319
+ }
320
+
321
+ #[test]
322
+ fn test_determinant() {
323
+ let m = vec![
324
+ vec![1.0, 2.0],
325
+ vec![3.0, 4.0],
326
+ ];
327
+ let det = determinant(&m);
328
+
329
+ // det = 1*4 - 2*3 = -2
330
+ assert!((det - (-2.0)).abs() < 1e-10);
331
+ }
332
+
333
+ #[test]
334
+ fn test_solve_linear_system() {
335
+ let a = vec![
336
+ vec![2.0, 1.0],
337
+ vec![1.0, 3.0],
338
+ ];
339
+ let b = vec![5.0, 10.0];
340
+ let x = solve_linear_system(&a, &b).unwrap();
341
+
342
+ // 2x + y = 5, x + 3y = 10 => x = 1, y = 3
343
+ assert!((x[0] - 1.0).abs() < 1e-10);
344
+ assert!((x[1] - 3.0).abs() < 1e-10);
345
+ }
346
+ }