@ebowwa/quant-rust 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/README.md +161 -0
  2. package/bun-ffi.d.ts +54 -0
  3. package/dist/index.js +576 -0
  4. package/dist/src/index.d.ts +324 -0
  5. package/dist/src/index.d.ts.map +1 -0
  6. package/dist/types/index.d.ts +403 -0
  7. package/dist/types/index.d.ts.map +1 -0
  8. package/native/README.md +62 -0
  9. package/native/darwin-arm64/libquant_rust.dylib +0 -0
  10. package/package.json +70 -0
  11. package/scripts/postinstall.cjs +85 -0
  12. package/src/ffi.rs +496 -0
  13. package/src/index.ts +1073 -0
  14. package/src/indicators/ma.rs +222 -0
  15. package/src/indicators/mod.rs +18 -0
  16. package/src/indicators/momentum.rs +353 -0
  17. package/src/indicators/sr.rs +195 -0
  18. package/src/indicators/trend.rs +351 -0
  19. package/src/indicators/volatility.rs +270 -0
  20. package/src/indicators/volume.rs +213 -0
  21. package/src/lib.rs +130 -0
  22. package/src/patterns/breakout.rs +431 -0
  23. package/src/patterns/chart.rs +772 -0
  24. package/src/patterns/mod.rs +394 -0
  25. package/src/patterns/sr.rs +423 -0
  26. package/src/prediction/amm.rs +338 -0
  27. package/src/prediction/arbitrage.rs +230 -0
  28. package/src/prediction/calibration.rs +317 -0
  29. package/src/prediction/kelly.rs +232 -0
  30. package/src/prediction/lmsr.rs +194 -0
  31. package/src/prediction/mod.rs +59 -0
  32. package/src/prediction/odds.rs +229 -0
  33. package/src/prediction/pnl.rs +254 -0
  34. package/src/prediction/risk.rs +228 -0
  35. package/src/risk/beta.rs +257 -0
  36. package/src/risk/drawdown.rs +256 -0
  37. package/src/risk/leverage.rs +201 -0
  38. package/src/risk/mod.rs +388 -0
  39. package/src/risk/portfolio.rs +287 -0
  40. package/src/risk/ratios.rs +290 -0
  41. package/src/risk/sizing.rs +194 -0
  42. package/src/risk/var.rs +222 -0
  43. package/src/stats/cdf.rs +257 -0
  44. package/src/stats/correlation.rs +225 -0
  45. package/src/stats/distribution.rs +194 -0
  46. package/src/stats/hypothesis.rs +177 -0
  47. package/src/stats/matrix.rs +346 -0
  48. package/src/stats/mod.rs +257 -0
  49. package/src/stats/regression.rs +239 -0
  50. package/src/stats/rolling.rs +193 -0
  51. package/src/stats/timeseries.rs +263 -0
  52. package/src/types.rs +224 -0
  53. package/src/utils/mod.rs +215 -0
  54. package/src/utils/normalize.rs +192 -0
  55. package/src/utils/price.rs +167 -0
  56. package/src/utils/quantiles.rs +177 -0
  57. package/src/utils/returns.rs +158 -0
  58. package/src/utils/rolling.rs +97 -0
  59. package/src/utils/stats.rs +154 -0
  60. package/types/index.ts +513 -0
@@ -0,0 +1,317 @@
1
+ //! # Forecast Calibration and Scoring
2
+ //!
3
+ //! Tools for evaluating probability forecasts.
4
+
5
/// A single probability forecast paired with its realized outcome.
#[derive(Debug, Clone, Copy)]
pub struct Prediction {
    /// Predicted probability (0-1)
    pub predicted: f64,
    /// Actual outcome (0 or 1)
    pub actual: f64,
}

impl Prediction {
    /// Create a new prediction, encoding the boolean outcome as 1.0 / 0.0.
    pub fn new(predicted: f64, actual: bool) -> Self {
        let actual = f64::from(u8::from(actual));
        Self { predicted, actual }
    }
}
23
+
24
/// A calibration bucket for a range of predicted probabilities.
///
/// Predictions are grouped by predicted probability; within each bucket the
/// mean prediction is compared against the observed outcome frequency.
#[derive(Debug, Clone)]
pub struct CalibrationBucket {
    /// Range of predicted probabilities (min, max) covered by this bucket
    pub predicted_range: (f64, f64),
    /// Number of predictions that fell into this bucket
    pub count: usize,
    /// Average predicted probability of the bucket's members
    pub avg_predicted: f64,
    /// Average actual frequency (observed share of positive outcomes)
    pub avg_actual: f64,
}
36
+
37
/// Result of calibration analysis over a set of predictions.
#[derive(Debug, Clone)]
pub struct CalibrationResult {
    /// Non-empty calibration buckets (buckets with no predictions are omitted)
    pub buckets: Vec<CalibrationBucket>,
    /// Brier score: mean squared error of the forecasts (lower is better, 0 = perfect)
    pub brier_score: f64,
    /// Log loss: mean negative log-likelihood (lower is better)
    pub log_loss: f64,
    /// Mean absolute calibration error: average |avg_predicted - avg_actual| across buckets
    pub calibration_error: f64,
}
49
+
50
/// Smallest probability used when clamping forecasts before taking
/// logarithms, preventing `ln(0) = -inf` in `log_loss`.
const EPSILON: f64 = 1e-15;
51
+
52
+ /// Calculate Brier score for probability forecasts.
53
+ ///
54
+ /// Brier score = mean((predicted - actual)^2)
55
+ ///
56
+ /// Lower is better: 0 = perfect, 0.25 = random for binary
57
+ ///
58
+ /// # Arguments
59
+ /// * `predictions` - Slice of predictions
60
+ ///
61
+ /// # Returns
62
+ /// Brier score
63
+ ///
64
+ /// # Example
65
+ /// ```
66
+ /// use quant_rust::prediction::calibration::{brier_score, Prediction};
67
+ /// let predictions = vec![
68
+ /// Prediction::new(0.8, true),
69
+ /// Prediction::new(0.3, false),
70
+ /// ];
71
+ /// let score = brier_score(&predictions);
72
+ /// assert!(score < 0.25); // Better than random
73
+ /// ```
74
+ pub fn brier_score(predictions: &[Prediction]) -> f64 {
75
+ if predictions.is_empty() {
76
+ return 0.0;
77
+ }
78
+
79
+ let sum_squared_errors: f64 = predictions
80
+ .iter()
81
+ .map(|p| (p.predicted - p.actual).powi(2))
82
+ .sum();
83
+
84
+ sum_squared_errors / predictions.len() as f64
85
+ }
86
+
87
+ /// Calculate log loss for probability forecasts.
88
+ ///
89
+ /// Log loss = -mean(actual * log(pred) + (1-actual) * log(1-pred))
90
+ ///
91
+ /// Lower is better. Penalizes confident wrong predictions heavily.
92
+ ///
93
+ /// # Arguments
94
+ /// * `predictions` - Slice of predictions
95
+ ///
96
+ /// # Returns
97
+ /// Log loss
98
+ ///
99
+ /// # Example
100
+ /// ```
101
+ /// use quant_rust::prediction::calibration::{log_loss, Prediction};
102
+ /// let predictions = vec![
103
+ /// Prediction::new(0.9, true),
104
+ /// Prediction::new(0.1, false),
105
+ /// ];
106
+ /// let loss = log_loss(&predictions);
107
+ /// assert!(loss < 0.5); // Good predictions
108
+ /// ```
109
+ pub fn log_loss(predictions: &[Prediction]) -> f64 {
110
+ if predictions.is_empty() {
111
+ return 0.0;
112
+ }
113
+
114
+ let sum_log_loss: f64 = predictions
115
+ .iter()
116
+ .map(|p| {
117
+ // Clamp to prevent log(0)
118
+ let pred = p.predicted.clamp(EPSILON, 1.0 - EPSILON);
119
+ let actual = p.actual;
120
+ -(actual * pred.ln() + (1.0 - actual) * (1.0 - pred).ln())
121
+ })
122
+ .sum();
123
+
124
+ sum_log_loss / predictions.len() as f64
125
+ }
126
+
127
+ /// Calculate calibration metrics.
128
+ ///
129
+ /// Groups predictions into buckets and compares predicted vs observed frequencies.
130
+ ///
131
+ /// # Arguments
132
+ /// * `predictions` - Slice of predictions
133
+ ///
134
+ /// # Returns
135
+ /// CalibrationResult with buckets and scores
136
+ ///
137
+ /// # Example
138
+ /// ```
139
+ /// use quant_rust::prediction::calibration::{calculate_calibration, Prediction};
140
+ /// let predictions = vec![
141
+ /// Prediction::new(0.1, false),
142
+ /// Prediction::new(0.1, false),
143
+ /// Prediction::new(0.1, true), // 33% actual in 10% bucket
144
+ /// Prediction::new(0.9, true),
145
+ /// Prediction::new(0.9, true),
146
+ /// Prediction::new(0.9, false), // 67% actual in 90% bucket
147
+ /// ];
148
+ /// let result = calculate_calibration(&predictions);
149
+ /// assert!(result.buckets.len() > 0);
150
+ /// ```
151
+ pub fn calculate_calibration(predictions: &[Prediction]) -> CalibrationResult {
152
+ const NUM_BUCKETS: usize = 10;
153
+
154
+ // Group predictions into buckets
155
+ let mut buckets: Vec<(Vec<f64>, Vec<f64>)> = vec![(vec![], vec![]); NUM_BUCKETS];
156
+
157
+ for p in predictions {
158
+ // Clamp predicted to [0, 1) to ensure valid bucket index
159
+ let clamped = p.predicted.clamp(0.0, 0.9999999);
160
+ let bucket_index = (clamped * NUM_BUCKETS as f64) as usize;
161
+ let bucket_index = bucket_index.min(NUM_BUCKETS - 1);
162
+ buckets[bucket_index].0.push(p.predicted);
163
+ buckets[bucket_index].1.push(p.actual);
164
+ }
165
+
166
+ // Build calibration buckets
167
+ let mut calibration_buckets: Vec<CalibrationBucket> = Vec::new();
168
+
169
+ for i in 0..NUM_BUCKETS {
170
+ let (predicted, actual) = &buckets[i];
171
+ if !predicted.is_empty() {
172
+ let avg_predicted: f64 = predicted.iter().sum::<f64>() / predicted.len() as f64;
173
+ let avg_actual: f64 = actual.iter().sum::<f64>() / actual.len() as f64;
174
+
175
+ calibration_buckets.push(CalibrationBucket {
176
+ predicted_range: (
177
+ i as f64 / NUM_BUCKETS as f64,
178
+ (i + 1) as f64 / NUM_BUCKETS as f64,
179
+ ),
180
+ count: predicted.len(),
181
+ avg_predicted,
182
+ avg_actual,
183
+ });
184
+ }
185
+ }
186
+
187
+ // Calculate calibration error = mean |predicted - observed|
188
+ let calibration_error = if calibration_buckets.is_empty() {
189
+ 0.0
190
+ } else {
191
+ let sum: f64 = calibration_buckets
192
+ .iter()
193
+ .map(|b| (b.avg_predicted - b.avg_actual).abs())
194
+ .sum();
195
+ sum / calibration_buckets.len() as f64
196
+ };
197
+
198
+ CalibrationResult {
199
+ buckets: calibration_buckets,
200
+ brier_score: brier_score(predictions),
201
+ log_loss: log_loss(predictions),
202
+ calibration_error,
203
+ }
204
+ }
205
+
206
#[cfg(test)]
mod tests {
    use super::*;

    // Exact 1.0/0.0 forecasts that match the outcomes incur zero squared error.
    #[test]
    fn test_brier_score_perfect() {
        let predictions = vec![
            Prediction::new(1.0, true),
            Prediction::new(0.0, false),
            Prediction::new(1.0, true),
        ];
        assert!((brier_score(&predictions) - 0.0).abs() < 1e-10);
    }

    #[test]
    fn test_brier_score_random() {
        // Random predictions should give ~0.25
        let predictions = vec![
            Prediction::new(0.5, true),
            Prediction::new(0.5, false),
            Prediction::new(0.5, true),
            Prediction::new(0.5, false),
        ];
        assert!((brier_score(&predictions) - 0.25).abs() < 1e-10);
    }

    #[test]
    fn test_brier_score_worst() {
        // Confident and wrong: squared error is 1.0 per prediction.
        let predictions = vec![
            Prediction::new(1.0, false),
            Prediction::new(0.0, true),
        ];
        assert!((brier_score(&predictions) - 1.0).abs() < 1e-10);
    }

    // Empty input is defined to score 0.0 rather than NaN.
    #[test]
    fn test_brier_score_empty() {
        let predictions: Vec<Prediction> = vec![];
        assert_eq!(brier_score(&predictions), 0.0);
    }

    #[test]
    fn test_log_loss_perfect() {
        let predictions = vec![
            Prediction::new(0.999, true),
            Prediction::new(0.001, false),
        ];
        let loss = log_loss(&predictions);
        assert!(loss < 0.01);
    }

    #[test]
    fn test_log_loss_worst() {
        // Confident and wrong: -ln(0.001) ~ 6.9 per prediction.
        let predictions = vec![
            Prediction::new(0.001, true),
            Prediction::new(0.999, false),
        ];
        let loss = log_loss(&predictions);
        assert!(loss > 5.0); // Very high loss
    }

    // Empty input is defined to score 0.0 rather than NaN.
    #[test]
    fn test_log_loss_empty() {
        let predictions: Vec<Prediction> = vec![];
        assert_eq!(log_loss(&predictions), 0.0);
    }

    #[test]
    fn test_calibrate_buckets() {
        let predictions = vec![
            Prediction::new(0.05, false),
            Prediction::new(0.05, false),
            Prediction::new(0.05, true),
            Prediction::new(0.95, true),
            Prediction::new(0.95, true),
            Prediction::new(0.95, false),
        ];
        let result = calculate_calibration(&predictions);
        assert!(result.buckets.len() >= 2);

        // Find 0-10% bucket: three samples, one positive outcome (~33%).
        let low_bucket = result.buckets.iter().find(|b| b.predicted_range.0 == 0.0);
        assert!(low_bucket.is_some());
        let low = low_bucket.unwrap();
        assert_eq!(low.count, 3);
        assert!((low.avg_actual - 0.333).abs() < 0.01);

        // Find 90-100% bucket: three samples, two positive outcomes (~67%).
        let high_bucket = result.buckets.iter().find(|b| b.predicted_range.0 == 0.9);
        assert!(high_bucket.is_some());
        let high = high_bucket.unwrap();
        assert_eq!(high.count, 3);
        assert!((high.avg_actual - 0.667).abs() < 0.01);
    }

    #[test]
    fn test_calibration_error() {
        // Perfectly calibrated: forecasts sweep 0.00..0.99 with a
        // deterministic threshold outcome, so bucket means roughly track.
        let predictions: Vec<Prediction> = (0..100)
            .map(|i| {
                let prob = (i as f64) / 100.0;
                // Simulate outcomes matching predictions
                Prediction::new(prob, prob > 0.5)
            })
            .collect();
        let result = calculate_calibration(&predictions);
        // With deterministic outcomes, calibration should be reasonably good
        assert!(result.calibration_error < 0.5);
    }
}
@@ -0,0 +1,232 @@
1
+ //! # Kelly Criterion for Prediction Markets
2
+ //!
3
+ //! Optimal position sizing for binary outcome bets.
4
+
5
/// Result of a Kelly criterion calculation for a single binary-market bet.
#[derive(Debug, Clone, Copy)]
pub struct KellyResult {
    /// Optimal fraction of bankroll to stake, clamped to be non-negative
    /// (0.0 means no bet is recommended)
    pub kelly_fraction: f64,
    /// Conservative half-Kelly fraction
    pub half_kelly: f64,
    /// Very conservative quarter-Kelly fraction
    pub quarter_kelly: f64,
    /// Full-Kelly bet size in currency units (bankroll * kelly_fraction)
    pub full_bet_size: f64,
    /// Half-Kelly bet size in currency units
    pub half_bet_size: f64,
    /// Quarter-Kelly bet size in currency units
    pub quarter_bet_size: f64,
    /// Your estimated edge (your_prob - market_price); may be negative
    pub edge: f64,
    /// Net market odds (win_amount / loss_amount); 0.0 when the loss amount is 0
    pub odds: f64,
}
25
+
26
/// A betting opportunity, used as input for multi-bet Kelly sizing.
#[derive(Debug, Clone, Copy)]
pub struct BetOpportunity {
    /// Your estimated probability of winning (0-1)
    pub your_prob: f64,
    /// Current market price to buy one share (0-1)
    pub market_price: f64,
}
34
+
35
+ /// Calculate Kelly criterion for a binary prediction market bet.
36
+ ///
37
+ /// # Arguments
38
+ /// * `your_prob` - Your estimated probability of winning (0-1)
39
+ /// * `market_price` - Current market price to buy shares (0-1)
40
+ /// * `bankroll` - Your total bankroll in currency units
41
+ ///
42
+ /// # Returns
43
+ /// KellyResult with optimal sizing at various Kelly fractions
44
+ ///
45
+ /// # Example
46
+ /// ```
47
+ /// use quant_rust::prediction::kelly::kelly_criterion;
48
+ /// let result = kelly_criterion(0.6, 0.5, 1000.0);
49
+ /// assert!(result.kelly_fraction > 0.0);
50
+ /// assert!(result.edge > 0.0);
51
+ /// ```
52
+ pub fn kelly_criterion(your_prob: f64, market_price: f64, bankroll: f64) -> KellyResult {
53
+ // For prediction markets:
54
+ // - Win: get $1 per share (profit = $1 - price)
55
+ // - Lose: lose the entire purchase price
56
+ let win_amount = 1.0 - market_price; // Profit if wins ($1 - purchase price)
57
+ let loss_amount = market_price; // Loss if loses (entire purchase price)
58
+
59
+ // b = win_amount / loss_amount (odds received)
60
+ let b = if loss_amount > 0.0 {
61
+ win_amount / loss_amount
62
+ } else {
63
+ 0.0
64
+ };
65
+
66
+ let q = 1.0 - your_prob;
67
+
68
+ // Kelly fraction = (p * b - q) / b = p - q/b
69
+ // Kelly = your_prob - (1 - your_prob) * market_price / win_amount
70
+ let kelly_fraction = if win_amount > 0.0 {
71
+ your_prob - (q * market_price) / win_amount
72
+ } else {
73
+ 0.0
74
+ };
75
+
76
+ // If negative, no bet recommended
77
+ let optimal_fraction = kelly_fraction.max(0.0);
78
+
79
+ KellyResult {
80
+ kelly_fraction: optimal_fraction,
81
+ half_kelly: optimal_fraction / 2.0,
82
+ quarter_kelly: optimal_fraction / 4.0,
83
+ full_bet_size: bankroll * optimal_fraction,
84
+ half_bet_size: bankroll * (optimal_fraction / 2.0),
85
+ quarter_bet_size: bankroll * (optimal_fraction / 4.0),
86
+ edge: your_prob - market_price,
87
+ odds: b,
88
+ }
89
+ }
90
+
91
+ /// Calculate fractional Kelly for risk management.
92
+ ///
93
+ /// # Arguments
94
+ /// * `your_prob` - Your estimated probability of winning
95
+ /// * `market_price` - Current market price
96
+ /// * `bankroll` - Your total bankroll
97
+ /// * `fraction` - Fraction of full Kelly to use (e.g., 0.25 for quarter)
98
+ ///
99
+ /// # Returns
100
+ /// Bet size in currency units
101
+ ///
102
+ /// # Example
103
+ /// ```
104
+ /// use quant_rust::prediction::kelly::fractional_kelly;
105
+ /// let size = fractional_kelly(0.6, 0.5, 1000.0, 0.5);
106
+ /// assert!(size > 0.0);
107
+ /// ```
108
+ pub fn fractional_kelly(
109
+ your_prob: f64,
110
+ market_price: f64,
111
+ bankroll: f64,
112
+ fraction: f64,
113
+ ) -> f64 {
114
+ let kelly = kelly_criterion(your_prob, market_price, bankroll);
115
+ kelly.full_bet_size * fraction
116
+ }
117
+
118
+ /// Calculate Kelly for multiple simultaneous independent bets.
119
+ ///
120
+ /// For independent bets, if total Kelly exceeds 1, we scale down proportionally.
121
+ ///
122
+ /// # Arguments
123
+ /// * `bets` - Slice of betting opportunities
124
+ /// * `bankroll` - Your total bankroll
125
+ ///
126
+ /// # Returns
127
+ /// Vector of bet sizes (same order as input)
128
+ ///
129
+ /// # Example
130
+ /// ```
131
+ /// use quant_rust::prediction::kelly::{kelly_multiple, BetOpportunity};
132
+ /// let bets = vec![
133
+ /// BetOpportunity { your_prob: 0.6, market_price: 0.5 },
134
+ /// BetOpportunity { your_prob: 0.55, market_price: 0.45 },
135
+ /// ];
136
+ /// let sizes = kelly_multiple(&bets, 1000.0);
137
+ /// assert_eq!(sizes.len(), 2);
138
+ /// ```
139
+ pub fn kelly_multiple(bets: &[BetOpportunity], bankroll: f64) -> Vec<f64> {
140
+ // Calculate Kelly fraction for each bet
141
+ let kelly_fractions: Vec<f64> = bets
142
+ .iter()
143
+ .map(|bet| {
144
+ let k = kelly_criterion(bet.your_prob, bet.market_price, 1.0);
145
+ k.kelly_fraction.max(0.0)
146
+ })
147
+ .collect();
148
+
149
+ let total_kelly: f64 = kelly_fractions.iter().sum();
150
+
151
+ // If total exceeds 1, scale down proportionally
152
+ if total_kelly > 1.0 {
153
+ let scale = 1.0 / total_kelly;
154
+ kelly_fractions
155
+ .iter()
156
+ .map(|&k| bankroll * k * scale)
157
+ .collect()
158
+ } else {
159
+ kelly_fractions.iter().map(|&k| bankroll * k).collect()
160
+ }
161
+ }
162
+
163
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_kelly_criterion_positive_edge() {
        // 60% probability at 50% price = positive edge
        let result = kelly_criterion(0.6, 0.5, 1000.0);
        assert!(result.kelly_fraction > 0.0);
        assert!(result.edge > 0.0);
        assert!((result.edge - 0.1).abs() < 1e-10); // 0.6 - 0.5
        assert!(result.full_bet_size > 0.0);
    }

    #[test]
    fn test_kelly_criterion_negative_edge() {
        // 40% probability at 50% price = negative edge
        let result = kelly_criterion(0.4, 0.5, 1000.0);
        assert_eq!(result.kelly_fraction, 0.0); // No bet recommended
        assert!(result.edge < 0.0);
    }

    #[test]
    fn test_kelly_criterion_no_edge() {
        // 50% probability at 50% price = no edge; Kelly is exactly zero.
        let result = kelly_criterion(0.5, 0.5, 1000.0);
        assert_eq!(result.kelly_fraction, 0.0);
        assert_eq!(result.edge, 0.0);
    }

    // Half/quarter fields must stay consistent with the full fraction.
    #[test]
    fn test_kelly_fractions() {
        let result = kelly_criterion(0.7, 0.5, 1000.0);
        assert_eq!(result.half_kelly, result.kelly_fraction / 2.0);
        assert_eq!(result.quarter_kelly, result.kelly_fraction / 4.0);
    }

    // fractional_kelly(.., 0.5) should equal half the full-Kelly stake.
    #[test]
    fn test_fractional_kelly() {
        let full = kelly_criterion(0.6, 0.5, 1000.0).full_bet_size;
        let half = fractional_kelly(0.6, 0.5, 1000.0, 0.5);
        assert!((full * 0.5 - half).abs() < 1e-10);
    }

    #[test]
    fn test_kelly_multiple() {
        let bets = vec![
            BetOpportunity { your_prob: 0.6, market_price: 0.5 },
            BetOpportunity { your_prob: 0.55, market_price: 0.45 },
        ];
        let sizes = kelly_multiple(&bets, 1000.0);
        assert_eq!(sizes.len(), 2);
        assert!(sizes[0] > 0.0);
        assert!(sizes[1] > 0.0);
    }

    #[test]
    fn test_kelly_multiple_scaling() {
        // Create bets that would exceed 100% allocation
        let bets = vec![
            BetOpportunity { your_prob: 0.8, market_price: 0.3 },
            BetOpportunity { your_prob: 0.8, market_price: 0.3 },
            BetOpportunity { your_prob: 0.8, market_price: 0.3 },
        ];
        let sizes = kelly_multiple(&bets, 1000.0);
        let total: f64 = sizes.iter().sum();
        // Total should be at most bankroll (scaled down if needed)
        assert!(total <= 1000.0 + 1e-10);
    }
}