@buley/hexgrid-3d 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.json +28 -0
- package/LICENSE +39 -0
- package/README.md +291 -0
- package/examples/basic-usage.tsx +52 -0
- package/package.json +65 -0
- package/public/hexgrid-worker.js +1763 -0
- package/rust/Cargo.toml +41 -0
- package/rust/src/lib.rs +740 -0
- package/rust/src/math.rs +574 -0
- package/rust/src/spatial.rs +245 -0
- package/rust/src/statistics.rs +496 -0
- package/src/HexGridEnhanced.ts +16 -0
- package/src/Snapshot.ts +1402 -0
- package/src/adapters.ts +65 -0
- package/src/algorithms/AdvancedStatistics.ts +328 -0
- package/src/algorithms/BayesianStatistics.ts +317 -0
- package/src/algorithms/FlowField.ts +126 -0
- package/src/algorithms/FluidSimulation.ts +99 -0
- package/src/algorithms/GraphAlgorithms.ts +184 -0
- package/src/algorithms/OutlierDetection.ts +391 -0
- package/src/algorithms/ParticleSystem.ts +85 -0
- package/src/algorithms/index.ts +13 -0
- package/src/compat.ts +96 -0
- package/src/components/HexGrid.tsx +31 -0
- package/src/components/NarrationOverlay.tsx +221 -0
- package/src/components/index.ts +2 -0
- package/src/features.ts +125 -0
- package/src/index.ts +30 -0
- package/src/math/HexCoordinates.ts +15 -0
- package/src/math/Matrix4.ts +35 -0
- package/src/math/Quaternion.ts +37 -0
- package/src/math/SpatialIndex.ts +114 -0
- package/src/math/Vector3.ts +69 -0
- package/src/math/index.ts +11 -0
- package/src/note-adapter.ts +124 -0
- package/src/ontology-adapter.ts +77 -0
- package/src/stores/index.ts +1 -0
- package/src/stores/uiStore.ts +85 -0
- package/src/types/index.ts +3 -0
- package/src/types.ts +152 -0
- package/src/utils/image-utils.ts +25 -0
- package/src/wasm/HexGridWasmWrapper.ts +753 -0
- package/src/wasm/index.ts +7 -0
- package/src/workers/hexgrid-math.ts +177 -0
- package/src/workers/hexgrid-worker.worker.ts +1807 -0
- package/tsconfig.json +18 -0
|
@@ -0,0 +1,496 @@
|
|
|
1
|
+
//! Statistical computations for hexgrid analysis
|
|
2
|
+
|
|
3
|
+
use std::collections::HashMap;
|
|
4
|
+
|
|
5
|
+
/// Compute Gini coefficient for inequality measurement.
///
/// Returns 0.0 for an empty slice or when the mean is 0 (degenerate case).
pub fn gini_coefficient(values: &[f64]) -> f64 {
    if values.is_empty() {
        return 0.0;
    }

    // Work on an ascending copy; incomparable values (NaN) are treated as equal.
    let mut ranked = values.to_vec();
    ranked.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));

    let n = ranked.len() as f64;
    let mean = ranked.iter().sum::<f64>() / n;
    if mean == 0.0 {
        return 0.0;
    }

    // Rank-weighted form: sum_i (2i - n - 1) * x_i with 1-based ranks i.
    let weighted: f64 = ranked
        .iter()
        .enumerate()
        .map(|(rank, &x)| (2.0 * (rank as f64 + 1.0) - n - 1.0) * x)
        .sum();

    weighted / (n * n * mean)
}
|
|
28
|
+
|
|
29
|
+
/// Compute Shannon entropy (in nats) of a non-negative weight vector.
///
/// Weights are normalized by their sum; zero (and negative) entries
/// contribute nothing. Returns 0.0 when the total is 0.
pub fn shannon_entropy(values: &[f64]) -> f64 {
    let total: f64 = values.iter().sum();
    if total == 0.0 {
        return 0.0;
    }

    values
        .iter()
        .filter(|&&v| v > 0.0)
        .map(|&v| {
            let p = v / total;
            -p * p.ln()
        })
        .sum()
}
|
|
46
|
+
|
|
47
|
+
/// Compute normalized Shannon entropy (0-1)
|
|
48
|
+
pub fn normalized_entropy(values: &[f64]) -> f64 {
|
|
49
|
+
let n = values.len();
|
|
50
|
+
if n <= 1 {
|
|
51
|
+
return 0.0;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
let entropy = shannon_entropy(values);
|
|
55
|
+
let max_entropy = (n as f64).ln();
|
|
56
|
+
|
|
57
|
+
if max_entropy == 0.0 {
|
|
58
|
+
return 0.0;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
entropy / max_entropy
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
/// Compute the Theil index (generalized entropy inequality measure).
///
/// T = (1/n) * sum_i (x_i / mu) * ln(x_i / mu); zero and negative entries
/// are skipped. Returns 0.0 for empty input or a zero mean.
pub fn theil_index(values: &[f64]) -> f64 {
    if values.is_empty() {
        return 0.0;
    }

    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    if mean == 0.0 {
        return 0.0;
    }

    let total: f64 = values
        .iter()
        .filter(|&&v| v > 0.0)
        .map(|&v| {
            let ratio = v / mean;
            ratio * ratio.ln()
        })
        .sum();

    total / n
}
|
|
86
|
+
|
|
87
|
+
/// Compute Atkinson index.
///
/// `epsilon` is the inequality-aversion parameter. Returns 0.0 for empty
/// input or a zero mean.
///
/// For epsilon ~= 1 the index is 1 - (geometric mean / arithmetic mean).
/// A zero value drives the geometric mean to 0, so the index is exactly 1;
/// the previous implementation filtered zeros out of the log-sum but still
/// divided by the full n, understating inequality. The general branch
/// already yields 1 in that case via IEEE arithmetic (0^negative = inf),
/// so this makes the two branches consistent.
pub fn atkinson_index(values: &[f64], epsilon: f64) -> f64 {
    let n = values.len();
    if n == 0 {
        return 0.0;
    }

    let mean: f64 = values.iter().sum::<f64>() / n as f64;
    if mean == 0.0 {
        return 0.0;
    }

    if (epsilon - 1.0).abs() < 0.0001 {
        // Special case: epsilon = 1 (uses geometric mean).
        if values.iter().any(|&v| v == 0.0) {
            // Geometric mean is 0 => maximal inequality.
            return 1.0;
        }
        let log_sum: f64 = values.iter()
            .filter(|&&v| v > 0.0)
            .map(|&v| v.ln())
            .sum();
        let geometric_mean = (log_sum / n as f64).exp();
        1.0 - geometric_mean / mean
    } else {
        let sum: f64 = values.iter()
            .map(|&v| (v / mean).powf(1.0 - epsilon))
            .sum();
        let power_mean = (sum / n as f64).powf(1.0 / (1.0 - epsilon));
        1.0 - power_mean
    }
}
|
|
115
|
+
|
|
116
|
+
/// Compute Pareto ratio (80/20-style analysis).
///
/// Returns the fraction of elements (largest first) needed for their
/// cumulative sum to reach `percentile` of the grand total. Returns 0.0
/// for empty input or a zero total.
pub fn pareto_ratio(values: &[f64], percentile: f64) -> f64 {
    if values.is_empty() {
        return 0.0;
    }

    // Descending copy so the biggest contributors are consumed first.
    let mut desc = values.to_vec();
    desc.sort_by(|a, b| b.partial_cmp(a).unwrap_or(std::cmp::Ordering::Equal));

    let total: f64 = desc.iter().sum();
    if total == 0.0 {
        return 0.0;
    }

    let target = total * percentile;
    let mut running = 0.0;
    let mut used = 0usize;
    for &v in &desc {
        running += v;
        used += 1;
        if running >= target {
            break;
        }
    }

    used as f64 / values.len() as f64
}
|
|
144
|
+
|
|
145
|
+
/// Compute the Zipf coefficient from a territory-size distribution.
///
/// Fits ln(frequency) against ln(rank) by least squares (zeros excluded)
/// and returns the negated slope, so a perfect Zipf law gives ~1.0.
/// Returns 0.0 when fewer than two non-zero counts exist or the fit is
/// degenerate.
pub fn zipf_coefficient(counts: &[usize]) -> f64 {
    if counts.len() < 2 {
        return 0.0;
    }

    // Rank counts from largest to smallest.
    let mut ranked = counts.to_vec();
    ranked.sort_by(|a, b| b.cmp(a));

    // Zeros have no logarithm; drop them before fitting.
    let non_zero: Vec<f64> = ranked.iter()
        .filter(|&&c| c > 0)
        .map(|&c| c as f64)
        .collect();
    if non_zero.len() < 2 {
        return 0.0;
    }

    // Accumulate least-squares sums on the log-log scale.
    let n = non_zero.len() as f64;
    let (mut sx, mut sy, mut sxy, mut sxx) = (0.0, 0.0, 0.0, 0.0);
    for (rank, &freq) in non_zero.iter().enumerate() {
        let x = ((rank + 1) as f64).ln();
        let y = freq.ln();
        sx += x;
        sy += y;
        sxy += x * y;
        sxx += x * x;
    }

    let denom = n * sxx - sx * sx;
    if denom.abs() < 0.0001 {
        // Degenerate regression (all ranks identical) — no meaningful slope.
        return 0.0;
    }

    -(n * sxy - sx * sy) / denom
}
|
|
188
|
+
|
|
189
|
+
/// Compute the Herfindahl-Hirschman Index (market concentration).
///
/// Sum of squared shares; 1/n for perfectly even values, 1.0 for a
/// monopoly. Returns 0.0 when the total is 0.
pub fn herfindahl_index(values: &[f64]) -> f64 {
    let total: f64 = values.iter().sum();
    if total == 0.0 {
        return 0.0;
    }

    values
        .iter()
        .map(|&v| (v / total) * (v / total))
        .sum()
}
|
|
203
|
+
|
|
204
|
+
/// Compute the coefficient of variation (population std dev / mean).
///
/// Returns 0.0 for empty input or a zero mean.
pub fn coefficient_of_variation(values: &[f64]) -> f64 {
    if values.is_empty() {
        return 0.0;
    }

    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    if mean == 0.0 {
        return 0.0;
    }

    // Population variance (divides by n, not n - 1).
    let variance = values
        .iter()
        .map(|&v| (v - mean) * (v - mean))
        .sum::<f64>()
        / n;

    variance.sqrt() / mean
}
|
|
222
|
+
|
|
223
|
+
/// Compute population skewness (asymmetry of the distribution).
///
/// Mean of standardized cubed deviations. Returns 0.0 with fewer than
/// three samples or zero variance.
pub fn skewness(values: &[f64]) -> f64 {
    if values.len() < 3 {
        return 0.0;
    }

    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    let variance = values
        .iter()
        .map(|&v| (v - mean).powi(2))
        .sum::<f64>()
        / n;
    if variance == 0.0 {
        return 0.0;
    }

    let sd = variance.sqrt();
    values
        .iter()
        .map(|&v| ((v - mean) / sd).powi(3))
        .sum::<f64>()
        / n
}
|
|
248
|
+
|
|
249
|
+
/// Compute population excess kurtosis (tail weight relative to a normal).
///
/// Mean of standardized fourth-power deviations minus 3. Returns 0.0 with
/// fewer than four samples or zero variance.
pub fn kurtosis(values: &[f64]) -> f64 {
    if values.len() < 4 {
        return 0.0;
    }

    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    let variance = values
        .iter()
        .map(|&v| (v - mean).powi(2))
        .sum::<f64>()
        / n;
    if variance == 0.0 {
        return 0.0;
    }

    let sd = variance.sqrt();
    let fourth_moment = values
        .iter()
        .map(|&v| ((v - mean) / sd).powi(4))
        .sum::<f64>()
        / n;

    // Subtract 3 so a normal distribution reads as 0 (excess kurtosis).
    fourth_moment - 3.0
}
|
|
274
|
+
|
|
275
|
+
/// Compute Kullback-Leibler divergence D(P || Q) in nats.
///
/// Both inputs are normalized by their sums first. Index pairs where
/// either entry is non-positive are skipped (i.e. the infinite-divergence
/// case q_i = 0, p_i > 0 contributes nothing rather than infinity).
/// Returns 0.0 on length mismatch, empty input, or a zero total.
pub fn kl_divergence(p: &[f64], q: &[f64]) -> f64 {
    if p.len() != q.len() || p.is_empty() {
        return 0.0;
    }

    let p_sum: f64 = p.iter().sum();
    let q_sum: f64 = q.iter().sum();
    if p_sum == 0.0 || q_sum == 0.0 {
        return 0.0;
    }

    p.iter().zip(q.iter()).fold(0.0, |acc, (&pi, &qi)| {
        if pi > 0.0 && qi > 0.0 {
            let pn = pi / p_sum;
            let qn = qi / q_sum;
            acc + pn * (pn / qn).ln()
        } else {
            acc
        }
    })
}
|
|
299
|
+
|
|
300
|
+
/// Compute Jensen-Shannon divergence (symmetric)
|
|
301
|
+
pub fn js_divergence(p: &[f64], q: &[f64]) -> f64 {
|
|
302
|
+
if p.len() != q.len() || p.is_empty() {
|
|
303
|
+
return 0.0;
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
// Compute M = (P + Q) / 2
|
|
307
|
+
let m: Vec<f64> = p.iter()
|
|
308
|
+
.zip(q.iter())
|
|
309
|
+
.map(|(&pi, &qi)| (pi + qi) / 2.0)
|
|
310
|
+
.collect();
|
|
311
|
+
|
|
312
|
+
(kl_divergence(p, &m) + kl_divergence(q, &m)) / 2.0
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
/// Streaming (online) univariate statistics.
///
/// Accumulates count, mean, the 2nd–4th central-moment sums (for variance,
/// skewness, and kurtosis), and the observed min/max in a single pass,
/// using the Welford/Pébay one-pass update formulas.
pub struct RunningStats {
    count: usize,
    mean: f64,
    m2: f64, // Sum of squared deviations (for variance)
    m3: f64, // Third central-moment sum (for skewness)
    m4: f64, // Fourth central-moment sum (for kurtosis)
    min: f64,
    max: f64,
}

impl RunningStats {
    /// Create an empty accumulator.
    pub fn new() -> Self {
        Self {
            count: 0,
            mean: 0.0,
            m2: 0.0,
            m3: 0.0,
            m4: 0.0,
            min: f64::MAX,
            max: f64::MIN,
        }
    }

    /// Fold one observation into the running moments.
    ///
    /// Rank arithmetic is done in f64: the previous code computed
    /// `(n - 2)` and `n*n - 3*n + 3` in usize, which underflows
    /// (panicking in debug builds) on the very first push where n = 1.
    pub fn push(&mut self, value: f64) {
        let n = self.count + 1;
        let n_f = n as f64;
        let delta = value - self.mean;
        let delta_n = delta / n_f;
        let delta_n2 = delta_n * delta_n;
        let term1 = delta * delta_n * self.count as f64;

        self.mean += delta_n;
        // Update higher moments before m2 — each formula uses the
        // previous iteration's lower moments.
        self.m4 += term1 * delta_n2 * (n_f * n_f - 3.0 * n_f + 3.0)
            + 6.0 * delta_n2 * self.m2
            - 4.0 * delta_n * self.m3;
        self.m3 += term1 * delta_n * (n_f - 2.0) - 3.0 * delta_n * self.m2;
        self.m2 += term1;

        self.count = n;
        self.min = self.min.min(value);
        self.max = self.max.max(value);
    }

    /// Number of observations pushed so far.
    pub fn count(&self) -> usize { self.count }
    /// Running arithmetic mean (0.0 before any push).
    pub fn mean(&self) -> f64 { self.mean }
    /// Smallest value seen (f64::MAX before any push).
    pub fn min(&self) -> f64 { self.min }
    /// Largest value seen (f64::MIN before any push).
    pub fn max(&self) -> f64 { self.max }

    /// Population variance (m2 / n); 0.0 with fewer than 2 samples.
    pub fn variance(&self) -> f64 {
        if self.count < 2 {
            return 0.0;
        }
        self.m2 / self.count as f64
    }

    /// Population standard deviation.
    pub fn std_dev(&self) -> f64 {
        self.variance().sqrt()
    }

    /// Population skewness; 0.0 with fewer than 3 samples or zero spread.
    pub fn skewness(&self) -> f64 {
        if self.count < 3 || self.m2 == 0.0 {
            return 0.0;
        }
        let n = self.count as f64;
        (n.sqrt() * self.m3) / self.m2.powf(1.5)
    }

    /// Excess kurtosis; 0.0 with fewer than 4 samples or zero spread.
    pub fn kurtosis(&self) -> f64 {
        if self.count < 4 || self.m2 == 0.0 {
            return 0.0;
        }
        let n = self.count as f64;
        (n * self.m4) / (self.m2 * self.m2) - 3.0
    }
}
|
|
390
|
+
|
|
391
|
+
impl Default for RunningStats {
|
|
392
|
+
fn default() -> Self {
|
|
393
|
+
Self::new()
|
|
394
|
+
}
|
|
395
|
+
}
|
|
396
|
+
|
|
397
|
+
/// Exponential moving average with smoothing factor `alpha` in [0, 1].
pub struct ExponentialMovingAverage {
    alpha: f64,
    value: Option<f64>,
}

impl ExponentialMovingAverage {
    /// Build an EMA with the given smoothing factor, clamped to [0, 1].
    pub fn new(alpha: f64) -> Self {
        Self {
            alpha: alpha.clamp(0.0, 1.0),
            value: None,
        }
    }

    /// Build an EMA from a period length, using alpha = 2 / (period + 1).
    pub fn from_period(period: usize) -> Self {
        Self::new(2.0 / (period + 1) as f64)
    }

    /// Feed one sample and return the updated average.
    /// The first sample seeds the average unchanged.
    pub fn push(&mut self, value: f64) -> f64 {
        let next = match self.value {
            None => value,
            Some(prev) => self.alpha * value + (1.0 - self.alpha) * prev,
        };
        self.value = Some(next);
        next
    }

    /// Current average, or `None` before the first push.
    pub fn current(&self) -> Option<f64> {
        self.value
    }

    /// Discard all accumulated state.
    pub fn reset(&mut self) {
        self.value = None;
    }
}
|
|
438
|
+
|
|
439
|
+
/// Histogram over [min, max] with a fixed number of equal-width bins.
pub struct Histogram {
    bins: Vec<usize>,
    min: f64,
    max: f64,
    bin_width: f64,
}

impl Histogram {
    /// Create a histogram with `num_bins` equal-width bins spanning
    /// [min, max].
    ///
    /// `num_bins` is clamped to at least 1: the previous code accepted 0,
    /// which made `bin_width` a division by zero and caused `add` to
    /// underflow on `bins.len() - 1`.
    pub fn new(min: f64, max: f64, num_bins: usize) -> Self {
        let num_bins = num_bins.max(1);
        Self {
            bins: vec![0; num_bins],
            min,
            max,
            bin_width: (max - min) / num_bins as f64,
        }
    }

    /// Count a value; samples outside [min, max] are silently dropped.
    pub fn add(&mut self, value: f64) {
        if value < self.min || value > self.max {
            return;
        }
        // `value == max` computes an index one past the end, so clamp it
        // into the last bin.
        let bin = ((value - self.min) / self.bin_width) as usize;
        let bin = bin.min(self.bins.len() - 1);
        self.bins[bin] += 1;
    }

    /// Raw per-bin counts.
    pub fn get_bins(&self) -> &[usize] {
        &self.bins
    }

    /// Per-bin relative frequencies (all zeros when the histogram is empty).
    pub fn normalize(&self) -> Vec<f64> {
        let total: usize = self.bins.iter().sum();
        if total == 0 {
            return vec![0.0; self.bins.len()];
        }
        self.bins.iter().map(|&c| c as f64 / total as f64).collect()
    }

    /// Approximate the p-th percentile (p in [0, 1]) as the center of the
    /// bin where the cumulative count reaches `total * p`. Returns `min`
    /// when empty and `max` if the target is never reached.
    pub fn percentile(&self, p: f64) -> f64 {
        let total: usize = self.bins.iter().sum();
        if total == 0 {
            return self.min;
        }

        let target = (total as f64 * p) as usize;
        let mut cumulative = 0;

        for (i, &count) in self.bins.iter().enumerate() {
            cumulative += count;
            if cumulative >= target {
                return self.min + (i as f64 + 0.5) * self.bin_width;
            }
        }

        self.max
    }
}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import type { Axial } from './math/HexCoordinates';
|
|
2
|
+
import type { Vector2 } from './math/Vector3';
|
|
3
|
+
|
|
4
|
+
/**
 * One cell of the enhanced hex grid.
 *
 * NOTE(review): field semantics below are inferred from the names only —
 * confirm against the engine code that populates these cells.
 */
export interface EnhancedHexCell {
  index: number;     // Flat index of this cell in the grid's cell array
  axial: Axial;      // Axial hex coordinate of the cell
  position: Vector2; // 2D position of the cell (presumably its center) — verify
  owner: number;     // presumably an owner/faction id — verify against caller
  population: number;
  infection: number;
  infectedBy: number; // presumably the id of the infecting owner — verify
}
|
|
13
|
+
|
|
14
|
+
/**
 * Minimal structural view of a hexgrid engine: exposes the cell array
 * when the engine has one available.
 */
export interface EnhancedHexGridEngine {
  // Optional because an engine may not have materialized its cells yet
  // (presumably before initialization) — TODO confirm.
  cells?: EnhancedHexCell[];
}
|