@sjcrh/proteinpaint-rust 2.26.1 → 2.29.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Cargo.toml +7 -2
- package/package.json +1 -1
- package/src/DEanalysis.rs +1010 -0
- package/src/stats_functions.rs +275 -0
- package/src/wilcoxon.rs +2 -272
package/src/stats_functions.rs
CHANGED
@@ -1,7 +1,9 @@
 use fishers_exact::fishers_exact;
+//use r_mathlib;
 use statrs::distribution::{ChiSquared, ContinuousCDF};
 use std::panic;
 
+#[allow(dead_code)]
 pub fn strand_analysis_one_iteration(
     // Runs fisher's exact test or Chisquare test
     alternate_forward_count: u32, //for Fisher/Chi test, represents R1C1 in 2X2 contingency table
@@ -85,6 +87,7 @@ pub fn strand_analysis_one_iteration(
     (p_value, fisher_chisq_test_final)
 }
 
+#[allow(dead_code)]
 fn chi_square_test(
     alternate_forward_count: u32,
     alternate_reverse_count: u32,
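The two hunks above only add #[allow(dead_code)] attributes and a commented-out r_mathlib import around the existing strand-analysis code; the bodies of strand_analysis_one_iteration and chi_square_test are not part of this diff. Purely as an illustration of the statrs types imported at the top of the file (and not the package's own chi_square_test), a minimal sketch of a 2x2 chi-square p-value:

```rust
// Illustrative only: a generic 2x2 chi-square test using the statrs types
// imported in stats_functions.rs. The package's real chi_square_test body is
// not shown in this diff, so treat this as a sketch, not its implementation.
use statrs::distribution::{ChiSquared, ContinuousCDF};

// observed = [r1c1, r1c2, r2c1, r2c2], e.g. alternate/reference forward and reverse counts.
fn chi_square_p(observed: [f64; 4]) -> f64 {
    let (a, b, c, d) = (observed[0], observed[1], observed[2], observed[3]);
    let total = a + b + c + d;
    // Expected counts under independence: row total * column total / grand total.
    let expected = [
        (a + b) * (a + c) / total,
        (a + b) * (b + d) / total,
        (c + d) * (a + c) / total,
        (c + d) * (b + d) / total,
    ];
    let stat: f64 = observed
        .iter()
        .zip(expected.iter())
        .map(|(o, e)| (o - e).powi(2) / e)
        .sum();
    // A 2x2 table has one degree of freedom; the p-value is the upper tail.
    let chi = ChiSquared::new(1.0).unwrap();
    1.0 - chi.cdf(stat)
}

fn main() {
    // Made-up counts for demonstration only.
    println!("p = {}", chi_square_p([12.0, 5.0, 40.0, 38.0]));
}
```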
@@ -137,3 +140,275 @@ fn chi_square_test(
         p_value
     }
 }
+
+//#[allow(dead_code)]
+//pub fn wilcoxon_rank_sum_test(
+//    mut group1: Vec<f64>,
+//    mut group2: Vec<f64>,
+//    threshold: usize,
+//    alternative: char,
+//    correct: bool,
+//) -> f64 {
+//    // Check if there are any ties between the two groups
+//
+//    let mut combined = group1.clone();
+//    combined.extend(group2.iter().cloned());
+//    combined.sort_by(|a, b| a.partial_cmp(b).unwrap());
+//    //println!("combined:{:?}", combined);
+//
+//    group1.sort_by(|a, b| a.partial_cmp(b).unwrap());
+//    group2.sort_by(|a, b| a.partial_cmp(b).unwrap());
+//    //println!("group1:{:?}", group1);
+//    //println!("group2:{:?}", group2);
+//
+//    let mut group1_iter = 0;
+//    let mut group2_iter = 0;
+//    let mut xy: Vec<char> = Vec::with_capacity(combined.len()); // Stores X-Y classification
+//    let mut ranks: Vec<f64> = Vec::with_capacity(combined.len()); // Stores the rank of each element
+//    let mut is_repeat = false;
+//    let mut repeat_present = false;
+//    let mut frac_rank: f64 = 0.0;
+//    let mut num_repeats: f64 = 1.0;
+//    let mut repeat_iter: f64 = 1.0;
+//    #[allow(unused_variables)]
+//    let mut weight_x: f64 = 0.0;
+//    let mut weight_y: f64 = 0.0;
+//    let mut group_char: char = 'X';
+//    let mut rank_frequencies: Vec<f64> = Vec::with_capacity(combined.len());
+//    for i in 0..combined.len() {
+//        //println!("group1_iter:{}", group1_iter);
+//        //println!("group2_iter:{}", group2_iter);
+//        //println!("item1:{}", combined[i]);
+//        //println!("is_repeat:{}", is_repeat);
+//        if group1_iter < group1.len() && combined[i] == group1[group1_iter] {
+//            xy.push('X');
+//            group1_iter += 1;
+//            group_char = 'X';
+//        } else if group2_iter < group2.len() && combined[i] == group2[group2_iter] {
+//            xy.push('Y');
+//            group2_iter += 1;
+//            group_char = 'Y';
+//        }
+//
+//        // Computing ranks
+//        if is_repeat == false {
+//            // Check if current element has other occurences
+//            num_repeats = 1.0;
+//            for j in i + 1..combined.len() {
+//                if combined[i] == combined[j] {
+//                    is_repeat = true;
+//                    repeat_present = true;
+//                    repeat_iter = 1.0;
+//                    num_repeats += 1.0;
+//                } else {
+//                    break;
+//                }
+//            }
+//            //println!("num_repeats:{}", num_repeats);
+//            if is_repeat == false {
+//                ranks.push(i as f64 + 1.0);
+//                if group_char == 'X' {
+//                    weight_x += i as f64 + 1.0;
+//                } else if group_char == 'Y' {
+//                    weight_y += i as f64 + 1.0;
+//                }
+//                //rank_frequencies.push(RankFreq {
+//                //    rank: i as f64 + 1.0,
+//                //    freq: 1,
+//                //});
+//                rank_frequencies.push(1.0);
+//            } else {
+//                frac_rank = calculate_frac_rank(i as f64 + 1.0, num_repeats);
+//                ranks.push(frac_rank);
+//                if group_char == 'X' {
+//                    weight_x += frac_rank;
+//                } else if group_char == 'Y' {
+//                    weight_y += frac_rank
+//                }
+//                //rank_frequencies.push(RankFreq {
+//                //    rank: frac_rank,
+//                //    freq: num_repeats as usize,
+//                //});
+//                rank_frequencies.push(num_repeats);
+//            }
+//        } else if repeat_iter < num_repeats {
+//            // Repeat case
+//            ranks.push(frac_rank);
+//            repeat_iter += 1.0;
+//            if group_char == 'X' {
+//                weight_x += frac_rank;
+//            } else if group_char == 'Y' {
+//                weight_y += frac_rank
+//            }
+//            if repeat_iter == num_repeats {
+//                is_repeat = false;
+//            }
+//        } else {
+//            //println!("i:{}", i);
+//            ranks.push(i as f64 + 1.0);
+//            repeat_iter = 1.0;
+//            num_repeats = 1.0;
+//            if group_char == 'X' {
+//                weight_x += i as f64 + 1.0;
+//            } else if group_char == 'Y' {
+//                weight_y += i as f64 + 1.0;
+//            }
+//        }
+//    }
+//    //println!("rank_frequencies:{:?}", rank_frequencies);
+//    //println!("xy:{:?}", xy);
+//    //println!("ranks:{:?}", ranks);
+//    //println!("weight_x:{}", weight_x);
+//    //println!("weight_y:{}", weight_y);
+//
+//    //u_dash (calculated below) calculates the "W Statistic" in wilcox.test function in R
+//
+//    let u_y = weight_y - (group2.len() as f64 * (group2.len() as f64 + 1.0) / 2.0) as f64;
+//    let u_dash_y = (u_y - (group1.len() * group2.len()) as f64).abs();
+//    //println!("u_dash_y:{}", u_dash_y);
+//
+//    let u_x = weight_x - (group1.len() as f64 * (group1.len() as f64 + 1.0) / 2.0) as f64;
+//    let _u_dash_x = (u_x - (group1.len() * group2.len()) as f64).abs();
+//    //println!("u_dash_x:{}", u_dash_x);
+//
+//    // Calculate test_statistic
+//
+//    //let t1 = weight_x - ((group1.len() as f64) * (group1.len() as f64 + 1.0)) / 2.0;
+//    //let t2 = weight_y - ((group2.len() as f64) * (group2.len() as f64 + 1.0)) / 2.0;
+//    //
+//    //let mut test_statistic = t1;
+//    //if t2 < t1 {
+//    //    test_statistic = t2;
+//    //}
+//
+//    //println!("test_statistic:{}", test_statistic);
+//
+//    if group1.len() < threshold && group2.len() < threshold && repeat_present == false {
+//        // Compute exact p-values
+//
+//        // Calculate conditional probability for weight_y
+//
+//        if alternative == 'g' {
+//            // Alternative "greater"
+//            //if group1.len() <= low_cutoff && group2.len() <= low_cutoff {
+//            //    iterate_exact_p_values(ranks, weight_y, group2.len())
+//            //} else {
+//            calculate_exact_probability(u_dash_y, group1.len(), group2.len(), alternative)
+//            //}
+//        } else if alternative == 'l' {
+//            // Alternative "lesser"
+//            //if group1.len() <= low_cutoff && group2.len() <= low_cutoff {
+//            //    iterate_exact_p_values(ranks, weight_x, group1.len())
+//            //} else {
+//            calculate_exact_probability(u_dash_y, group1.len(), group2.len(), alternative)
+//            //}
+//        } else {
+//            // Two-sided distribution
+//            calculate_exact_probability(u_dash_y, group1.len(), group2.len(), alternative)
+//        }
+//    } else {
+//        // Compute p-values from a normal distribution
+//        //println!("group1 length:{}", group1.len());
+//        //println!("group2 length:{}", group2.len());
+//
+//        let mut z = u_dash_y - ((group1.len() * group2.len()) as f64) / 2.0;
+//        //println!("z_original:{}", z);
+//        let mut nties_sum: f64 = 0.0;
+//        for i in 0..rank_frequencies.len() {
+//            nties_sum += rank_frequencies[i] * rank_frequencies[i] * rank_frequencies[i]
+//                - rank_frequencies[i];
+//        }
+//
+//        let sigma = (((group1.len() * group2.len()) as f64) / 12.0
+//            * ((group1.len() + group2.len() + 1) as f64
+//                - nties_sum
+//                    / (((group1.len() + group2.len()) as f64)
+//                        * ((group1.len() + group2.len() - 1) as f64))))
+//        .sqrt();
+//        //println!("sigma:{}", sigma);
+//        let mut correction: f64 = 0.0;
+//        if correct == true {
+//            if alternative == 'g' {
+//                // Alternative "greater"
+//                correction = 0.5;
+//            } else if alternative == 'l' {
+//                // Alternative "lesser"
+//                correction = -0.5;
+//            } else {
+//                // Alternative "two-sided"
+//                if z > 0.0 {
+//                    correction = 0.5;
+//                } else if z < 0.0 {
+//                    correction = -0.5;
+//                } else {
+//                    // z=0
+//                    correction = 0.0;
+//                }
+//            }
+//        }
+//        z = (z - correction) / sigma;
+//        //println!("z:{}", z);
+//        if alternative == 'g' {
+//            // Alternative "greater"
+//            //println!("greater:{}", n.cdf(weight_y));
+//            //1.0 - n.cdf(z) // Applying continuity correction
+//            r_mathlib::normal_cdf(z, 0.0, 1.0, false, false)
+//        } else if alternative == 'l' {
+//            // Alternative "lesser"
+//            //println!("lesser:{}", n.cdf(weight_x));
+//            //n.cdf(z) // Applying continuity coorection
+//            r_mathlib::normal_cdf(z, 0.0, 1.0, true, false)
+//        } else {
+//            // Alternative "two-sided"
+//            let p_g = r_mathlib::normal_cdf(z, 0.0, 1.0, false, false); // Applying continuity correction
+//            let p_l = r_mathlib::normal_cdf(z, 0.0, 1.0, true, false); // Applying continuity correction
+//            let mut p_value;
+//            if p_g < p_l {
+//                p_value = 2.0 * p_g;
+//            } else {
+//                p_value = 2.0 * p_l;
+//            }
+//            //println!("p_value:{}", p_value);
+//            if p_value > 1.0 {
+//                p_value = 1.0;
+//            }
+//            p_value
+//        }
+//    }
+//}
+
+//// To be used only when there are no ties in the input data
+//#[allow(dead_code)]
+//fn calculate_exact_probability(weight: f64, x: usize, y: usize, alternative: char) -> f64 {
+//    //println!("Using Wilcoxon CDF");
+//    let mut p_value;
+//    if alternative == 't' {
+//        if weight > ((x * y) as f64) / 2.0 {
+//            p_value = 2.0 * r_mathlib::wilcox_cdf(weight - 1.0, x as f64, y as f64, false, false);
+//        } else {
+//            p_value = 2.0 * r_mathlib::wilcox_cdf(weight, x as f64, y as f64, true, false);
+//        }
+//        if p_value > 1.0 {
+//            p_value = 1.0;
+//        }
+//    } else if alternative == 'g' {
+//        p_value = r_mathlib::wilcox_cdf(weight - 1.0, x as f64, y as f64, false, false);
+//    } else if alternative == 'l' {
+//        p_value = r_mathlib::wilcox_cdf(weight, x as f64, y as f64, true, false);
+//    } else {
+//        // Should not happen
+//        panic!("Unknown alternative option given, please check!");
+//    }
+//    //println!("p_value:{}", p_value);
+//    p_value
+//}
+//
+//#[allow(dead_code)]
+//pub fn calculate_frac_rank(current_rank: f64, num_repeats: f64) -> f64 {
+//    let mut sum = 0.0;
+//    for i in 0..num_repeats as usize {
+//        let rank = current_rank + i as f64;
+//        sum += rank;
+//    }
+//    sum / num_repeats
+//}
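The commented-out function above mirrors R's wilcox.test: exact p-values from the Wilcoxon distribution when both groups are small and untied, otherwise a normal approximation with a tie correction on the variance and an optional continuity correction. As a reading aid, here is a minimal standalone sketch of just that normal-approximation branch for the two-sided case, using statrs (already imported in this file) in place of r_mathlib::normal_cdf; the function name normal_approx_p and its inputs are illustrative and not part of the package:

```rust
// Sketch of the tie-corrected normal approximation used above (two-sided only).
// u_dash_y is the W statistic; tie_sizes holds the size of each run of tied
// ranks (untied values, size 1, contribute nothing to the correction).
use statrs::distribution::{ContinuousCDF, Normal};

fn normal_approx_p(u_dash_y: f64, n1: usize, n2: usize, tie_sizes: &[f64]) -> f64 {
    let n = (n1 + n2) as f64;
    let mut z = u_dash_y - (n1 * n2) as f64 / 2.0;

    // Tie correction term: sum of t^3 - t over each run of tied ranks.
    let nties_sum: f64 = tie_sizes.iter().map(|t| t * t * t - t).sum();

    // sigma = sqrt(n1*n2/12 * ((n + 1) - nties_sum / (n * (n - 1))))
    let sigma = ((n1 * n2) as f64 / 12.0 * ((n + 1.0) - nties_sum / (n * (n - 1.0)))).sqrt();

    // Continuity correction, signed by the direction of the deviation.
    let correction = if z > 0.0 {
        0.5
    } else if z < 0.0 {
        -0.5
    } else {
        0.0
    };
    z = (z - correction) / sigma;

    // Two-sided p-value: twice the smaller tail, capped at 1.
    let std_normal = Normal::new(0.0, 1.0).unwrap();
    let p_g = 1.0 - std_normal.cdf(z);
    let p_l = std_normal.cdf(z);
    (2.0 * p_g.min(p_l)).min(1.0)
}

fn main() {
    // Made-up numbers: W = 12 for groups of size 4 and 5 with one pair of ties.
    println!("p = {}", normal_approx_p(12.0, 4, 5, &[2.0]));
}
```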
package/src/wilcoxon.rs
CHANGED
@@ -44,10 +44,10 @@ This wilcoxon test implementation aims to copy the methodology used in R's wilco
 */
 
 use json;
-use r_stats;
 use serde::{Deserialize, Serialize};
 use std::io;
 
+mod stats_functions; // Import Wilcoxon function
 mod test_examples; // Contains examples to test the wilcoxon rank sum test
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -116,7 +116,7 @@ fn main() {
                 .unwrap();
             output_string += &",".to_string();
         } else {
-            let pvalue: f64 = wilcoxon_rank_sum_test(
+            let pvalue: f64 = stats_functions::wilcoxon_rank_sum_test(
                 vec1.clone(),
                 vec2.clone(),
                 THRESHOLD,
@@ -155,273 +155,3 @@ fn main() {
         Err(error) => println!("Piping error: {}", error),
     }
 }
-
-fn wilcoxon_rank_sum_test(
-    mut group1: Vec<f64>,
-    mut group2: Vec<f64>,
-    threshold: usize,
-    alternative: char,
-    correct: bool,
-) -> f64 {
-    // Check if there are any ties between the two groups
-
-    let mut combined = group1.clone();
-    combined.extend(group2.iter().cloned());
-    combined.sort_by(|a, b| a.partial_cmp(b).unwrap());
-    //println!("combined:{:?}", combined);
-
-    group1.sort_by(|a, b| a.partial_cmp(b).unwrap());
-    group2.sort_by(|a, b| a.partial_cmp(b).unwrap());
-    //println!("group1:{:?}", group1);
-    //println!("group2:{:?}", group2);
-
-    let mut group1_iter = 0;
-    let mut group2_iter = 0;
-    let mut xy = Vec::<char>::new(); // Stores X-Y classification
-    let mut ranks = Vec::<f64>::new(); // Stores the rank of each element
-    let mut is_repeat = false;
-    let mut repeat_present = false;
-    let mut frac_rank: f64 = 0.0;
-    let mut num_repeats: f64 = 1.0;
-    let mut repeat_iter: f64 = 1.0;
-    #[allow(unused_variables)]
-    let mut weight_x: f64 = 0.0;
-    let mut weight_y: f64 = 0.0;
-    let mut group_char: char = 'X';
-    let mut rank_frequencies = Vec::<f64>::new();
-    for i in 0..combined.len() {
-        //println!("group1_iter:{}", group1_iter);
-        //println!("group2_iter:{}", group2_iter);
-        //println!("item1:{}", combined[i]);
-        //println!("is_repeat:{}", is_repeat);
-        if group1_iter < group1.len() && combined[i] == group1[group1_iter] {
-            xy.push('X');
-            group1_iter += 1;
-            group_char = 'X';
-        } else if group2_iter < group2.len() && combined[i] == group2[group2_iter] {
-            xy.push('Y');
-            group2_iter += 1;
-            group_char = 'Y';
-        }
-
-        // Computing ranks
-        if is_repeat == false {
-            // Check if current element has other occurences
-            num_repeats = 1.0;
-            for j in i + 1..combined.len() {
-                if combined[i] == combined[j] {
-                    is_repeat = true;
-                    repeat_present = true;
-                    repeat_iter = 1.0;
-                    num_repeats += 1.0;
-                } else {
-                    break;
-                }
-            }
-            //println!("num_repeats:{}", num_repeats);
-            if is_repeat == false {
-                ranks.push(i as f64 + 1.0);
-                if group_char == 'X' {
-                    weight_x += i as f64 + 1.0;
-                } else if group_char == 'Y' {
-                    weight_y += i as f64 + 1.0;
-                }
-                //rank_frequencies.push(RankFreq {
-                //    rank: i as f64 + 1.0,
-                //    freq: 1,
-                //});
-                rank_frequencies.push(1.0);
-            } else {
-                frac_rank = calculate_frac_rank(i as f64 + 1.0, num_repeats);
-                ranks.push(frac_rank);
-                if group_char == 'X' {
-                    weight_x += frac_rank;
-                } else if group_char == 'Y' {
-                    weight_y += frac_rank
-                }
-                //rank_frequencies.push(RankFreq {
-                //    rank: frac_rank,
-                //    freq: num_repeats as usize,
-                //});
-                rank_frequencies.push(num_repeats);
-            }
-        } else if repeat_iter < num_repeats {
-            // Repeat case
-            ranks.push(frac_rank);
-            repeat_iter += 1.0;
-            if group_char == 'X' {
-                weight_x += frac_rank;
-            } else if group_char == 'Y' {
-                weight_y += frac_rank
-            }
-            if repeat_iter == num_repeats {
-                is_repeat = false;
-            }
-        } else {
-            //println!("i:{}", i);
-            ranks.push(i as f64 + 1.0);
-            repeat_iter = 1.0;
-            num_repeats = 1.0;
-            if group_char == 'X' {
-                weight_x += i as f64 + 1.0;
-            } else if group_char == 'Y' {
-                weight_y += i as f64 + 1.0;
-            }
-        }
-    }
-    //println!("rank_frequencies:{:?}", rank_frequencies);
-    //println!("xy:{:?}", xy);
-    //println!("ranks:{:?}", ranks);
-    //println!("weight_x:{}", weight_x);
-    //println!("weight_y:{}", weight_y);
-
-    //u_dash (calculated below) calculates the "W Statistic" in wilcox.test function in R
-
-    let u_y = weight_y - (group2.len() as f64 * (group2.len() as f64 + 1.0) / 2.0) as f64;
-    let u_dash_y = (u_y - (group1.len() * group2.len()) as f64).abs();
-    //println!("u_dash_y:{}", u_dash_y);
-
-    let u_x = weight_x - (group1.len() as f64 * (group1.len() as f64 + 1.0) / 2.0) as f64;
-    let _u_dash_x = (u_x - (group1.len() * group2.len()) as f64).abs();
-    //println!("u_dash_x:{}", u_dash_x);
-
-    // Calculate test_statistic
-
-    //let t1 = weight_x - ((group1.len() as f64) * (group1.len() as f64 + 1.0)) / 2.0;
-    //let t2 = weight_y - ((group2.len() as f64) * (group2.len() as f64 + 1.0)) / 2.0;
-    //
-    //let mut test_statistic = t1;
-    //if t2 < t1 {
-    //    test_statistic = t2;
-    //}
-
-    //println!("test_statistic:{}", test_statistic);
-
-    if group1.len() < threshold && group2.len() < threshold && repeat_present == false {
-        // Compute exact p-values
-
-        // Calculate conditional probability for weight_y
-
-        if alternative == 'g' {
-            // Alternative "greater"
-            //if group1.len() <= low_cutoff && group2.len() <= low_cutoff {
-            //    iterate_exact_p_values(ranks, weight_y, group2.len())
-            //} else {
-            calculate_exact_probability(u_dash_y, group1.len(), group2.len(), alternative)
-            //}
-        } else if alternative == 'l' {
-            // Alternative "lesser"
-            //if group1.len() <= low_cutoff && group2.len() <= low_cutoff {
-            //    iterate_exact_p_values(ranks, weight_x, group1.len())
-            //} else {
-            calculate_exact_probability(u_dash_y, group1.len(), group2.len(), alternative)
-            //}
-        } else {
-            // Two-sided distribution
-            calculate_exact_probability(u_dash_y, group1.len(), group2.len(), alternative)
-        }
-    } else {
-        // Compute p-values from a normal distribution
-        //println!("group1 length:{}", group1.len());
-        //println!("group2 length:{}", group2.len());
-
-        let mut z = u_dash_y - ((group1.len() * group2.len()) as f64) / 2.0;
-        //println!("z_original:{}", z);
-        let mut nties_sum: f64 = 0.0;
-        for i in 0..rank_frequencies.len() {
-            nties_sum += rank_frequencies[i] * rank_frequencies[i] * rank_frequencies[i]
-                - rank_frequencies[i];
-        }
-
-        let sigma = (((group1.len() * group2.len()) as f64) / 12.0
-            * ((group1.len() + group2.len() + 1) as f64
-                - nties_sum
-                    / (((group1.len() + group2.len()) as f64)
-                        * ((group1.len() + group2.len() - 1) as f64))))
-        .sqrt();
-        //println!("sigma:{}", sigma);
-        let mut correction: f64 = 0.0;
-        if correct == true {
-            if alternative == 'g' {
-                // Alternative "greater"
-                correction = 0.5;
-            } else if alternative == 'g' {
-                // Alternative "lesser"
-                correction = -0.5;
-            } else {
-                // Alternative "two-sided"
-                if z > 0.0 {
-                    correction = 0.5;
-                } else if z < 0.0 {
-                    correction = -0.5;
-                } else {
-                    // z=0
-                    correction = 0.0;
-                }
-            }
-        }
-        z = (z - correction) / sigma;
-        //println!("z:{}", z);
-        if alternative == 'g' {
-            // Alternative "greater"
-            //println!("greater:{}", n.cdf(weight_y));
-            //1.0 - n.cdf(z) // Applying continuity correction
-            r_stats::normal_cdf(z, 0.0, 1.0, false, false)
-        } else if alternative == 'l' {
-            // Alternative "lesser"
-            //println!("lesser:{}", n.cdf(weight_x));
-            //n.cdf(z) // Applying continuity coorection
-            r_stats::normal_cdf(z, 0.0, 1.0, true, false)
-        } else {
-            // Alternative "two-sided"
-            let p_g = r_stats::normal_cdf(z, 0.0, 1.0, false, false); // Applying continuity correction
-            let p_l = r_stats::normal_cdf(z, 0.0, 1.0, true, false); // Applying continuity correction
-            let mut p_value;
-            if p_g < p_l {
-                p_value = 2.0 * p_g;
-            } else {
-                p_value = 2.0 * p_l;
-            }
-            //println!("p_value:{}", p_value);
-            if p_value > 1.0 {
-                p_value = 1.0;
-            }
-            p_value
-        }
-    }
-}
-
-// To be used only when there are no ties in the input data
-fn calculate_exact_probability(weight: f64, x: usize, y: usize, alternative: char) -> f64 {
-    //println!("Using Wilcoxon CDF");
-    let mut p_value;
-    if alternative == 't' {
-        if weight > ((x * y) as f64) / 2.0 {
-            p_value = 2.0 * r_stats::wilcox_cdf(weight - 1.0, x as f64, y as f64, false, false);
-        } else {
-            p_value = 2.0 * r_stats::wilcox_cdf(weight, x as f64, y as f64, true, false);
-        }
-        if p_value > 1.0 {
-            p_value = 1.0;
-        }
-    } else if alternative == 'g' {
-        p_value = r_stats::wilcox_cdf(weight - 1.0, x as f64, y as f64, false, false);
-    } else if alternative == 'l' {
-        p_value = r_stats::wilcox_cdf(weight, x as f64, y as f64, true, false);
-    } else {
-        // Should not happen
-        panic!("Unknown alternative option given, please check!");
-    }
-    //println!("p_value:{}", p_value);
-    p_value
-}
-
-fn calculate_frac_rank(current_rank: f64, num_repeats: f64) -> f64 {
-    let mut sum = 0.0;
-    for i in 0..num_repeats as usize {
-        let rank = current_rank + i as f64;
-        sum += rank;
-    }
-
-    sum / num_repeats
-}
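After this change, wilcoxon.rs keeps only the JSON/stdin plumbing and reaches the test through the new stats_functions module. A minimal sketch of the resulting call path, assuming stats_functions.rs sits next to this file and exports a public wilcoxon_rank_sum_test with the signature shown in the diff (the THRESHOLD value and the input vectors below are made up for illustration):

```rust
// Hypothetical trimmed-down caller illustrating the new module layout;
// this is not the package's actual wilcoxon.rs.
mod stats_functions; // expects src/stats_functions.rs with a pub wilcoxon_rank_sum_test

// Assumed cutoff below which both groups must stay for the exact p-value path.
const THRESHOLD: usize = 50;

fn main() {
    let vec1 = vec![1.5, 2.0, 3.2, 4.8];
    let vec2 = vec![2.1, 2.9, 5.5, 6.0, 7.3];
    // alternative: 't' = two-sided, 'g' = greater, 'l' = lesser; true = continuity correction.
    let pvalue: f64 =
        stats_functions::wilcoxon_rank_sum_test(vec1.clone(), vec2.clone(), THRESHOLD, 't', true);
    println!("p-value: {}", pvalue);
}
```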