risk-network 0.0.11__py3-none-any.whl → 0.0.12b0__py3-none-any.whl

This diff compares the contents of two package versions as published to their public registry. It is provided for informational purposes only.
Files changed (42)
  1. risk/__init__.py +1 -1
  2. risk/risk.py +5 -5
  3. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info}/METADATA +10 -12
  4. risk_network-0.0.12b0.dist-info/RECORD +7 -0
  5. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info}/WHEEL +1 -1
  6. risk/annotations/__init__.py +0 -7
  7. risk/annotations/annotations.py +0 -354
  8. risk/annotations/io.py +0 -240
  9. risk/annotations/nltk_setup.py +0 -85
  10. risk/log/__init__.py +0 -11
  11. risk/log/console.py +0 -141
  12. risk/log/parameters.py +0 -172
  13. risk/neighborhoods/__init__.py +0 -8
  14. risk/neighborhoods/api.py +0 -442
  15. risk/neighborhoods/community.py +0 -412
  16. risk/neighborhoods/domains.py +0 -358
  17. risk/neighborhoods/neighborhoods.py +0 -508
  18. risk/network/__init__.py +0 -6
  19. risk/network/geometry.py +0 -150
  20. risk/network/graph/__init__.py +0 -6
  21. risk/network/graph/api.py +0 -200
  22. risk/network/graph/graph.py +0 -269
  23. risk/network/graph/summary.py +0 -254
  24. risk/network/io.py +0 -550
  25. risk/network/plotter/__init__.py +0 -6
  26. risk/network/plotter/api.py +0 -54
  27. risk/network/plotter/canvas.py +0 -291
  28. risk/network/plotter/contour.py +0 -330
  29. risk/network/plotter/labels.py +0 -924
  30. risk/network/plotter/network.py +0 -294
  31. risk/network/plotter/plotter.py +0 -143
  32. risk/network/plotter/utils/colors.py +0 -416
  33. risk/network/plotter/utils/layout.py +0 -94
  34. risk/stats/__init__.py +0 -15
  35. risk/stats/permutation/__init__.py +0 -6
  36. risk/stats/permutation/permutation.py +0 -237
  37. risk/stats/permutation/test_functions.py +0 -70
  38. risk/stats/significance.py +0 -166
  39. risk/stats/stat_tests.py +0 -267
  40. risk_network-0.0.11.dist-info/RECORD +0 -41
  41. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info/licenses}/LICENSE +0 -0
  42. {risk_network-0.0.11.dist-info → risk_network-0.0.12b0.dist-info}/top_level.txt +0 -0
@@ -1,237 +0,0 @@ risk/stats/permutation/permutation.py (deleted)
- """
- risk/stats/permutation/permutation
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- """
-
- from multiprocessing import get_context, Manager
- from multiprocessing.managers import ValueProxy
- from typing import Any, Callable, Dict, List, Tuple, Union
-
- import numpy as np
- from scipy.sparse import csr_matrix
- from threadpoolctl import threadpool_limits
- from tqdm import tqdm
-
- from risk.stats.permutation.test_functions import DISPATCH_TEST_FUNCTIONS
-
-
- def compute_permutation_test(
-     neighborhoods: csr_matrix,
-     annotations: csr_matrix,
-     score_metric: str = "sum",
-     null_distribution: str = "network",
-     num_permutations: int = 1000,
-     random_seed: int = 888,
-     max_workers: int = 1,
- ) -> Dict[str, Any]:
-     """Compute permutation test for enrichment and depletion in neighborhoods.
-
-     Args:
-         neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
-         annotations (csr_matrix): Sparse binary matrix representing annotations.
-         score_metric (str, optional): Metric to use for scoring ('sum' or 'stdev'). Defaults to "sum".
-         null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
-         num_permutations (int, optional): Number of permutations to run. Defaults to 1000.
-         random_seed (int, optional): Seed for random number generation. Defaults to 888.
-         max_workers (int, optional): Number of workers for multiprocessing. Defaults to 1.
-
-     Returns:
-         Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
-     """
-     # Ensure that the matrices are in the correct format and free of NaN values
-     # NOTE: Keep the data type as float32 to avoid locking issues with dot product operations
-     neighborhoods = neighborhoods.astype(np.float32)
-     annotations = annotations.astype(np.float32)
-     # Retrieve the appropriate neighborhood score function based on the metric
-     neighborhood_score_func = DISPATCH_TEST_FUNCTIONS[score_metric]
-
-     # Run the permutation test to calculate depletion and enrichment counts
-     counts_depletion, counts_enrichment = _run_permutation_test(
-         neighborhoods=neighborhoods,
-         annotations=annotations,
-         neighborhood_score_func=neighborhood_score_func,
-         null_distribution=null_distribution,
-         num_permutations=num_permutations,
-         random_seed=random_seed,
-         max_workers=max_workers,
-     )
-     # Compute p-values for depletion and enrichment
-     # If counts are 0, set p-value to 1/num_permutations to avoid zero p-values
-     depletion_pvals = np.maximum(counts_depletion, 1) / num_permutations
-     enrichment_pvals = np.maximum(counts_enrichment, 1) / num_permutations
-
-     return {
-         "depletion_pvals": depletion_pvals,
-         "enrichment_pvals": enrichment_pvals,
-     }
-
-
- def _run_permutation_test(
-     neighborhoods: csr_matrix,
-     annotations: csr_matrix,
-     neighborhood_score_func: Callable,
-     null_distribution: str = "network",
-     num_permutations: int = 1000,
-     random_seed: int = 888,
-     max_workers: int = 4,
- ) -> tuple:
-     """Run the permutation test to calculate depletion and enrichment counts.
-
-     Args:
-         neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
-         annotations (csr_matrix): Sparse binary matrix representing annotations.
-         neighborhood_score_func (Callable): Function to calculate neighborhood scores.
-         null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
-         num_permutations (int, optional): Number of permutations. Defaults to 1000.
-         random_seed (int, optional): Seed for random number generation. Defaults to 888.
-         max_workers (int, optional): Number of workers for multiprocessing. Defaults to 4.
-
-     Returns:
-         tuple: Depletion and enrichment counts.
-     """
-     # Initialize the RNG for reproducibility
-     rng = np.random.default_rng(seed=random_seed)
-     # Determine the indices to use based on the null distribution type
-     if null_distribution == "network":
-         idxs = range(annotations.shape[0])
-     elif null_distribution == "annotations":
-         idxs = np.nonzero(annotations.getnnz(axis=1) > 0)[0]
-     else:
-         raise ValueError(
-             "Invalid null_distribution value. Choose either 'network' or 'annotations'."
-         )
-
-     # Replace NaNs with zeros in the sparse annotations matrix
-     annotations.data[np.isnan(annotations.data)] = 0
-     annotation_matrix_obsv = annotations[idxs]
-     neighborhoods_matrix_obsv = neighborhoods.T[idxs].T
-     # Calculate observed neighborhood scores
-     with np.errstate(invalid="ignore", divide="ignore"):
-         observed_neighborhood_scores = neighborhood_score_func(
-             neighborhoods_matrix_obsv, annotation_matrix_obsv
-         )
-
-     # Initialize count matrices for depletion and enrichment
-     counts_depletion = np.zeros(observed_neighborhood_scores.shape)
-     counts_enrichment = np.zeros(observed_neighborhood_scores.shape)
-     # Determine the number of permutations to run in each worker process
-     subset_size = num_permutations // max_workers
-     remainder = num_permutations % max_workers
-
-     # Use the spawn context for creating a new multiprocessing pool
-     ctx = get_context("spawn")
-     manager = Manager()
-     progress_counter = manager.Value("i", 0)
-     total_progress = num_permutations
-
-     # Generate precomputed permutations
-     permutations = [rng.permutation(idxs) for _ in range(num_permutations)]
-     # Divide permutations into batches for workers
-     batch_size = subset_size + (1 if remainder > 0 else 0)
-     permutation_batches = [
-         permutations[i * batch_size : (i + 1) * batch_size] for i in range(max_workers)
-     ]
-
-     # Execute the permutation test using multiprocessing
-     with ctx.Pool(max_workers) as pool:
-         with tqdm(total=total_progress, desc="Total progress", position=0) as progress:
-             # Prepare parameters for multiprocessing
-             params_list = [
-                 (
-                     permutation_batches[i],  # Pass the batch of precomputed permutations
-                     annotations,
-                     neighborhoods_matrix_obsv,
-                     observed_neighborhood_scores,
-                     neighborhood_score_func,
-                     num_permutations,
-                     progress_counter,
-                     max_workers,
-                 )
-                 for i in range(max_workers)
-             ]
-
-             # Start the permutation process in parallel
-             results = pool.starmap_async(_permutation_process_batch, params_list, chunksize=1)
-
-             # Update progress bar based on progress_counter
-             while not results.ready():
-                 progress.update(progress_counter.value - progress.n)
-                 results.wait(0.1)  # Wait for 100ms
-             # Ensure progress bar reaches 100%
-             progress.update(total_progress - progress.n)
-
-     # Accumulate results from each worker
-     for local_counts_depletion, local_counts_enrichment in results.get():
-         counts_depletion = np.add(counts_depletion, local_counts_depletion)
-         counts_enrichment = np.add(counts_enrichment, local_counts_enrichment)
-
-     return counts_depletion, counts_enrichment
-
-
- def _permutation_process_batch(
-     permutations: Union[List, Tuple, np.ndarray],
-     annotation_matrix: csr_matrix,
-     neighborhoods_matrix_obsv: csr_matrix,
-     observed_neighborhood_scores: np.ndarray,
-     neighborhood_score_func: Callable,
-     num_permutations: int,
-     progress_counter: ValueProxy,
-     max_workers: int,
- ) -> tuple:
-     """Process a batch of permutations in a worker process.
-
-     Args:
-         permutations (Union[List, Tuple, np.ndarray]): Permutation batch to process.
-         annotation_matrix (csr_matrix): Sparse binary matrix representing annotations.
-         neighborhoods_matrix_obsv (csr_matrix): Sparse binary matrix representing observed neighborhoods.
-         observed_neighborhood_scores (np.ndarray): Observed neighborhood scores.
-         neighborhood_score_func (Callable): Function to calculate neighborhood scores.
-         num_permutations (int): Number of total permutations across all subsets.
-         progress_counter (multiprocessing.managers.ValueProxy): Shared counter for tracking progress.
-         max_workers (int): Number of workers for multiprocessing.
-
-     Returns:
-         tuple: Local counts of depletion and enrichment.
-     """
-     # Initialize local count matrices for this worker
-     local_counts_depletion = np.zeros(observed_neighborhood_scores.shape)
-     local_counts_enrichment = np.zeros(observed_neighborhood_scores.shape)
-
-     # Limit the number of threads used by NumPy's BLAS implementation to 1 when more than one worker is used
-     # NOTE: This does not work for Mac M chips due to a bug in the threadpoolctl package
-     # This is currently a known issue and is being addressed by the maintainers [https://github.com/joblib/threadpoolctl/issues/135]
-     limits = None if max_workers == 1 else 1
-     with threadpool_limits(limits=limits, user_api="blas"):
-         # Initialize a local counter for batched progress updates
-         local_progress = 0
-         # Calculate the modulo value based on total permutations for 1/100th frequency updates
-         modulo_value = max(1, num_permutations // 100)
-
-         for permuted_idxs in permutations:
-             # Apply precomputed permutation
-             annotation_matrix_permut = annotation_matrix[permuted_idxs]
-             # Calculate permuted neighborhood scores
-             with np.errstate(invalid="ignore", divide="ignore"):
-                 permuted_neighborhood_scores = neighborhood_score_func(
-                     neighborhoods_matrix_obsv, annotation_matrix_permut
-                 )
-
-             # Update local depletion and enrichment counts
-             local_counts_depletion = np.add(
-                 local_counts_depletion, permuted_neighborhood_scores <= observed_neighborhood_scores
-             )
-             local_counts_enrichment = np.add(
-                 local_counts_enrichment,
-                 permuted_neighborhood_scores >= observed_neighborhood_scores,
-             )
-
-             # Update progress
-             local_progress += 1
-             if local_progress % modulo_value == 0:
-                 progress_counter.value += modulo_value
-
-         # Final progress update for any remaining iterations not yet reported
-         if local_progress % modulo_value != 0:
-             progress_counter.value += local_progress % modulo_value
-
-     return local_counts_depletion, local_counts_enrichment
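
For orientation, here is a minimal, hedged usage sketch of the removed `compute_permutation_test` entry point. It runs against risk-network 0.0.11, the last version that ships `risk/stats/permutation/permutation.py`; the toy matrices, sizes, and parameter values are illustrative assumptions, not taken from the package.

```python
# Hedged sketch: exercising the removed compute_permutation_test
# (risk-network 0.0.11). All inputs below are illustrative toy data.
import numpy as np
from scipy.sparse import csr_matrix

from risk.stats.permutation.permutation import compute_permutation_test

if __name__ == "__main__":  # required: the pool uses the "spawn" start method
    # 4 nodes; rows are neighborhoods, columns are node membership (binary)
    neighborhoods = csr_matrix(np.array([
        [1, 1, 0, 0],
        [0, 1, 1, 0],
        [0, 0, 1, 1],
        [1, 0, 0, 1],
    ]))
    # 4 nodes x 2 annotation terms (binary membership)
    annotations = csr_matrix(np.array([
        [1, 0],
        [1, 0],
        [0, 1],
        [0, 1],
    ]))

    result = compute_permutation_test(
        neighborhoods=neighborhoods,
        annotations=annotations,
        score_metric="sum",
        null_distribution="network",
        num_permutations=100,
        random_seed=888,
        max_workers=1,
    )
    # One p-value per (neighborhood, annotation term) pair
    print(result["depletion_pvals"].shape)   # (4, 2)
    print(result["enrichment_pvals"].shape)  # (4, 2)
```

Note the `if __name__ == "__main__":` guard: because the pool is created from the "spawn" context, each worker re-imports the calling module, and an unguarded call would recurse.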
@@ -1,70 +0,0 @@ risk/stats/permutation/test_functions.py (deleted)
- """
- risk/stats/permutation/test_functions
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- """
-
- import numpy as np
- from scipy.sparse import csr_matrix
-
- # NOTE: Cython optimizations provided minimal performance benefits.
- # The final version with Cython is archived in the `cython_permutation` branch.
-
- # DISPATCH_TEST_FUNCTIONS can be found at the end of the file.
-
-
- def compute_neighborhood_score_by_sum(
-     neighborhoods_matrix: csr_matrix, annotation_matrix: csr_matrix
- ) -> np.ndarray:
-     """Compute the sum of attribute values for each neighborhood using sparse matrices.
-
-     Args:
-         neighborhoods_matrix (csr_matrix): Sparse binary matrix representing neighborhoods.
-         annotation_matrix (csr_matrix): Sparse matrix representing annotation values.
-
-     Returns:
-         np.ndarray: Dense array of summed attribute values for each neighborhood.
-     """
-     # Calculate the neighborhood score as the dot product of neighborhoods and annotations
-     neighborhood_score = neighborhoods_matrix @ annotation_matrix  # Sparse matrix multiplication
-     # Convert the result to a dense array for downstream calculations
-     neighborhood_score_dense = neighborhood_score.toarray()
-     return neighborhood_score_dense
-
-
- def compute_neighborhood_score_by_stdev(
-     neighborhoods_matrix: csr_matrix, annotation_matrix: csr_matrix
- ) -> np.ndarray:
-     """Compute the standard deviation of neighborhood scores for sparse matrices.
-
-     Args:
-         neighborhoods_matrix (csr_matrix): Sparse binary matrix representing neighborhoods.
-         annotation_matrix (csr_matrix): Sparse matrix representing annotation values.
-
-     Returns:
-         np.ndarray: Standard deviation of the neighborhood scores.
-     """
-     # Calculate the neighborhood score as the dot product of neighborhoods and annotations
-     neighborhood_score = neighborhoods_matrix @ annotation_matrix  # Sparse matrix multiplication
-     # Calculate the number of elements in each neighborhood (sum of rows)
-     N = neighborhoods_matrix.sum(axis=1).A.flatten()  # Convert to 1D array
-     # Avoid division by zero by replacing zeros in N with np.nan temporarily
-     N[N == 0] = np.nan
-     # Compute the mean of the neighborhood scores
-     M = neighborhood_score.multiply(1 / N[:, None]).toarray()  # Sparse element-wise division
-     # Compute the mean of squares (EXX) directly using squared annotation matrix
-     annotation_squared = annotation_matrix.multiply(annotation_matrix)  # Element-wise squaring
-     EXX = (neighborhoods_matrix @ annotation_squared).multiply(1 / N[:, None]).toarray()
-     # Calculate variance as EXX - M^2
-     variance = EXX - np.power(M, 2)
-     # Compute the standard deviation as the square root of the variance
-     neighborhood_stdev = np.sqrt(variance)
-     # Replace np.nan back with zeros in case N was 0 (no elements in the neighborhood)
-     neighborhood_stdev[np.isnan(neighborhood_stdev)] = 0
-     return neighborhood_stdev
-
-
- # Dictionary to dispatch statistical test functions based on the score metric
- DISPATCH_TEST_FUNCTIONS = {
-     "sum": compute_neighborhood_score_by_sum,
-     "stdev": compute_neighborhood_score_by_stdev,
- }
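
To make the stdev scorer above concrete, here is a hedged sanity check against a dense NumPy reference of my own; it confirms that `compute_neighborhood_score_by_stdev` computes, per neighborhood, the population standard deviation of each annotation column over the neighborhood's member nodes, i.e. sqrt(E[X^2] - E[X]^2). The toy matrices are assumptions.

```python
# Hedged sanity check of the removed stdev scorer against a dense NumPy
# reference (runnable against risk-network 0.0.11); toy data assumed.
import numpy as np
from scipy.sparse import csr_matrix

from risk.stats.permutation.test_functions import compute_neighborhood_score_by_stdev

# 2 neighborhoods x 3 nodes, and 3 nodes x 2 annotation terms
neighborhoods = csr_matrix(np.array([[1, 1, 0], [0, 1, 1]], dtype=np.float32))
annotations = csr_matrix(np.array([[1.0, 2.0], [3.0, 0.0], [0.0, 4.0]], dtype=np.float32))

sparse_stdev = compute_neighborhood_score_by_stdev(neighborhoods, annotations)

# Dense reference: average over each neighborhood's members, then
# population stdev = sqrt(E[X^2] - E[X]^2) per annotation column.
dense_n = neighborhoods.toarray()
dense_a = annotations.toarray()
counts = dense_n.sum(axis=1, keepdims=True)        # members per neighborhood
mean = (dense_n @ dense_a) / counts                # E[X]
mean_of_squares = (dense_n @ dense_a**2) / counts  # E[X^2]
expected = np.sqrt(mean_of_squares - mean**2)

assert np.allclose(sparse_stdev, expected)
```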
@@ -1,166 +0,0 @@ risk/stats/significance.py (deleted)
- """
- risk/stats/significance
- ~~~~~~~~~~~~~~~~~~~~~~~
- """
-
- from typing import Any, Dict, Union
-
- import numpy as np
- from statsmodels.stats.multitest import fdrcorrection
-
-
- def calculate_significance_matrices(
-     depletion_pvals: np.ndarray,
-     enrichment_pvals: np.ndarray,
-     tail: str = "right",
-     pval_cutoff: float = 0.05,
-     fdr_cutoff: float = 0.05,
- ) -> Dict[str, Any]:
-     """Calculate significance matrices based on p-values and specified tail.
-
-     Args:
-         depletion_pvals (np.ndarray): Matrix of depletion p-values.
-         enrichment_pvals (np.ndarray): Matrix of enrichment p-values.
-         tail (str, optional): The tail type for significance selection ('left', 'right', 'both'). Defaults to 'right'.
-         pval_cutoff (float, optional): Cutoff for p-value significance. Defaults to 0.05.
-         fdr_cutoff (float, optional): Cutoff for FDR significance if applied. Defaults to 0.05.
-
-     Returns:
-         Dict[str, Any]: Dictionary containing the enrichment matrix, binary significance matrix,
-             and the matrix of significant enrichment values.
-     """
-     if fdr_cutoff < 1.0:
-         # Apply FDR correction to depletion p-values
-         depletion_qvals = np.apply_along_axis(fdrcorrection, 1, depletion_pvals)[:, 1, :]
-         depletion_alpha_threshold_matrix = _compute_threshold_matrix(
-             depletion_pvals, depletion_qvals, pval_cutoff=pval_cutoff, fdr_cutoff=fdr_cutoff
-         )
-         # Compute the depletion matrix using both q-values and p-values
-         depletion_matrix = (depletion_qvals**2) * (depletion_pvals**0.5)
-
-         # Apply FDR correction to enrichment p-values
-         enrichment_qvals = np.apply_along_axis(fdrcorrection, 1, enrichment_pvals)[:, 1, :]
-         enrichment_alpha_threshold_matrix = _compute_threshold_matrix(
-             enrichment_pvals, enrichment_qvals, pval_cutoff=pval_cutoff, fdr_cutoff=fdr_cutoff
-         )
-         # Compute the enrichment matrix using both q-values and p-values
-         enrichment_matrix = (enrichment_pvals**0.5) * (enrichment_qvals**2)
-     else:
-         # Compute threshold matrices based on p-value cutoffs only
-         depletion_alpha_threshold_matrix = _compute_threshold_matrix(
-             depletion_pvals, pval_cutoff=pval_cutoff
-         )
-         depletion_matrix = depletion_pvals
-
-         enrichment_alpha_threshold_matrix = _compute_threshold_matrix(
-             enrichment_pvals, pval_cutoff=pval_cutoff
-         )
-         enrichment_matrix = enrichment_pvals
-
-     # Apply a negative log10 transformation for visualization purposes
-     log_depletion_matrix = -np.log10(depletion_matrix)
-     log_enrichment_matrix = -np.log10(enrichment_matrix)
-
-     # Select the appropriate significance matrices based on the specified tail
-     significance_matrix, significant_binary_significance_matrix = _select_significance_matrices(
-         tail,
-         log_depletion_matrix,
-         depletion_alpha_threshold_matrix,
-         log_enrichment_matrix,
-         enrichment_alpha_threshold_matrix,
-     )
-
-     # Filter the enrichment matrix using the binary significance matrix
-     significant_significance_matrix = np.where(
-         significant_binary_significance_matrix == 1, significance_matrix, 0
-     )
-
-     return {
-         "significance_matrix": significance_matrix,
-         "significant_significance_matrix": significant_significance_matrix,
-         "significant_binary_significance_matrix": significant_binary_significance_matrix,
-     }
-
-
- def _select_significance_matrices(
-     tail: str,
-     log_depletion_matrix: np.ndarray,
-     depletion_alpha_threshold_matrix: np.ndarray,
-     log_enrichment_matrix: np.ndarray,
-     enrichment_alpha_threshold_matrix: np.ndarray,
- ) -> tuple:
-     """Select significance matrices based on the specified tail type.
-
-     Args:
-         tail (str): The tail type for significance selection. Options are 'left', 'right', or 'both'.
-         log_depletion_matrix (np.ndarray): Matrix of log-transformed depletion values.
-         depletion_alpha_threshold_matrix (np.ndarray): Alpha threshold matrix for depletion significance.
-         log_enrichment_matrix (np.ndarray): Matrix of log-transformed enrichment values.
-         enrichment_alpha_threshold_matrix (np.ndarray): Alpha threshold matrix for enrichment significance.
-
-     Returns:
-         tuple: A tuple containing the selected enrichment matrix and binary significance matrix.
-
-     Raises:
-         ValueError: If the provided tail type is not 'left', 'right', or 'both'.
-     """
-     if tail not in {"left", "right", "both"}:
-         raise ValueError("Invalid value for 'tail'. Must be 'left', 'right', or 'both'.")
-
-     if tail == "left":
-         # Select depletion matrix and corresponding alpha threshold for left-tail analysis
-         significance_matrix = -log_depletion_matrix
-         alpha_threshold_matrix = depletion_alpha_threshold_matrix
-     elif tail == "right":
-         # Select enrichment matrix and corresponding alpha threshold for right-tail analysis
-         significance_matrix = log_enrichment_matrix
-         alpha_threshold_matrix = enrichment_alpha_threshold_matrix
-     elif tail == "both":
-         # Select the matrix with the highest absolute values while preserving the sign
-         significance_matrix = np.where(
-             np.abs(log_depletion_matrix) >= np.abs(log_enrichment_matrix),
-             -log_depletion_matrix,
-             log_enrichment_matrix,
-         )
-         # Combine alpha thresholds using a logical OR operation
-         alpha_threshold_matrix = np.logical_or(
-             depletion_alpha_threshold_matrix, enrichment_alpha_threshold_matrix
-         )
-     else:
-         raise ValueError("Invalid value for 'tail'. Must be 'left', 'right', or 'both'.")
-
-     # Create a binary significance matrix where valid indices meet the alpha threshold
-     valid_idxs = ~np.isnan(alpha_threshold_matrix)
-     significant_binary_significance_matrix = np.zeros(alpha_threshold_matrix.shape)
-     significant_binary_significance_matrix[valid_idxs] = alpha_threshold_matrix[valid_idxs]
-
-     return significance_matrix, significant_binary_significance_matrix
-
-
- def _compute_threshold_matrix(
-     pvals: np.ndarray,
-     fdr_pvals: Union[np.ndarray, None] = None,
-     pval_cutoff: float = 0.05,
-     fdr_cutoff: float = 0.05,
- ) -> np.ndarray:
-     """Compute a threshold matrix indicating significance based on p-value and FDR cutoffs.
-
-     Args:
-         pvals (np.ndarray): Array of p-values for statistical tests.
-         fdr_pvals (np.ndarray, optional): Array of FDR-corrected p-values corresponding to the p-values. Defaults to None.
-         pval_cutoff (float, optional): Cutoff for p-value significance. Defaults to 0.05.
-         fdr_cutoff (float, optional): Cutoff for FDR significance. Defaults to 0.05.
-
-     Returns:
-         np.ndarray: A threshold matrix where 1 indicates significance based on the provided cutoffs, 0 otherwise.
-     """
-     if fdr_pvals is not None:
-         # Compute the threshold matrix based on both p-value and FDR cutoffs
-         pval_below_cutoff = pvals <= pval_cutoff
-         fdr_below_cutoff = fdr_pvals <= fdr_cutoff
-         threshold_matrix = np.logical_and(pval_below_cutoff, fdr_below_cutoff).astype(int)
-     else:
-         # Compute the threshold matrix based only on p-value cutoff
-         threshold_matrix = (pvals <= pval_cutoff).astype(int)
-
-     return threshold_matrix
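
Finally, a hedged sketch of calling the removed `calculate_significance_matrices` (again against 0.0.11). The random p-value matrices are placeholders of my own, and `fdr_cutoff=1.0` is chosen deliberately, since the function only applies FDR correction when `fdr_cutoff < 1.0`.

```python
# Hedged sketch: thresholding toy p-value matrices with the removed
# significance module (risk-network 0.0.11). All inputs are illustrative.
import numpy as np

from risk.stats.significance import calculate_significance_matrices

rng = np.random.default_rng(0)
depletion_pvals = rng.uniform(0.001, 1.0, size=(5, 3))   # 5 neighborhoods x 3 terms
enrichment_pvals = rng.uniform(0.001, 1.0, size=(5, 3))

result = calculate_significance_matrices(
    depletion_pvals,
    enrichment_pvals,
    tail="both",     # keep whichever of depletion/enrichment is stronger per cell
    pval_cutoff=0.05,
    fdr_cutoff=1.0,  # >= 1.0 skips the FDR branch; threshold on raw p-values
)

print(result["significance_matrix"].shape)                     # (5, 3), signed -log10 values
print(result["significant_binary_significance_matrix"].sum())  # number of significant cells
```

With `tail="both"`, depleted cells come out negative and enriched cells positive, which is what the downstream plotting code appears to rely on for diverging color maps.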