risk-network 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- risk/__init__.py +13 -0
- risk/annotations/__init__.py +7 -0
- risk/annotations/annotations.py +259 -0
- risk/annotations/io.py +183 -0
- risk/constants.py +31 -0
- risk/log/__init__.py +9 -0
- risk/log/console.py +16 -0
- risk/log/params.py +195 -0
- risk/neighborhoods/__init__.py +10 -0
- risk/neighborhoods/community.py +189 -0
- risk/neighborhoods/domains.py +257 -0
- risk/neighborhoods/neighborhoods.py +319 -0
- risk/network/__init__.py +8 -0
- risk/network/geometry.py +165 -0
- risk/network/graph.py +280 -0
- risk/network/io.py +319 -0
- risk/network/plot.py +795 -0
- risk/risk.py +379 -0
- risk/stats/__init__.py +6 -0
- risk/stats/permutation.py +88 -0
- risk/stats/stats.py +373 -0
- risk_network-0.0.3.dist-info/LICENSE +674 -0
- risk_network-0.0.3.dist-info/METADATA +751 -0
- risk_network-0.0.3.dist-info/RECORD +26 -0
- risk_network-0.0.3.dist-info/WHEEL +5 -0
- risk_network-0.0.3.dist-info/top_level.txt +1 -0
risk/stats/stats.py
ADDED
@@ -0,0 +1,373 @@
|
|
1
|
+
"""
|
2
|
+
risk/stats/stats
|
3
|
+
~~~~~~~~~~~~~~~~
|
4
|
+
"""
|
5
|
+
|
6
|
+
from multiprocessing import get_context, Manager
|
7
|
+
from tqdm import tqdm
|
8
|
+
from typing import Any, Callable, Dict, Union
|
9
|
+
|
10
|
+
import numpy as np
|
11
|
+
from statsmodels.stats.multitest import fdrcorrection
|
12
|
+
from threadpoolctl import threadpool_limits
|
13
|
+
|
14
|
+
from risk.stats.permutation import (
|
15
|
+
compute_neighborhood_score_by_sum,
|
16
|
+
compute_neighborhood_score_by_stdev,
|
17
|
+
compute_neighborhood_score_by_z_score,
|
18
|
+
)
|
19
|
+
|
20
|
+
# Maps the user-facing `score_metric` name to the function that computes
# neighborhood scores during the permutation test.
DISPATCH_PERMUTATION_TABLE = {
    "sum": compute_neighborhood_score_by_sum,
    "stdev": compute_neighborhood_score_by_stdev,
    "z_score": compute_neighborhood_score_by_z_score,
}
|
25
|
+
|
26
|
+
|
27
|
+
def compute_permutation(
    neighborhoods: np.ndarray,
    annotations: np.ndarray,
    score_metric: str = "sum",
    null_distribution: str = "network",
    num_permutations: int = 1000,
    random_seed: int = 888,
    max_workers: int = 1,
) -> Dict[str, Any]:
    """Compute permutation test for enrichment and depletion in neighborhoods.

    Args:
        neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
        annotations (np.ndarray): Binary matrix representing annotations.
        score_metric (str, optional): Metric to use for scoring ('sum', 'stdev', or 'z_score'). Defaults to "sum".
        null_distribution (str, optional): Type of null distribution ('network' or other). Defaults to "network".
        num_permutations (int, optional): Number of permutations to run. Defaults to 1000.
        random_seed (int, optional): Seed for random number generation. Defaults to 888.
        max_workers (int, optional): Number of workers for multiprocessing. Defaults to 1.

    Returns:
        dict: Dictionary containing depletion and enrichment p-values.

    Raises:
        ValueError: If `score_metric` is not a supported metric.
    """
    # Cast to float32 up front; astype also copies, so downstream in-place
    # NaN replacement does not mutate the caller's arrays
    neighborhoods = neighborhoods.astype(np.float32)
    annotations = annotations.astype(np.float32)
    # Retrieve the appropriate neighborhood score function based on the metric.
    # Fail fast with a descriptive error instead of an opaque KeyError
    try:
        neighborhood_score_func = DISPATCH_PERMUTATION_TABLE[score_metric]
    except KeyError:
        valid_metrics = ", ".join(sorted(DISPATCH_PERMUTATION_TABLE))
        raise ValueError(
            f"Invalid score_metric '{score_metric}'. Valid options are: {valid_metrics}"
        ) from None

    # Run the permutation test to calculate depletion and enrichment counts
    counts_depletion, counts_enrichment = _run_permutation_test(
        neighborhoods=neighborhoods,
        annotations=annotations,
        neighborhood_score_func=neighborhood_score_func,
        null_distribution=null_distribution,
        num_permutations=num_permutations,
        random_seed=random_seed,
        max_workers=max_workers,
    )

    # Convert counts to p-values; np.maximum(counts, 1) keeps p-values strictly
    # positive (the smallest attainable p-value is 1/num_permutations)
    depletion_pvals = np.maximum(counts_depletion, 1) / num_permutations
    enrichment_pvals = np.maximum(counts_enrichment, 1) / num_permutations

    return {
        "depletion_pvals": depletion_pvals,
        "enrichment_pvals": enrichment_pvals,
    }
|
76
|
+
|
77
|
+
|
78
|
+
def _run_permutation_test(
    neighborhoods: np.ndarray,
    annotations: np.ndarray,
    neighborhood_score_func: Callable,
    null_distribution: str = "network",
    num_permutations: int = 1000,
    random_seed: int = 888,
    max_workers: int = 4,
) -> tuple:
    """Run a permutation test to calculate enrichment and depletion counts.

    Args:
        neighborhoods (np.ndarray): The neighborhood matrix.
        annotations (np.ndarray): The annotation matrix. Modified in place:
            NaN entries are replaced with zeros.
        neighborhood_score_func (Callable): Function to calculate neighborhood scores.
        null_distribution (str, optional): Type of null distribution. "network" uses
            all rows of `annotations`; any other value restricts to rows with at
            least one non-NaN annotation. Defaults to "network".
        num_permutations (int, optional): Number of permutations. Defaults to 1000.
        random_seed (int, optional): Seed for random number generation. Defaults to 888.
        max_workers (int, optional): Number of workers for multiprocessing. Defaults to 4.

    Returns:
        tuple: Depletion and enrichment counts.
    """
    # Seed NumPy's global RNG in the parent process.
    # NOTE(review): with the "spawn" start method used below, worker processes do
    # NOT inherit this RNG state, so per-worker permutations may not be
    # reproducible across runs — confirm whether exact reproducibility is required.
    np.random.seed(random_seed)

    # Determine the indices to use based on the null distribution type
    if null_distribution == "network":
        idxs = range(annotations.shape[0])
    else:
        # Keep only rows that have at least one non-NaN annotation value
        idxs = np.nonzero(np.sum(~np.isnan(annotations), axis=1))[0]

    # Replace NaNs with zeros in the annotations matrix (in-place mutation)
    annotations[np.isnan(annotations)] = 0
    annotation_matrix_obsv = annotations[idxs]
    # Restrict neighborhood columns to the same index set
    neighborhoods_matrix_obsv = neighborhoods.T[idxs].T

    # Calculate observed neighborhood scores; suppress divide/invalid warnings
    # raised by the scoring functions on degenerate (e.g. empty) neighborhoods
    with np.errstate(invalid="ignore", divide="ignore"):
        observed_neighborhood_scores = neighborhood_score_func(
            neighborhoods_matrix_obsv, annotation_matrix_obsv
        )

    # Initialize count matrices for depletion and enrichment
    counts_depletion = np.zeros(observed_neighborhood_scores.shape)
    counts_enrichment = np.zeros(observed_neighborhood_scores.shape)

    # Split permutations across workers; the first `remainder` workers take one
    # extra permutation each so the total is exactly num_permutations
    subset_size = num_permutations // max_workers
    remainder = num_permutations % max_workers

    # Use the spawn context for creating a new multiprocessing pool
    ctx = get_context("spawn")
    # Manager-backed shared counter lets workers report progress to the parent
    manager = Manager()
    progress_counter = manager.Value("i", 0)
    total_progress = num_permutations

    # Execute the permutation test using multiprocessing
    with ctx.Pool(max_workers) as pool:
        with tqdm(total=total_progress, desc="Total progress", position=0) as progress:
            # One argument tuple per worker (see _permutation_process_subset)
            params_list = [
                (
                    annotations,
                    np.array(idxs),
                    neighborhoods_matrix_obsv,
                    observed_neighborhood_scores,
                    neighborhood_score_func,
                    subset_size + (1 if i < remainder else 0),
                    progress_counter,
                )
                for i in range(max_workers)
            ]

            # Start the permutation process in parallel
            results = pool.starmap_async(_permutation_process_subset, params_list, chunksize=1)

            # Update progress bar based on progress_counter
            # NOTE: Waiting for results to be ready while updating progress bar gives a big improvement
            # in performance, especially for large number of permutations and workers
            while not results.ready():
                progress.update(progress_counter.value - progress.n)
                results.wait(0.05)  # Wait for 50ms

            # Ensure progress bar reaches 100%
            progress.update(total_progress - progress.n)

    # Accumulate results from each worker (results are ready once the pool exits)
    for local_counts_depletion, local_counts_enrichment in results.get():
        counts_depletion = np.add(counts_depletion, local_counts_depletion)
        counts_enrichment = np.add(counts_enrichment, local_counts_enrichment)

    return counts_depletion, counts_enrichment
|
171
|
+
|
172
|
+
|
173
|
+
def _permutation_process_subset(
    annotation_matrix: np.ndarray,
    idxs: np.ndarray,
    neighborhoods_matrix_obsv: np.ndarray,
    observed_neighborhood_scores: np.ndarray,
    neighborhood_score_func: Callable,
    subset_size: int,
    progress_counter,
) -> tuple:
    """Run one worker's share of permutations and tally extreme scores.

    Args:
        annotation_matrix (np.ndarray): The annotation matrix.
        idxs (np.ndarray): Indices of valid rows in the matrix.
        neighborhoods_matrix_obsv (np.ndarray): Observed neighborhoods matrix.
        observed_neighborhood_scores (np.ndarray): Observed neighborhood scores.
        neighborhood_score_func (Callable): Function to calculate neighborhood scores.
        subset_size (int): Number of permutations to run in this subset.
        progress_counter: Shared counter for tracking progress.

    Returns:
        tuple: Local counts of depletion and enrichment.
    """
    # Accumulators for how often a permuted score is at least as extreme as
    # the observed score (lower-or-equal for depletion, higher-or-equal for enrichment)
    depletion_tally = np.zeros(observed_neighborhood_scores.shape)
    enrichment_tally = np.zeros(observed_neighborhood_scores.shape)

    # Pin BLAS to one thread: each worker process should not spawn its own
    # thread pool on top of the multiprocessing pool
    with threadpool_limits(limits=1, user_api="blas"):
        for _ in range(subset_size):
            # Shuffle the annotation rows to build one null sample
            shuffled_rows = np.random.permutation(idxs)
            permuted_annotations = annotation_matrix[shuffled_rows]
            # Score the permuted annotations against the observed neighborhoods,
            # silencing divide/invalid warnings from degenerate inputs
            with np.errstate(invalid="ignore", divide="ignore"):
                permuted_scores = neighborhood_score_func(
                    neighborhoods_matrix_obsv, permuted_annotations
                )
            # Count permutations at least as extreme as the observed scores
            depletion_tally += permuted_scores <= observed_neighborhood_scores
            enrichment_tally += permuted_scores >= observed_neighborhood_scores
            # Report progress to the shared counter after each permutation
            progress_counter.value += 1

    return depletion_tally, enrichment_tally
|
221
|
+
|
222
|
+
|
223
|
+
def calculate_significance_matrices(
    depletion_pvals: np.ndarray,
    enrichment_pvals: np.ndarray,
    tail: str = "right",
    pval_cutoff: float = 0.05,
    fdr_cutoff: float = 0.05,
) -> dict:
    """Calculate significance matrices based on p-values and specified tail.

    Args:
        depletion_pvals (np.ndarray): Matrix of depletion p-values.
        enrichment_pvals (np.ndarray): Matrix of enrichment p-values.
        tail (str, optional): The tail type for significance selection ('left', 'right', 'both'). Defaults to 'right'.
        pval_cutoff (float, optional): Cutoff for p-value significance. Defaults to 0.05.
        fdr_cutoff (float, optional): Cutoff for FDR significance if applied. Defaults to 0.05.

    Returns:
        dict: Dictionary containing the enrichment matrix, binary significance matrix,
              and the matrix of significant enrichment values.
    """
    if fdr_cutoff < 1.0:
        # FDR-correct each row of p-values; fdrcorrection returns (rejected, qvals),
        # so index [:, 1, :] extracts the corrected q-values
        depletion_qvals = np.apply_along_axis(fdrcorrection, 1, depletion_pvals)[:, 1, :]
        enrichment_qvals = np.apply_along_axis(fdrcorrection, 1, enrichment_pvals)[:, 1, :]

        # Significance requires passing both the raw p-value and the FDR cutoffs
        depletion_alpha_threshold_matrix = _compute_threshold_matrix(
            depletion_pvals, depletion_qvals, pval_cutoff=pval_cutoff, fdr_cutoff=fdr_cutoff
        )
        enrichment_alpha_threshold_matrix = _compute_threshold_matrix(
            enrichment_pvals, enrichment_qvals, pval_cutoff=pval_cutoff, fdr_cutoff=fdr_cutoff
        )

        # Blend q-values and p-values (q² · √p) into the working matrices
        depletion_matrix = (depletion_qvals**2) * (depletion_pvals**0.5)
        enrichment_matrix = (enrichment_qvals**2) * (enrichment_pvals**0.5)
    else:
        # No FDR correction requested: threshold on raw p-values alone
        depletion_alpha_threshold_matrix = _compute_threshold_matrix(
            depletion_pvals, pval_cutoff=pval_cutoff
        )
        enrichment_alpha_threshold_matrix = _compute_threshold_matrix(
            enrichment_pvals, pval_cutoff=pval_cutoff
        )
        depletion_matrix = depletion_pvals
        enrichment_matrix = enrichment_pvals

    # -log10 transform so smaller p-values map to larger display values
    log_depletion_matrix = -np.log10(depletion_matrix)
    log_enrichment_matrix = -np.log10(enrichment_matrix)

    # Pick the matrices that match the requested tail
    enrichment_matrix, binary_enrichment_matrix = _select_significance_matrices(
        tail,
        log_depletion_matrix,
        depletion_alpha_threshold_matrix,
        log_enrichment_matrix,
        enrichment_alpha_threshold_matrix,
    )

    # Zero out entries that do not pass the binary significance mask
    significant_enrichment_matrix = np.where(binary_enrichment_matrix == 1, enrichment_matrix, 0)

    return {
        "enrichment_matrix": enrichment_matrix,
        "binary_enrichment_matrix": binary_enrichment_matrix,
        "significant_enrichment_matrix": significant_enrichment_matrix,
    }
|
292
|
+
|
293
|
+
|
294
|
+
def _select_significance_matrices(
|
295
|
+
tail: str,
|
296
|
+
log_depletion_matrix: np.ndarray,
|
297
|
+
depletion_alpha_threshold_matrix: np.ndarray,
|
298
|
+
log_enrichment_matrix: np.ndarray,
|
299
|
+
enrichment_alpha_threshold_matrix: np.ndarray,
|
300
|
+
) -> tuple:
|
301
|
+
"""Select significance matrices based on the specified tail type.
|
302
|
+
|
303
|
+
Args:
|
304
|
+
tail (str): The tail type for significance selection. Options are 'left', 'right', or 'both'.
|
305
|
+
log_depletion_matrix (np.ndarray): Matrix of log-transformed depletion values.
|
306
|
+
depletion_alpha_threshold_matrix (np.ndarray): Alpha threshold matrix for depletion significance.
|
307
|
+
log_enrichment_matrix (np.ndarray): Matrix of log-transformed enrichment values.
|
308
|
+
enrichment_alpha_threshold_matrix (np.ndarray): Alpha threshold matrix for enrichment significance.
|
309
|
+
|
310
|
+
Returns:
|
311
|
+
tuple: A tuple containing the selected enrichment matrix and binary significance matrix.
|
312
|
+
|
313
|
+
Raises:
|
314
|
+
ValueError: If the provided tail type is not 'left', 'right', or 'both'.
|
315
|
+
"""
|
316
|
+
if tail not in {"left", "right", "both"}:
|
317
|
+
raise ValueError("Invalid value for 'tail'. Must be 'left', 'right', or 'both'.")
|
318
|
+
|
319
|
+
if tail == "left":
|
320
|
+
# Select depletion matrix and corresponding alpha threshold for left-tail analysis
|
321
|
+
enrichment_matrix = -log_depletion_matrix
|
322
|
+
alpha_threshold_matrix = depletion_alpha_threshold_matrix
|
323
|
+
elif tail == "right":
|
324
|
+
# Select enrichment matrix and corresponding alpha threshold for right-tail analysis
|
325
|
+
enrichment_matrix = log_enrichment_matrix
|
326
|
+
alpha_threshold_matrix = enrichment_alpha_threshold_matrix
|
327
|
+
elif tail == "both":
|
328
|
+
# Select the matrix with the highest absolute values while preserving the sign
|
329
|
+
enrichment_matrix = np.where(
|
330
|
+
np.abs(log_depletion_matrix) >= np.abs(log_enrichment_matrix),
|
331
|
+
-log_depletion_matrix,
|
332
|
+
log_enrichment_matrix,
|
333
|
+
)
|
334
|
+
# Combine alpha thresholds using a logical OR operation
|
335
|
+
alpha_threshold_matrix = np.logical_or(
|
336
|
+
depletion_alpha_threshold_matrix, enrichment_alpha_threshold_matrix
|
337
|
+
)
|
338
|
+
|
339
|
+
# Create a binary significance matrix where valid indices meet the alpha threshold
|
340
|
+
valid_idxs = ~np.isnan(alpha_threshold_matrix)
|
341
|
+
binary_enrichment_matrix = np.zeros(alpha_threshold_matrix.shape)
|
342
|
+
binary_enrichment_matrix[valid_idxs] = alpha_threshold_matrix[valid_idxs]
|
343
|
+
|
344
|
+
return enrichment_matrix, binary_enrichment_matrix
|
345
|
+
|
346
|
+
|
347
|
+
def _compute_threshold_matrix(
|
348
|
+
pvals: np.ndarray,
|
349
|
+
fdr_pvals: Union[np.ndarray, None] = None,
|
350
|
+
pval_cutoff: float = 0.05,
|
351
|
+
fdr_cutoff: float = 0.05,
|
352
|
+
) -> np.ndarray:
|
353
|
+
"""Compute a threshold matrix indicating significance based on p-value and FDR cutoffs.
|
354
|
+
|
355
|
+
Args:
|
356
|
+
pvals (np.ndarray): Array of p-values for statistical tests.
|
357
|
+
fdr_pvals (np.ndarray, optional): Array of FDR-corrected p-values corresponding to the p-values. Defaults to None.
|
358
|
+
pval_cutoff (float, optional): Cutoff for p-value significance. Defaults to 0.05.
|
359
|
+
fdr_cutoff (float, optional): Cutoff for FDR significance. Defaults to 0.05.
|
360
|
+
|
361
|
+
Returns:
|
362
|
+
np.ndarray: A threshold matrix where 1 indicates significance based on the provided cutoffs, 0 otherwise.
|
363
|
+
"""
|
364
|
+
if fdr_pvals is not None:
|
365
|
+
# Compute the threshold matrix based on both p-value and FDR cutoffs
|
366
|
+
pval_below_cutoff = pvals <= pval_cutoff
|
367
|
+
fdr_below_cutoff = fdr_pvals <= fdr_cutoff
|
368
|
+
threshold_matrix = np.logical_and(pval_below_cutoff, fdr_below_cutoff).astype(int)
|
369
|
+
else:
|
370
|
+
# Compute the threshold matrix based only on p-value cutoff
|
371
|
+
threshold_matrix = (pvals <= pval_cutoff).astype(int)
|
372
|
+
|
373
|
+
return threshold_matrix
|