risk-network 0.0.6b9__py3-none-any.whl → 0.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- risk/__init__.py +1 -1
- risk/annotations/annotations.py +61 -42
- risk/annotations/io.py +14 -14
- risk/log/__init__.py +1 -1
- risk/log/config.py +139 -0
- risk/log/params.py +6 -4
- risk/neighborhoods/community.py +25 -36
- risk/neighborhoods/domains.py +29 -27
- risk/neighborhoods/neighborhoods.py +171 -72
- risk/network/graph.py +92 -41
- risk/network/io.py +22 -26
- risk/network/plot.py +132 -19
- risk/risk.py +84 -81
- risk/stats/__init__.py +2 -2
- risk/stats/hypergeom.py +30 -107
- risk/stats/permutation/permutation.py +23 -17
- risk/stats/permutation/test_functions.py +2 -2
- risk/stats/poisson.py +44 -0
- {risk_network-0.0.6b9.dist-info → risk_network-0.0.7.dist-info}/METADATA +1 -1
- risk_network-0.0.7.dist-info/RECORD +30 -0
- {risk_network-0.0.6b9.dist-info → risk_network-0.0.7.dist-info}/WHEEL +1 -1
- risk/log/console.py +0 -16
- risk/stats/fisher_exact.py +0 -132
- risk_network-0.0.6b9.dist-info/RECORD +0 -30
- {risk_network-0.0.6b9.dist-info → risk_network-0.0.7.dist-info}/LICENSE +0 -0
- {risk_network-0.0.6b9.dist-info → risk_network-0.0.7.dist-info}/top_level.txt +0 -0
risk/stats/permutation/permutation.py
@@ -4,6 +4,7 @@ risk/stats/permutation/permutation
 """
 
 from multiprocessing import get_context, Manager
+from multiprocessing.managers import ValueProxy
 from tqdm import tqdm
 from typing import Any, Callable, Dict
 
@@ -28,7 +29,7 @@ def compute_permutation_test(
         neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
         annotations (np.ndarray): Binary matrix representing annotations.
         score_metric (str, optional): Metric to use for scoring ('sum', 'mean', etc.). Defaults to "sum".
-        null_distribution (str, optional): Type of null distribution ('network' or
+        null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
         num_permutations (int, optional): Number of permutations to run. Defaults to 1000.
         random_seed (int, optional): Seed for random number generation. Defaults to 888.
         max_workers (int, optional): Number of workers for multiprocessing. Defaults to 1.
@@ -78,7 +79,7 @@ def _run_permutation_test(
         neighborhoods (np.ndarray): The neighborhood matrix.
         annotations (np.ndarray): The annotation matrix.
         neighborhood_score_func (Callable): Function to calculate neighborhood scores.
-        null_distribution (str, optional): Type of null distribution. Defaults to "network".
+        null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
         num_permutations (int, optional): Number of permutations. Defaults to 1000.
         random_seed (int, optional): Seed for random number generation. Defaults to 888.
         max_workers (int, optional): Number of workers for multiprocessing. Defaults to 4.
@@ -91,8 +92,12 @@ def _run_permutation_test(
     # Determine the indices to use based on the null distribution type
     if null_distribution == "network":
         idxs = range(annotations.shape[0])
-
+    elif null_distribution == "annotations":
         idxs = np.nonzero(np.sum(~np.isnan(annotations), axis=1))[0]
+    else:
+        raise ValueError(
+            "Invalid null_distribution value. Choose either 'network' or 'annotations'."
+        )
 
     # Replace NaNs with zeros in the annotations matrix
     annotations[np.isnan(annotations)] = 0
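The hunk above turns the null-distribution choice into an explicit three-way branch. As a quick illustration of what each mode selects (a standalone sketch with made-up data, not code lifted from the package), the 'network' null permutes over every node index, while the 'annotations' null keeps only nodes that carry at least one non-NaN annotation:

import numpy as np

# Toy annotation matrix: 4 nodes x 3 terms, NaN marking unannotated entries (illustrative only)
annotations = np.array(
    [
        [1.0, np.nan, 0.0],
        [np.nan, np.nan, np.nan],  # node with no annotations at all
        [0.0, 1.0, 1.0],
        [1.0, 0.0, np.nan],
    ]
)

# 'network' null: permute over every node in the network
network_idxs = range(annotations.shape[0])

# 'annotations' null: permute only over nodes with at least one annotation value
annotation_idxs = np.nonzero(np.sum(~np.isnan(annotations), axis=1))[0]

print(list(network_idxs))  # [0, 1, 2, 3]
print(annotation_idxs)  # [0 2 3] -- the all-NaN node is excluded

The remaining hunks of this file continue below.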
@@ -107,7 +112,6 @@ def _run_permutation_test(
     # Initialize count matrices for depletion and enrichment
     counts_depletion = np.zeros(observed_neighborhood_scores.shape)
     counts_enrichment = np.zeros(observed_neighborhood_scores.shape)
-
     # Determine the number of permutations to run in each worker process
     subset_size = num_permutations // max_workers
     remainder = num_permutations % max_workers
@@ -117,7 +121,6 @@ def _run_permutation_test(
     manager = Manager()
     progress_counter = manager.Value("i", 0)
     total_progress = num_permutations
-
     # Execute the permutation test using multiprocessing
     with ctx.Pool(max_workers) as pool:
         with tqdm(total=total_progress, desc="Total progress", position=0) as progress:
@@ -131,7 +134,8 @@ def _run_permutation_test(
                     neighborhood_score_func,
                     subset_size + (1 if i < remainder else 0),
                     progress_counter,
-
+                    max_workers,
+                    rng,  # Pass the random number generator to each worker
                 )
                 for i in range(max_workers)
             ]
@@ -148,10 +152,10 @@ def _run_permutation_test(
             # Ensure progress bar reaches 100%
             progress.update(total_progress - progress.n)
 
-
-
-
-
+            # Accumulate results from each worker
+            for local_counts_depletion, local_counts_enrichment in results.get():
+                counts_depletion = np.add(counts_depletion, local_counts_depletion)
+                counts_enrichment = np.add(counts_enrichment, local_counts_enrichment)
 
     return counts_depletion, counts_enrichment
 
@@ -163,7 +167,8 @@ def _permutation_process_subset(
     observed_neighborhood_scores: np.ndarray,
     neighborhood_score_func: Callable,
     subset_size: int,
-    progress_counter,
+    progress_counter: ValueProxy,
+    max_workers: int,
     rng: np.random.Generator,
 ) -> tuple:
     """Process a subset of permutations for the permutation test.
@@ -175,7 +180,8 @@ def _permutation_process_subset(
         observed_neighborhood_scores (np.ndarray): Observed neighborhood scores.
         neighborhood_score_func (Callable): Function to calculate neighborhood scores.
         subset_size (int): Number of permutations to run in this subset.
-        progress_counter: Shared counter for tracking progress.
+        progress_counter (multiprocessing.managers.ValueProxy): Shared counter for tracking progress.
+        max_workers (int): Number of workers for multiprocessing.
         rng (np.random.Generator): Random number generator object.
 
     Returns:
@@ -184,10 +190,11 @@ def _permutation_process_subset(
     # Initialize local count matrices for this worker
    local_counts_depletion = np.zeros(observed_neighborhood_scores.shape)
    local_counts_enrichment = np.zeros(observed_neighborhood_scores.shape)
-    # NOTE: Limit the number of threads used by NumPy's BLAS implementation to 1.
-    # This can help prevent oversubscription of CPU resources during multiprocessing,
-    #
-
+    # NOTE: Limit the number of threads used by NumPy's BLAS implementation to 1 when more than one worker is used.
+    # This can help prevent oversubscription of CPU resources during multiprocessing, ensuring that each process
+    # doesn't use more than one CPU core.
+    limits = None if max_workers == 1 else 1
+    with threadpool_limits(limits=limits, user_api="blas"):
         for _ in range(subset_size):
             # Permute the annotation matrix using the RNG
             annotation_matrix_permut = annotation_matrix[rng.permutation(idxs)]
@@ -205,7 +212,6 @@ def _permutation_process_subset(
                 local_counts_enrichment,
                 permuted_neighborhood_scores >= observed_neighborhood_scores,
             )
-
             # Update the shared progress counter
             progress_counter.value += 1
 
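These hunks tie three pieces together: a shared Manager counter for progress reporting, per-worker accumulation of depletion/enrichment counts, and threadpoolctl's threadpool_limits so that BLAS-backed NumPy calls inside each worker stay on a single core whenever more than one worker runs. Below is a minimal, self-contained sketch of that pattern; the _worker function and its toy scoring are hypothetical stand-ins, and only the Manager/Pool/threadpool_limits usage mirrors the diff.

from multiprocessing import get_context, Manager

import numpy as np
from threadpoolctl import threadpool_limits


def _worker(matrix, n_iters, progress_counter, max_workers, seed):
    """Hypothetical worker: permutes rows of `matrix` and accumulates a toy score."""
    rng = np.random.default_rng(seed)
    total = 0.0
    # Cap BLAS threads at 1 only when several workers share the CPUs
    limits = None if max_workers == 1 else 1
    with threadpool_limits(limits=limits, user_api="blas"):
        for _ in range(n_iters):
            permuted = matrix[rng.permutation(matrix.shape[0])]
            total += float(permuted.sum())
            progress_counter.value += 1  # shared progress counter, as in the diff
    return total


if __name__ == "__main__":
    data = np.random.rand(100, 20)
    max_workers = 2
    ctx = get_context("spawn")
    with Manager() as manager:
        counter = manager.Value("i", 0)
        with ctx.Pool(max_workers) as pool:
            args = [(data, 50, counter, max_workers, seed) for seed in range(max_workers)]
            results = pool.starmap(_worker, args)
    print(sum(results), counter.value)

Capping BLAS threads only when max_workers > 1 leaves the single-worker path free to use all cores, which appears to be the rationale for the `limits = None if max_workers == 1 else 1` line added in the diff.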
risk/stats/poisson.py ADDED
@@ -0,0 +1,44 @@
+"""
+risk/stats/poisson
+~~~~~~~~~~~~~~~~~~
+"""
+
+from typing import Dict, Any
+
+import numpy as np
+from scipy.stats import poisson
+
+
+def compute_poisson_test(
+    neighborhoods: np.ndarray, annotations: np.ndarray, null_distribution: str = "network"
+) -> Dict[str, Any]:
+    """Compute Poisson test for enrichment and depletion in neighborhoods with selectable null distribution.
+
+    Args:
+        neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
+        annotations (np.ndarray): Binary matrix representing annotations.
+        null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
+
+    Returns:
+        dict: Dictionary containing depletion and enrichment p-values.
+    """
+    # Matrix multiplication to get the number of annotated nodes in each neighborhood
+    annotated_in_neighborhood = neighborhoods @ annotations
+
+    # Compute lambda_expected based on the chosen null distribution
+    if null_distribution == "network":
+        # Use the mean across neighborhoods (axis=1)
+        lambda_expected = np.mean(annotated_in_neighborhood, axis=1, keepdims=True)
+    elif null_distribution == "annotations":
+        # Use the mean across annotations (axis=0)
+        lambda_expected = np.mean(annotated_in_neighborhood, axis=0, keepdims=True)
+    else:
+        raise ValueError(
+            "Invalid null_distribution value. Choose either 'network' or 'annotations'."
+        )
+
+    # Compute p-values for enrichment and depletion using Poisson distribution
+    enrichment_pvals = 1 - poisson.cdf(annotated_in_neighborhood - 1, lambda_expected)
+    depletion_pvals = poisson.cdf(annotated_in_neighborhood, lambda_expected)
+
+    return {"enrichment_pvals": enrichment_pvals, "depletion_pvals": depletion_pvals}
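The new module is self-contained, so a short usage sketch shows its shape; the toy matrices below are made up for illustration, and only compute_poisson_test itself comes from the package:

import numpy as np

from risk.stats.poisson import compute_poisson_test

# Toy inputs: 3 neighborhoods x 5 nodes, 5 nodes x 2 annotation terms (illustrative only)
neighborhoods = np.array(
    [
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
        [1, 0, 0, 1, 1],
    ]
)
annotations = np.array(
    [
        [1, 0],
        [1, 1],
        [0, 1],
        [0, 0],
        [1, 0],
    ]
)

result = compute_poisson_test(neighborhoods, annotations, null_distribution="network")
print(result["enrichment_pvals"].shape)  # (3, 2): one p-value per neighborhood/annotation pair
print(result["depletion_pvals"])

With null_distribution="network", the expected rate is the per-neighborhood mean annotation count broadcast across annotation columns; the "annotations" mode averages per term instead.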
risk_network-0.0.7.dist-info/RECORD ADDED
@@ -0,0 +1,30 @@
+risk/__init__.py,sha256=gan-IiMjd-I7uHhZ63SkH_N40btvcNbrREH27gHjIZA,105
+risk/constants.py,sha256=XInRaH78Slnw_sWgAsBFbUHkyA0h0jL0DKGuQNbOvjM,550
+risk/risk.py,sha256=FaQhDCBZxZSAXJsScH0rSbjjCTNZA5vgf9rJj1GHW44,20924
+risk/annotations/__init__.py,sha256=vUpVvMRE5if01Ic8QY6M2Ae3EFGJHdugEe9PdEkAW4Y,138
+risk/annotations/annotations.py,sha256=ySc_N3nXnKx5RnOpFaEkM6zvTbswbrRcfFLzM0KdOck,11391
+risk/annotations/io.py,sha256=TTXVJQgUGAlKpnGBcx7Dow146IGyozA03nSbl3S7M5M,9475
+risk/log/__init__.py,sha256=aDUz5LMFQsz0UlsQI2EdXtiBKRLfml1UMeZKC7QQIGU,134
+risk/log/config.py,sha256=m8pzj-hN4vI_2JdJUfyOoSvzT8_lhoIfBt27sKbnOes,4535
+risk/log/params.py,sha256=DUmsqPo9hi3rQHFgLTunP14I-vVoyQSFZbx5aSYmVts,6363
+risk/neighborhoods/__init__.py,sha256=tKKEg4lsbqFukpgYlUGxU_v_9FOqK7V0uvM9T2QzoL0,206
+risk/neighborhoods/community.py,sha256=stYYBXeZlGLMV-k8ckQeIqThT6v9y-S3hETobAo9590,6817
+risk/neighborhoods/domains.py,sha256=Ov52EEr-tWqy96y8_0tJ9f1K8FI-8tZQxHR7a59A1k8,10738
+risk/neighborhoods/neighborhoods.py,sha256=M-wL4xB_BUTlSZg90swygO5NdrZ6hFUFqs6jsiZaqHk,18260
+risk/network/__init__.py,sha256=iEPeJdZfqp0toxtbElryB8jbz9_t_k4QQ3iDvKE8C_0,126
+risk/network/geometry.py,sha256=H1yGVVqgbfpzBzJwEheDLfvGLSA284jGQQTn612L4Vc,6759
+risk/network/graph.py,sha256=_LEoom4EEowGALuJKSXcev9RAAHu2FqIeq3u7mkifW0,16479
+risk/network/io.py,sha256=ASoKG4vkCC_aHwxlF4502W_SyaaCrRnHsTmRwL00spI,21266
+risk/network/plot.py,sha256=9GcLKkH3CMEtraYnfdLXNJCi04rBQCjw4T6Q8k5yNOI,67091
+risk/stats/__init__.py,sha256=WcgoETQ-hS0LQqKRsAMIPtP15xZ-4eul6VUBuUx4Wzc,220
+risk/stats/hypergeom.py,sha256=o6Qnj31gCAKxr2uQirXrbv7XvdDJGEq69MFW-ubx_hA,2272
+risk/stats/poisson.py,sha256=8x9hB4DCukq4gNIlIKO-c_jYG1-BTwTX53oLauFyfj8,1793
+risk/stats/stats.py,sha256=kvShov-94W6ffgDUTb522vB9hDJQSyTsYif_UIaFfSM,7059
+risk/stats/permutation/__init__.py,sha256=neJp7FENC-zg_CGOXqv-iIvz1r5XUKI9Ruxhmq7kDOI,105
+risk/stats/permutation/permutation.py,sha256=kmSZ7bQ-AD0TFiQDgIwfxTeqHa4pjp7fIcOzAqyhUNY,9714
+risk/stats/permutation/test_functions.py,sha256=lftOude6hee0pyR80HlBD32522JkDoN5hrKQ9VEbuoY,2345
+risk_network-0.0.7.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
+risk_network-0.0.7.dist-info/METADATA,sha256=xxBE9FEu5YSnh41_SJZeJskSX-VFY0HnRarXFapvEag,43140
+risk_network-0.0.7.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+risk_network-0.0.7.dist-info/top_level.txt,sha256=NX7C2PFKTvC1JhVKv14DFlFAIFnKc6Lpsu1ZfxvQwVw,5
+risk_network-0.0.7.dist-info/RECORD,,
risk/log/console.py DELETED
@@ -1,16 +0,0 @@
-"""
-risk/log/console
-~~~~~~~~~~~~~~~~
-"""
-
-
-def print_header(input_string: str) -> None:
-    """Print the input string as a header with a line of dashes above and below it.
-
-    Args:
-        input_string (str): The string to be printed as a header.
-    """
-    border = "-" * len(input_string)
-    print(border)
-    print(input_string)
-    print(border)
risk/stats/fisher_exact.py DELETED
@@ -1,132 +0,0 @@
-"""
-risk/stats/fisher_exact
-~~~~~~~~~~~~~~~~~~~~~~~
-"""
-
-from multiprocessing import get_context, Manager
-from tqdm import tqdm
-from typing import Any, Dict
-
-import numpy as np
-from scipy.stats import fisher_exact
-
-
-def compute_fisher_exact_test(
-    neighborhoods: np.ndarray,
-    annotations: np.ndarray,
-    max_workers: int = 4,
-) -> Dict[str, Any]:
-    """Compute Fisher's exact test for enrichment and depletion in neighborhoods.
-
-    Args:
-        neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
-        annotations (np.ndarray): Binary matrix representing annotations.
-        max_workers (int, optional): Number of workers for multiprocessing. Defaults to 4.
-
-    Returns:
-        dict: Dictionary containing depletion and enrichment p-values.
-    """
-    # Ensure that the matrices are binary (boolean) and free of NaN values
-    neighborhoods = neighborhoods.astype(bool)  # Convert to boolean
-    annotations = annotations.astype(bool)  # Convert to boolean
-
-    # Initialize the process of calculating p-values using multiprocessing
-    ctx = get_context("spawn")
-    manager = Manager()
-    progress_counter = manager.Value("i", 0)
-    total_tasks = neighborhoods.shape[1] * annotations.shape[1]
-
-    # Calculate the workload per worker
-    chunk_size = total_tasks // max_workers
-    remainder = total_tasks % max_workers
-
-    # Execute the Fisher's exact test using multiprocessing
-    with ctx.Pool(max_workers) as pool:
-        with tqdm(total=total_tasks, desc="Total progress", position=0) as progress:
-            params_list = []
-            start_idx = 0
-            for i in range(max_workers):
-                end_idx = start_idx + chunk_size + (1 if i < remainder else 0)
-                params_list.append(
-                    (neighborhoods, annotations, start_idx, end_idx, progress_counter)
-                )
-                start_idx = end_idx
-
-            # Start the Fisher's exact test process in parallel
-            results = pool.starmap_async(_fisher_exact_process_subset, params_list, chunksize=1)
-
-            # Update progress bar based on progress_counter
-            while not results.ready():
-                progress.update(progress_counter.value - progress.n)
-                results.wait(0.05)  # Wait for 50ms
-            # Ensure progress bar reaches 100%
-            progress.update(total_tasks - progress.n)
-
-    # Accumulate results from each worker
-    depletion_pvals, enrichment_pvals = [], []
-    for dp, ep in results.get():
-        depletion_pvals.extend(dp)
-        enrichment_pvals.extend(ep)
-
-    # Reshape the results back into arrays with the appropriate dimensions
-    depletion_pvals = np.array(depletion_pvals).reshape(
-        neighborhoods.shape[1], annotations.shape[1]
-    )
-    enrichment_pvals = np.array(enrichment_pvals).reshape(
-        neighborhoods.shape[1], annotations.shape[1]
-    )
-
-    return {
-        "depletion_pvals": depletion_pvals,
-        "enrichment_pvals": enrichment_pvals,
-    }
-
-
-def _fisher_exact_process_subset(
-    neighborhoods: np.ndarray,
-    annotations: np.ndarray,
-    start_idx: int,
-    end_idx: int,
-    progress_counter,
-) -> tuple:
-    """Process a subset of neighborhoods using Fisher's exact test.
-
-    Args:
-        neighborhoods (np.ndarray): The full neighborhood matrix.
-        annotations (np.ndarray): The annotation matrix.
-        start_idx (int): Starting index of the neighborhood-annotation pairs to process.
-        end_idx (int): Ending index of the neighborhood-annotation pairs to process.
-        progress_counter: Shared counter for tracking progress.
-
-    Returns:
-        tuple: Local p-values for depletion and enrichment.
-    """
-    # Initialize lists to store p-values for depletion and enrichment
-    depletion_pvals = []
-    enrichment_pvals = []
-    # Process the subset of tasks assigned to this worker
-    for idx in range(start_idx, end_idx):
-        i = idx // annotations.shape[1]  # Neighborhood index
-        j = idx % annotations.shape[1]  # Annotation index
-
-        neighborhood = neighborhoods[:, i]
-        annotation = annotations[:, j]
-
-        # Calculate the contingency table values
-        TP = np.sum(neighborhood & annotation)
-        FP = np.sum(neighborhood & ~annotation)
-        FN = np.sum(~neighborhood & annotation)
-        TN = np.sum(~neighborhood & ~annotation)
-        table = np.array([[TP, FP], [FN, TN]])
-
-        # Perform Fisher's exact test for depletion (alternative='less')
-        _, p_value_depletion = fisher_exact(table, alternative="less")
-        depletion_pvals.append(p_value_depletion)
-        # Perform Fisher's exact test for enrichment (alternative='greater')
-        _, p_value_enrichment = fisher_exact(table, alternative="greater")
-        enrichment_pvals.append(p_value_enrichment)
-
-        # Update the shared progress counter
-        progress_counter.value += 1
-
-    return depletion_pvals, enrichment_pvals
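The deleted module tested each neighborhood-annotation pair with a 2x2 contingency table and two one-sided Fisher's exact tests. For readers comparing it against the hypergeometric and Poisson tests that remain in 0.0.7, here is a standalone sketch of that per-pair computation with toy data (this is no longer part of the package API):

import numpy as np
from scipy.stats import fisher_exact

# Toy binary matrices: 5 nodes x 2 neighborhoods and 5 nodes x 2 annotation terms (illustrative only)
neighborhoods = np.array([[1, 0], [1, 1], [0, 1], [0, 0], [1, 0]], dtype=bool)
annotations = np.array([[1, 0], [1, 1], [0, 1], [0, 1], [1, 0]], dtype=bool)

neighborhood = neighborhoods[:, 0]  # membership vector for one neighborhood
annotation = annotations[:, 1]  # membership vector for one annotation term

# Build the 2x2 contingency table for this neighborhood/annotation pair
TP = np.sum(neighborhood & annotation)
FP = np.sum(neighborhood & ~annotation)
FN = np.sum(~neighborhood & annotation)
TN = np.sum(~neighborhood & ~annotation)
table = np.array([[TP, FP], [FN, TN]])

# One-sided tests: 'less' for depletion, 'greater' for enrichment
_, p_depletion = fisher_exact(table, alternative="less")
_, p_enrichment = fisher_exact(table, alternative="greater")
print(p_depletion, p_enrichment)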
risk_network-0.0.6b9.dist-info/RECORD DELETED
@@ -1,30 +0,0 @@
-risk/__init__.py,sha256=6tlj1gnLhGwFbzT3xQqzWEWhatgBFXGguhpqJMhE5EA,112
-risk/constants.py,sha256=XInRaH78Slnw_sWgAsBFbUHkyA0h0jL0DKGuQNbOvjM,550
-risk/risk.py,sha256=-QVVWCBNmu3ho0QDiB5lRqpNsBR5fkpXpWJan3K3YGg,20702
-risk/annotations/__init__.py,sha256=vUpVvMRE5if01Ic8QY6M2Ae3EFGJHdugEe9PdEkAW4Y,138
-risk/annotations/annotations.py,sha256=DRUTdGzMdqo62NWSapBUksbvPr9CrzD76qtOcxeNKmo,10554
-risk/annotations/io.py,sha256=lo7NKqOVkeeBp58JBxWJHtA0xjL5Yoxqe9Ox0daKlZk,9457
-risk/log/__init__.py,sha256=xuLImfxFlKpnVhzi_gDYlr2_c9cLkrw2c_3iEsXb1as,107
-risk/log/console.py,sha256=im9DRExwf6wHlcn9fewoDcKIpo3vPcorZIaNAl-0csY,355
-risk/log/params.py,sha256=Tbb-sovFTptGBqPDKafUA8KOpby4zFObutAT_Iti1hE,6302
-risk/neighborhoods/__init__.py,sha256=tKKEg4lsbqFukpgYlUGxU_v_9FOqK7V0uvM9T2QzoL0,206
-risk/neighborhoods/community.py,sha256=7ebo1Q5KokSQISnxZIh2SQxsKXdXm8aVkp-h_DiQ3K0,6818
-risk/neighborhoods/domains.py,sha256=5V--Nj-TrSdubhD_2PI57ffcn_PMSEgpX_iY5OjT6R8,10626
-risk/neighborhoods/neighborhoods.py,sha256=sHmjFFl2U5qV9YbQCRbpbI36j7dS7IFfFwwRb1_-AuM,13945
-risk/network/__init__.py,sha256=iEPeJdZfqp0toxtbElryB8jbz9_t_k4QQ3iDvKE8C_0,126
-risk/network/geometry.py,sha256=H1yGVVqgbfpzBzJwEheDLfvGLSA284jGQQTn612L4Vc,6759
-risk/network/graph.py,sha256=7haHu4M3fleqbrIzs6HC9jnKizSERzmmAYSmUwdoSXA,13953
-risk/network/io.py,sha256=gG50kOknO-D3HkW1HsbHMkTMvjUtn3l4W4Jwd-rXNr8,21202
-risk/network/plot.py,sha256=_g5xHolMTAfZCBvYYEX1CYME4s4zA2hTHtN-utaMPik,61978
-risk/stats/__init__.py,sha256=e-BE_Dr_jgiK6hKM-T-tlG4yvHnId8e5qjnM0pdwNVc,230
-risk/stats/fisher_exact.py,sha256=-bPwzu76-ob0HzrTV20mXUTot7v-MLuqFaAoab-QxPg,4966
-risk/stats/hypergeom.py,sha256=lrIFdhCWRjvM4apYw1MlOKqT_IY5OjtCwrjdtJdt6Tg,4954
-risk/stats/stats.py,sha256=kvShov-94W6ffgDUTb522vB9hDJQSyTsYif_UIaFfSM,7059
-risk/stats/permutation/__init__.py,sha256=neJp7FENC-zg_CGOXqv-iIvz1r5XUKI9Ruxhmq7kDOI,105
-risk/stats/permutation/permutation.py,sha256=qLWdwxEY6nmkYPxpM8HLDcd2mbqYv9Qr7CKtJvhLqIM,9220
-risk/stats/permutation/test_functions.py,sha256=HuDIM-V1jkkfE1rlaIqrWWBSKZt3dQ1f-YEDjWpnLSE,2343
-risk_network-0.0.6b9.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
-risk_network-0.0.6b9.dist-info/METADATA,sha256=xhmfOR_CeYgPfZQ6n5OGN4zkiOJMYZ6j3SXVqt3dWzo,43142
-risk_network-0.0.6b9.dist-info/WHEEL,sha256=5Mi1sN9lKoFv_gxcPtisEVrJZihrm_beibeg5R6xb4I,91
-risk_network-0.0.6b9.dist-info/top_level.txt,sha256=NX7C2PFKTvC1JhVKv14DFlFAIFnKc6Lpsu1ZfxvQwVw,5
-risk_network-0.0.6b9.dist-info/RECORD,,
{risk_network-0.0.6b9.dist-info → risk_network-0.0.7.dist-info}/LICENSE
File without changes

{risk_network-0.0.6b9.dist-info → risk_network-0.0.7.dist-info}/top_level.txt
File without changes