risk-network 0.0.7b6.tar.gz → 0.0.7b8.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/PKG-INFO +1 -1
  2. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/__init__.py +1 -1
  3. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/neighborhoods/domains.py +11 -11
  4. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/hypergeom.py +1 -3
  5. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/permutation/permutation.py +16 -14
  6. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/poisson.py +0 -3
  7. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk_network.egg-info/PKG-INFO +1 -1
  8. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/LICENSE +0 -0
  9. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/MANIFEST.in +0 -0
  10. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/README.md +0 -0
  11. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/pyproject.toml +0 -0
  12. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/annotations/__init__.py +0 -0
  13. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/annotations/annotations.py +0 -0
  14. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/annotations/io.py +0 -0
  15. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/constants.py +0 -0
  16. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/log/__init__.py +0 -0
  17. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/log/console.py +0 -0
  18. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/log/params.py +0 -0
  19. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/neighborhoods/__init__.py +0 -0
  20. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/neighborhoods/community.py +0 -0
  21. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/neighborhoods/neighborhoods.py +2 -2
  22. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/network/__init__.py +0 -0
  23. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/network/geometry.py +0 -0
  24. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/network/graph.py +0 -0
  25. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/network/io.py +0 -0
  26. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/network/plot.py +0 -0
  27. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/risk.py +0 -0
  28. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/__init__.py +0 -0
  29. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/permutation/__init__.py +0 -0
  30. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/permutation/test_functions.py +0 -0
  31. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/stats.py +0 -0
  32. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk_network.egg-info/SOURCES.txt +0 -0
  33. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk_network.egg-info/dependency_links.txt +0 -0
  34. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk_network.egg-info/requires.txt +0 -0
  35. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk_network.egg-info/top_level.txt +0 -0
  36. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/setup.cfg +0 -0
  37. {risk_network-0.0.7b6 → risk_network-0.0.7b8}/setup.py +0 -0
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/PKG-INFO
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: risk-network
-Version: 0.0.7b6
+Version: 0.0.7b8
 Summary: A Python package for biological network analysis
 Author: Ira Horecka
 Author-email: Ira Horecka <ira89@icloud.com>
```
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/__init__.py
```diff
@@ -7,4 +7,4 @@ RISK: RISK Infers Spatial Kinships

 from risk.risk import RISK

-__version__ = "0.0.7-beta.6"
+__version__ = "0.0.7-beta.8"
```
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/neighborhoods/domains.py
```diff
@@ -4,6 +4,7 @@ risk/neighborhoods/domains
 """

 from contextlib import suppress
+from itertools import product
 from tqdm import tqdm
 from typing import Tuple

```
```diff
@@ -165,21 +166,20 @@ def _optimize_silhouette_across_linkage_and_metrics(
     total_combinations = len(linkage_methods) * len(linkage_metrics)

     # Evaluating optimal linkage method and metric
-    for method in tqdm(
-        linkage_methods,
+    for method, metric in tqdm(
+        product(linkage_methods, linkage_metrics),
         desc="Evaluating optimal linkage method and metric",
         total=total_combinations,
         bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]",
     ):
-        for metric in linkage_metrics:
-            with suppress(Exception):
-                Z = linkage(m, method=method, metric=metric)
-                threshold, score = _find_best_silhouette_score(Z, m, metric, linkage_criterion)
-                if score > best_overall_score:
-                    best_overall_score = score
-                    best_overall_threshold = threshold
-                    best_overall_method = method
-                    best_overall_metric = metric
+        with suppress(Exception):
+            Z = linkage(m, method=method, metric=metric)
+            threshold, score = _find_best_silhouette_score(Z, m, metric, linkage_criterion)
+            if score > best_overall_score:
+                best_overall_score = score
+                best_overall_threshold = threshold
+                best_overall_method = method
+                best_overall_metric = metric

     return best_overall_method, best_overall_metric, best_overall_threshold

```
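This refactor flattens the nested method × metric loops into a single pass over `itertools.product`, so tqdm advances once per combination and its `total=total_combinations` matches the true iteration count. A minimal, self-contained sketch of the pattern (the method and metric lists here are illustrative, not the package's defaults):

```python
from itertools import product

from tqdm import tqdm

linkage_methods = ["single", "complete", "average", "ward"]  # hypothetical values
linkage_metrics = ["euclidean", "cosine"]                    # hypothetical values
total_combinations = len(linkage_methods) * len(linkage_metrics)

# One flat loop over all pairs: the progress bar steps once per
# (method, metric) combination. product() has no len(), so total= must be
# supplied for tqdm to render a proper bar.
for method, metric in tqdm(product(linkage_methods, linkage_metrics), total=total_combinations):
    pass  # evaluate clustering quality for this (method, metric) pair
```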
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/hypergeom.py
```diff
@@ -22,9 +22,7 @@ def compute_hypergeom_test(
     Returns:
         dict: Dictionary containing depletion and enrichment p-values.
     """
-    # Ensure both matrices are binary (presence/absence)
-    neighborhoods = (neighborhoods > 0).astype(int)
-    annotations = (annotations > 0).astype(int)
+    # Get the total number of nodes in the network
    total_node_count = neighborhoods.shape[0]

     if null_distribution == "network":
```
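With the binarization dropped, `compute_hypergeom_test` now assumes its inputs are already binary presence/absence matrices. For orientation, a hypergeometric enrichment/depletion test over such matrices can be sketched as follows (shapes and variable names are illustrative, not the package's exact internals):

```python
import numpy as np
from scipy.stats import hypergeom

rng = np.random.default_rng(0)
# Hypothetical binary inputs: neighborhoods[i, j] == 1 if node j belongs to
# neighborhood i; annotations[j, a] == 1 if node j carries annotation a.
neighborhoods = rng.integers(0, 2, size=(50, 50))
annotations = rng.integers(0, 2, size=(50, 8))

total_node_count = neighborhoods.shape[0]            # population size M
sample_sizes = neighborhoods.sum(axis=1)[:, None]    # nodes drawn per neighborhood
annotated_totals = annotations.sum(axis=0)[None, :]  # annotated nodes per annotation
observed = neighborhoods @ annotations               # annotated nodes per neighborhood

# P(X >= k) gives enrichment, P(X <= k) gives depletion.
enrichment_pvals = hypergeom.sf(observed - 1, total_node_count, annotated_totals, sample_sizes)
depletion_pvals = hypergeom.cdf(observed, total_node_count, annotated_totals, sample_sizes)
```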
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/permutation/permutation.py
```diff
@@ -4,6 +4,7 @@ risk/stats/permutation/permutation
 """

 from multiprocessing import get_context, Manager
+from multiprocessing.managers import ValueProxy
 from tqdm import tqdm
 from typing import Any, Callable, Dict

```
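The new `ValueProxy` import exists to type-annotate the shared progress counter: `Manager().Value("i", 0)` returns a `multiprocessing.managers.ValueProxy` at runtime. A minimal sketch of that pattern:

```python
from multiprocessing import Manager, Pool
from multiprocessing.managers import ValueProxy


def bump(progress_counter: ValueProxy) -> None:
    # Each task increments the manager-backed shared counter. Note that
    # += on a proxy is read-then-write, so it is not atomic; that is
    # acceptable for approximate progress reporting.
    progress_counter.value += 1


if __name__ == "__main__":
    manager = Manager()
    counter = manager.Value("i", 0)  # runtime type: ValueProxy
    with Pool(2) as pool:
        pool.map(bump, [counter] * 4)
    print(counter.value)  # usually 4; lost increments are possible under races
```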
```diff
@@ -111,7 +112,6 @@ def _run_permutation_test(
     # Initialize count matrices for depletion and enrichment
     counts_depletion = np.zeros(observed_neighborhood_scores.shape)
     counts_enrichment = np.zeros(observed_neighborhood_scores.shape)
-
     # Determine the number of permutations to run in each worker process
     subset_size = num_permutations // max_workers
     remainder = num_permutations % max_workers
```
```diff
@@ -121,7 +121,6 @@ def _run_permutation_test(
     manager = Manager()
     progress_counter = manager.Value("i", 0)
     total_progress = num_permutations
-
     # Execute the permutation test using multiprocessing
     with ctx.Pool(max_workers) as pool:
         with tqdm(total=total_progress, desc="Total progress", position=0) as progress:
```
```diff
@@ -135,7 +134,8 @@ def _run_permutation_test(
                     neighborhood_score_func,
                     subset_size + (1 if i < remainder else 0),
                     progress_counter,
-                    rng,  # Pass the RNG to each process
+                    max_workers,
+                    rng,  # Pass the random number generator to each worker
                 )
                 for i in range(max_workers)
             ]
```
```diff
@@ -152,10 +152,10 @@ def _run_permutation_test(
             # Ensure progress bar reaches 100%
             progress.update(total_progress - progress.n)

-            # Accumulate results from each worker
-            for local_counts_depletion, local_counts_enrichment in results.get():
-                counts_depletion = np.add(counts_depletion, local_counts_depletion)
-                counts_enrichment = np.add(counts_enrichment, local_counts_enrichment)
+        # Accumulate results from each worker
+        for local_counts_depletion, local_counts_enrichment in results.get():
+            counts_depletion = np.add(counts_depletion, local_counts_depletion)
+            counts_enrichment = np.add(counts_enrichment, local_counts_enrichment)

     return counts_depletion, counts_enrichment

```
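The accumulated `counts_depletion` and `counts_enrichment` record how often permuted scores were at least as extreme as the observed ones. A standard way to turn such counts into empirical p-values (a generic sketch; the package may normalize differently) is:

```python
import numpy as np


def empirical_pvalues(counts: np.ndarray, num_permutations: int) -> np.ndarray:
    """Convert exceedance counts from a permutation test into p-values.

    Adding 1 to both numerator and denominator counts the observed statistic
    as one permutation, keeping p-values strictly positive.
    """
    return (counts + 1) / (num_permutations + 1)


# e.g. enrichment_p = empirical_pvalues(counts_enrichment, num_permutations)
```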
```diff
@@ -167,7 +167,8 @@ def _permutation_process_subset(
     observed_neighborhood_scores: np.ndarray,
     neighborhood_score_func: Callable,
     subset_size: int,
-    progress_counter,
+    progress_counter: ValueProxy,
+    max_workers: int,
     rng: np.random.Generator,
 ) -> tuple:
     """Process a subset of permutations for the permutation test.
```
```diff
@@ -179,7 +180,8 @@ def _permutation_process_subset(
         observed_neighborhood_scores (np.ndarray): Observed neighborhood scores.
         neighborhood_score_func (Callable): Function to calculate neighborhood scores.
         subset_size (int): Number of permutations to run in this subset.
-        progress_counter: Shared counter for tracking progress.
+        progress_counter (multiprocessing.managers.ValueProxy): Shared counter for tracking progress.
+        max_workers (int): Number of workers for multiprocessing.
         rng (np.random.Generator): Random number generator object.

     Returns:
```
```diff
@@ -188,10 +190,11 @@ def _permutation_process_subset(
     # Initialize local count matrices for this worker
     local_counts_depletion = np.zeros(observed_neighborhood_scores.shape)
     local_counts_enrichment = np.zeros(observed_neighborhood_scores.shape)
-    # NOTE: Limit the number of threads used by NumPy's BLAS implementation to 1.
-    # This can help prevent oversubscription of CPU resources during multiprocessing,
-    # ensuring that each process doesn't use more than one CPU core.
-    with threadpool_limits(limits=1, user_api="blas"):
+    # NOTE: Limit the number of threads used by NumPy's BLAS implementation to 1 when more than one worker is used.
+    # This can help prevent oversubscription of CPU resources during multiprocessing, ensuring that each process
+    # doesn't use more than one CPU core.
+    limits = None if max_workers == 1 else 1
+    with threadpool_limits(limits=limits, user_api="blas"):
         for _ in range(subset_size):
             # Permute the annotation matrix using the RNG
             annotation_matrix_permut = annotation_matrix[rng.permutation(idxs)]
```
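The new `limits = None if max_workers == 1 else 1` line makes the BLAS cap conditional: with a single worker there is no oversubscription risk, and `threadpool_limits(limits=None)` leaves thread pools untouched, so NumPy keeps its full BLAS parallelism. A self-contained sketch of the pattern (the function and matrix here are illustrative):

```python
import numpy as np
from threadpoolctl import threadpool_limits


def score_block(matrix: np.ndarray, max_workers: int) -> np.ndarray:
    # One worker: let BLAS use all of its threads (limits=None is a no-op).
    # Several workers: pin BLAS to one thread per process so the worker
    # processes do not oversubscribe the CPU.
    limits = None if max_workers == 1 else 1
    with threadpool_limits(limits=limits, user_api="blas"):
        return matrix @ matrix.T
```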
```diff
@@ -209,7 +212,6 @@ def _permutation_process_subset(
                 local_counts_enrichment,
                 permuted_neighborhood_scores >= observed_neighborhood_scores,
             )
-
             # Update the shared progress counter
             progress_counter.value += 1

```
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/stats/poisson.py
```diff
@@ -22,9 +22,6 @@ def compute_poisson_test(
     Returns:
         dict: Dictionary containing depletion and enrichment p-values.
     """
-    # Ensure both matrices are binary (presence/absence)
-    neighborhoods = (neighborhoods > 0).astype(int)
-    annotations = (annotations > 0).astype(int)
     # Matrix multiplication to get the number of annotated nodes in each neighborhood
     annotated_in_neighborhood = neighborhoods @ annotations

```
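`compute_poisson_test` gets the same contract change as `compute_hypergeom_test`: inputs are assumed binary rather than re-binarized inside the function. For illustration, a Poisson enrichment/depletion test over such counts might look like this (the null-rate choice here is one simple option, not necessarily the package's):

```python
import numpy as np
from scipy.stats import poisson

rng = np.random.default_rng(0)
neighborhoods = rng.integers(0, 2, size=(50, 50))  # hypothetical binary matrix
annotations = rng.integers(0, 2, size=(50, 8))     # hypothetical binary matrix

# Annotated node counts per (neighborhood, annotation) pair.
annotated_in_neighborhood = neighborhoods @ annotations
# Null rate: mean count per annotation across neighborhoods.
lambdas = annotated_in_neighborhood.mean(axis=0, keepdims=True)

depletion_pvals = poisson.cdf(annotated_in_neighborhood, lambdas)      # P(X <= k)
enrichment_pvals = poisson.sf(annotated_in_neighborhood - 1, lambdas)  # P(X >= k)
```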
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk_network.egg-info/PKG-INFO
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: risk-network
-Version: 0.0.7b6
+Version: 0.0.7b8
 Summary: A Python package for biological network analysis
 Author: Ira Horecka
 Author-email: Ira Horecka <ira89@icloud.com>
```
{risk_network-0.0.7b6 → risk_network-0.0.7b8}/risk/neighborhoods/neighborhoods.py
```diff
@@ -54,10 +54,10 @@ def get_network_neighborhoods(
         network, edge_length_percentile=edge_length_threshold
     )

-    if distance_metric == "greedy_modularity":
-        return calculate_greedy_modularity_neighborhoods(network)
     if distance_metric == "louvain":
         return calculate_louvain_neighborhoods(network, louvain_resolution, random_seed=random_seed)
+    if distance_metric == "greedy_modularity":
+        return calculate_greedy_modularity_neighborhoods(network)
     if distance_metric == "label_propagation":
         return calculate_label_propagation_neighborhoods(network)
     if distance_metric == "markov_clustering":
```
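Because every branch returns immediately, swapping the `louvain` and `greedy_modularity` checks does not change behavior for valid inputs; it only reorders the comparisons. A table-driven dispatch is a common alternative to such an if-chain (a sketch with stub functions, not the package's code):

```python
from typing import Callable, Dict

# Stubs standing in for the package's calculate_*_neighborhoods helpers
# (hypothetical; only the dispatch shape is the point here).
def louvain_stub(network: object) -> str:
    return "louvain neighborhoods"

def greedy_modularity_stub(network: object) -> str:
    return "greedy modularity neighborhoods"

DISPATCH: Dict[str, Callable[[object], str]] = {
    "louvain": louvain_stub,
    "greedy_modularity": greedy_modularity_stub,
}

def get_neighborhoods(network: object, distance_metric: str) -> str:
    try:
        return DISPATCH[distance_metric](network)
    except KeyError:
        raise ValueError(f"Unknown distance metric: {distance_metric!r}") from None
```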