risk-network 0.0.9b24__py3-none-any.whl → 0.0.9b25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
risk/__init__.py CHANGED
@@ -7,4 +7,4 @@ RISK: Regional Inference of Significant Kinships
 
  from risk.risk import RISK
 
- __version__ = "0.0.9-beta.24"
+ __version__ = "0.0.9-beta.25"
risk/annotations/annotations.py CHANGED
@@ -16,6 +16,7 @@ from nltk.tokenize import word_tokenize
  from nltk.corpus import stopwords
 
  from risk.log import logger
+ from scipy.sparse import csr_matrix
 
 
  def _setup_nltk():
@@ -47,17 +48,15 @@ def load_annotations(
  annotations_input (Dict[str, Any]): A dictionary with annotations.
  min_nodes_per_term (int, optional): The minimum number of network nodes required for each annotation
  term to be included. Defaults to 2.
+ use_sparse (bool, optional): Whether to return the annotations matrix as a sparse matrix. Defaults to True.
 
  Returns:
- Dict[str, Any]: A dictionary containing ordered nodes, ordered annotations, and the binary annotations matrix.
+ Dict[str, Any]: A dictionary containing ordered nodes, ordered annotations, and the sparse binary annotations
+ matrix.
 
  Raises:
  ValueError: If no annotations are found for the nodes in the network.
  ValueError: If no annotations have at least min_nodes_per_term nodes in the network.
-
- Comment:
- This function should be optimized to handle large networks and annotations efficiently. An attempt
- to use sparse matrices did not yield significant performance improvements, so it was not implemented.
  """
  # Flatten the dictionary to a list of tuples for easier DataFrame creation
  flattened_annotations = [
@@ -78,7 +77,6 @@ def load_annotations(
  raise ValueError("No terms found in the annotation file for the nodes in the network.")
 
  # Filter out annotations with fewer than min_nodes_per_term occurrences
- # This assists in reducing noise and focusing on more relevant annotations for statistical analysis
  num_terms_before_filtering = annotations_pivot.shape[1]
  annotations_pivot = annotations_pivot.loc[
  :, (annotations_pivot.sum(axis=0) >= min_nodes_per_term)
@@ -96,13 +94,15 @@ def load_annotations(
  # Extract ordered nodes and annotations
  ordered_nodes = tuple(annotations_pivot.index)
  ordered_annotations = tuple(annotations_pivot.columns)
- # Convert the annotations_pivot matrix to a numpy array and ensure it's binary
- annotations_pivot_numpy = (annotations_pivot.fillna(0).to_numpy() > 0).astype(int)
+ # Convert the annotations_pivot matrix to a numpy array or sparse matrix
+ annotations_pivot_binary = (annotations_pivot.fillna(0).to_numpy() > 0).astype(int)
+ # Convert the binary annotations matrix to a sparse matrix
+ annotations_pivot_binary = csr_matrix(annotations_pivot_binary)
 
  return {
  "ordered_nodes": ordered_nodes,
  "ordered_annotations": ordered_annotations,
- "matrix": annotations_pivot_numpy,
+ "matrix": annotations_pivot_binary,
  }
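What this conversion buys, sketched on a toy matrix (illustrative code, not from the package): a mostly-zero binary node-by-term matrix stored in CSR format keeps only its nonzero entries, so memory scales with the annotation count rather than the full grid.

    import numpy as np
    from scipy.sparse import csr_matrix

    # ~1% nonzero binary matrix, shaped like a node-by-term annotations matrix
    dense = (np.random.rand(10_000, 500) < 0.01).astype(int)
    sparse = csr_matrix(dense)
    # CSR stores three small arrays (data, indices, indptr) instead of every cell
    dense_bytes = dense.nbytes
    sparse_bytes = sparse.data.nbytes + sparse.indices.nbytes + sparse.indptr.nbytes
    print(dense_bytes, sparse_bytes)  # sparse storage scales with the number of nonzeros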
risk/neighborhoods/__init__.py CHANGED
@@ -4,5 +4,5 @@ risk/neighborhoods
  """
 
  from risk.neighborhoods.domains import define_domains, trim_domains
- from risk.neighborhoods.io import NeighborhoodsIO
+ from risk.neighborhoods.api import NeighborhoodsAPI
  from risk.neighborhoods.neighborhoods import process_neighborhoods
risk/neighborhoods/io.py → risk/neighborhoods/api.py RENAMED
@@ -1,6 +1,6 @@
  """
- risk/neighborhoods/io
- ~~~~~~~~~~~~~~~~~~~~~
+ risk/neighborhoods/api
+ ~~~~~~~~~~~~~~~~~~~~~~
  """
 
  import copy
@@ -8,6 +8,7 @@ from typing import Any, Dict, List, Tuple, Union
 
  import networkx as nx
  import numpy as np
+ from scipy.sparse import csr_matrix
 
  from risk.log import logger, log_header, params
  from risk.neighborhoods.neighborhoods import get_network_neighborhoods
@@ -21,10 +22,10 @@ from risk.stats import (
  )
 
 
- class NeighborhoodsIO:
+ class NeighborhoodsAPI:
  """Handles the loading of statistical results and annotation significance for neighborhoods.
 
- The NeighborhoodsIO class provides methods to load neighborhood results from statistical tests.
+ The NeighborhoodsAPI class provides methods to load neighborhood results from statistical tests.
  """
 
  def __init__() -> None:
@@ -86,7 +87,7 @@ class NeighborhoodsIO:
  null_distribution: str = "network",
  random_seed: int = 888,
  ) -> Dict[str, Any]:
- """Load significant neighborhoods for the network using the Chi-squared test.
+ """Load significant neighborhoods for the network using the chi-squared test.
 
  Args:
  network (nx.Graph): The network graph.
@@ -396,12 +397,11 @@ class NeighborhoodsIO:
  leiden_resolution: float = 1.0,
  fraction_shortest_edges: Union[float, List, Tuple, np.ndarray] = 0.5,
  random_seed: int = 888,
- ) -> np.ndarray:
+ ) -> csr_matrix:
  """Load significant neighborhoods for the network.
 
  Args:
  network (nx.Graph): The network graph.
- annotations (pd.DataFrame): The matrix of annotations associated with the network.
  distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
  metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'leiden', 'label_propagation',
  'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
@@ -413,7 +413,7 @@ class NeighborhoodsIO:
  random_seed (int, optional): Seed for random number generation. Defaults to 888.
 
  Returns:
- np.ndarray: Neighborhood matrix calculated based on the selected distance metric.
+ csr_matrix: Sparse neighborhood matrix calculated based on the selected distance metric.
  """
  # Display the chosen distance metric
  if distance_metric == "louvain":
@@ -422,12 +422,13 @@ class NeighborhoodsIO:
  for_print_distance_metric = f"leiden (resolution={leiden_resolution})"
  else:
  for_print_distance_metric = distance_metric
+
  # Log and display neighborhood settings
  logger.debug(f"Distance metric: '{for_print_distance_metric}'")
  logger.debug(f"Edge length threshold: {fraction_shortest_edges}")
  logger.debug(f"Random seed: {random_seed}")
 
- # Compute neighborhoods based on the network and distance metric
+ # Compute neighborhoods
  neighborhoods = get_network_neighborhoods(
  network,
  distance_metric,
@@ -437,5 +438,9 @@ class NeighborhoodsIO:
  random_seed=random_seed,
  )
 
- # Return the computed neighborhoods
+ # Ensure the neighborhood matrix is in sparse format
+ if not isinstance(neighborhoods, csr_matrix):
+ neighborhoods = csr_matrix(neighborhoods)
+
+ # Return the sparse neighborhood matrix
  return neighborhoods
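The closing guard is a cheap, idempotent normalization; its presence suggests get_network_neighborhoods can still hand back a dense array for some metrics. A standalone sketch of the same pattern (illustrative names, not package code):

    import numpy as np
    from scipy.sparse import csr_matrix

    def ensure_csr(matrix):
        # Leave CSR input untouched; convert dense ndarrays or other sparse formats
        return matrix if isinstance(matrix, csr_matrix) else csr_matrix(matrix)

    assert ensure_csr(np.eye(3)).format == "csr"
    assert ensure_csr(csr_matrix(np.eye(3))).format == "csr"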
risk/network/__init__.py CHANGED
@@ -3,6 +3,4 @@ risk/network
  ~~~~~~~~~~~~
  """
 
- from risk.network.graph import GraphIO
  from risk.network.io import NetworkIO
- from risk.network.plot import PlotterIO
risk/network/graph/__init__.py CHANGED
@@ -3,4 +3,4 @@ risk/network/graph
  ~~~~~~~~~~~~~~~~~~
  """
 
- from risk.network.graph.io import GraphIO
+ from risk.network.graph.api import GraphAPI
risk/network/graph/io.py → risk/network/graph/api.py RENAMED
@@ -1,6 +1,6 @@
  """
- risk/network/graph/io
- ~~~~~~~~~~~~~~~~~~~~~
+ risk/network/graph/api
+ ~~~~~~~~~~~~~~~~~~~~~~
  """
 
  import copy
@@ -20,10 +20,10 @@ from risk.network.graph.network import NetworkGraph
  from risk.stats import calculate_significance_matrices
 
 
- class GraphIO:
+ class GraphAPI:
  """Handles the loading of network graphs and associated data.
 
- The GraphIO class provides methods to load and process network graphs, annotations, and neighborhoods.
+ The GraphAPI class provides methods to load and process network graphs, annotations, and neighborhoods.
  """
 
  def __init__() -> None:
risk/network/graph/summary.py CHANGED
@@ -240,8 +240,12 @@ class AnalysisSummary:
  except ValueError:
  return "" # Description not found
 
- # Get nodes present for the annotation and sort by node label
- nodes_present = np.where(self.annotations["matrix"][:, annotation_idx] == 1)[0]
+ # Get the column (safely) from the sparse matrix
+ column = self.annotations["matrix"][:, annotation_idx]
+ # Convert the column to a dense array if needed
+ column = column.toarray().ravel() # Convert to a 1D dense array
+ # Get nodes present for the annotation and sort by node label - use np.where on the dense array
+ nodes_present = np.where(column == 1)[0]
  node_labels = sorted(
  self.graph.node_id_to_node_label_map[node_id]
  for node_id in nodes_present
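Slicing one column out of a CSR matrix returns a sparse (n, 1) matrix rather than a 1D array, which is why the new code densifies before calling np.where. A toy illustration (not package code):

    import numpy as np
    from scipy.sparse import csr_matrix

    matrix = csr_matrix(np.array([[1, 0], [0, 1], [1, 0]]))
    column = matrix[:, 0]                    # still sparse, shape (3, 1)
    dense = column.toarray().ravel()         # 1D dense array: [1 0 1]
    nodes_present = np.where(dense == 1)[0]  # -> array([0, 2])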
risk/network/plotter/__init__.py ADDED
@@ -0,0 +1,6 @@
+ """
+ risk/network/plot
+ ~~~~~~~~~~~~~~~~~
+ """
+
+ from risk.network.plotter.api import PlotterAPI
risk/network/plot/io.py → risk/network/plotter/api.py RENAMED
@@ -1,6 +1,6 @@
  """
- risk/network/graph/io
- ~~~~~~~~~~~~~~~~~~~~~
+ risk/network/graph/api
+ ~~~~~~~~~~~~~~~~~~~~~~
  """
 
  from typing import List, Tuple, Union
@@ -9,13 +9,13 @@ import numpy as np
 
  from risk.log import log_header
  from risk.network.graph.network import NetworkGraph
- from risk.network.plot.network import NetworkPlotter
+ from risk.network.plotter.network import NetworkPlotter
 
 
- class PlotterIO:
+ class PlotterAPI:
  """Handles the loading of network plotter objects.
 
- The PlotterIO class provides methods to load and configure NetworkPlotter objects for plotting network graphs.
+ The PlotterAPI class provides methods to load and configure NetworkPlotter objects for plotting network graphs.
  """
 
  def __init__() -> None:
risk/network/plot/canvas.py → risk/network/plotter/canvas.py RENAMED
@@ -10,8 +10,8 @@ import numpy as np
 
  from risk.log import params
  from risk.network.graph.network import NetworkGraph
- from risk.network.plot.utils.colors import to_rgba
- from risk.network.plot.utils.layout import calculate_bounding_box
+ from risk.network.plotter.utils.colors import to_rgba
+ from risk.network.plotter.utils.layout import calculate_bounding_box
 
 
  class Canvas:
risk/network/plot/contour.py → risk/network/plotter/contour.py RENAMED
@@ -13,7 +13,7 @@ from scipy.stats import gaussian_kde
 
  from risk.log import params, logger
  from risk.network.graph.network import NetworkGraph
- from risk.network.plot.utils.colors import get_annotated_domain_colors, to_rgba
+ from risk.network.plotter.utils.colors import get_annotated_domain_colors, to_rgba
 
 
  class Contour:
risk/network/plot/labels.py → risk/network/plotter/labels.py RENAMED
@@ -12,8 +12,8 @@ import pandas as pd
 
  from risk.log import params
  from risk.network.graph.network import NetworkGraph
- from risk.network.plot.utils.colors import get_annotated_domain_colors, to_rgba
- from risk.network.plot.utils.layout import calculate_bounding_box
+ from risk.network.plotter.utils.colors import get_annotated_domain_colors, to_rgba
+ from risk.network.plotter.utils.layout import calculate_bounding_box
 
  TERM_DELIMITER = "::::" # String used to separate multiple domain terms when constructing composite domain labels
 
risk/network/plot/network.py → risk/network/plotter/network.py RENAMED
@@ -11,11 +11,11 @@ import numpy as np
 
  from risk.log import params
  from risk.network.graph.network import NetworkGraph
- from risk.network.plot.canvas import Canvas
- from risk.network.plot.contour import Contour
- from risk.network.plot.labels import Labels
- from risk.network.plot.utils.colors import get_domain_colors, to_rgba
- from risk.network.plot.utils.layout import calculate_bounding_box
+ from risk.network.plotter.canvas import Canvas
+ from risk.network.plotter.contour import Contour
+ from risk.network.plotter.labels import Labels
+ from risk.network.plotter.utils.colors import get_domain_colors, to_rgba
+ from risk.network.plotter.utils.layout import calculate_bounding_box
 
 
  class Network:
risk/risk.py CHANGED
@@ -3,13 +3,16 @@ risk/risk
  ~~~~~~~~~
  """
 
+ from risk.network import NetworkIO
  from risk.annotations import AnnotationsIO
+ from risk.neighborhoods import NeighborhoodsAPI
+ from risk.network.graph import GraphAPI
+ from risk.network.plotter import PlotterAPI
+
  from risk.log import params, set_global_verbosity
- from risk.neighborhoods import NeighborhoodsIO
- from risk.network import GraphIO, NetworkIO, PlotterIO
 
 
- class RISK(NetworkIO, AnnotationsIO, NeighborhoodsIO, GraphIO, PlotterIO):
+ class RISK(NetworkIO, AnnotationsIO, NeighborhoodsAPI, GraphAPI, PlotterAPI):
  """RISK: A class for network analysis and visualization.
 
  The RISK class integrates functionalities for loading networks, processing annotations,
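RISK assembles its public surface by multiple inheritance over the renamed *API mixins. A minimal sketch of the pattern with hypothetical classes (toy names and methods, not the package's):

    class LoaderAPI:
        def load(self) -> str:
            return "loaded"

    class PlotAPI:
        def plot(self) -> str:
            return "plotted"

    class App(LoaderAPI, PlotAPI):
        """One entry point; each mixin contributes a slice of the interface."""

    app = App()
    print(app.load(), app.plot())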
risk/stats/binom.py CHANGED
@@ -5,43 +5,47 @@ risk/stats/binomial
 
  from typing import Any, Dict
 
- import numpy as np
+ from scipy.sparse import csr_matrix
  from scipy.stats import binom
 
 
  def compute_binom_test(
- neighborhoods: np.ndarray, annotations: np.ndarray, null_distribution: str = "network"
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
+ null_distribution: str = "network",
  ) -> Dict[str, Any]:
  """Compute Binomial test for enrichment and depletion in neighborhoods with selectable null distribution.
 
  Args:
- neighborhoods (np.ndarray): Binary matrix representing neighborhoods (rows as nodes, columns as neighbors).
- annotations (np.ndarray): Binary matrix representing annotations (rows as nodes, columns as annotations).
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
 
  Returns:
  Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
  """
- # Calculate the total counts of annotated nodes and neighborhood sizes
- annotated_counts = neighborhoods @ annotations
- neighborhood_sizes = neighborhoods.sum(axis=1, keepdims=True)
- annotation_totals = annotations.sum(axis=0, keepdims=True)
- total_nodes = neighborhoods.shape[1] # Total number of nodes in the network
+ # Get the total number of nodes in the network
+ total_nodes = neighborhoods.shape[1]
 
- # Compute p for the Binomial distribution based on the chosen null distribution
+ # Compute sums (remain sparse here)
+ neighborhood_sizes = neighborhoods.sum(axis=1) # Row sums
+ annotation_totals = annotations.sum(axis=0) # Column sums
+ # Compute probabilities (convert to dense)
  if null_distribution == "network":
- p_values = (
- annotation_totals / total_nodes
- ) # Probability of annotation per node across the network
+ p_values = (annotation_totals / total_nodes).A.flatten() # Dense 1D array
  elif null_distribution == "annotations":
- p_values = annotation_totals / annotations.sum() # Probability weighted by annotations
+ p_values = (annotation_totals / annotations.sum()).A.flatten() # Dense 1D array
  else:
  raise ValueError(
  "Invalid null_distribution value. Choose either 'network' or 'annotations'."
  )
 
+ # Observed counts (sparse matrix multiplication)
+ annotated_counts = neighborhoods @ annotations # Sparse result
+ annotated_counts_dense = annotated_counts.toarray() # Convert for dense operations
+
  # Compute enrichment and depletion p-values
- enrichment_pvals = 1 - binom.cdf(annotated_counts - 1, neighborhood_sizes, p_values)
- depletion_pvals = binom.cdf(annotated_counts, neighborhood_sizes, p_values)
+ enrichment_pvals = 1 - binom.cdf(annotated_counts_dense - 1, neighborhood_sizes.A, p_values)
+ depletion_pvals = binom.cdf(annotated_counts_dense, neighborhood_sizes.A, p_values)
 
  return {"enrichment_pvals": enrichment_pvals, "depletion_pvals": depletion_pvals}
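For intuition, a dense equivalent of the computation above on toy data (illustrative numbers; the package operates on CSR inputs). Note that .sum() on a SciPy sparse matrix returns a numpy.matrix, whose .A attribute is the plain ndarray used in the calls above.

    import numpy as np
    from scipy.stats import binom

    neighborhoods = np.array([[1, 1, 0, 0], [0, 1, 1, 1]])    # 2 neighborhoods x 4 nodes
    annotations = np.array([[1, 0], [1, 1], [0, 1], [0, 0]])  # 4 nodes x 2 terms

    annotated_counts = neighborhoods @ annotations            # observed counts, shape (2, 2)
    sizes = neighborhoods.sum(axis=1, keepdims=True)          # neighborhood sizes
    p = annotations.sum(axis=0) / neighborhoods.shape[1]      # per-term probability ('network' null)
    enrichment_pvals = 1 - binom.cdf(annotated_counts - 1, sizes, p)
    depletion_pvals = binom.cdf(annotated_counts, sizes, p)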
risk/stats/chi2.py CHANGED
@@ -4,51 +4,63 @@ risk/stats/chi2
  """
 
  from typing import Any, Dict
+
  import numpy as np
+ from scipy.sparse import csr_matrix
  from scipy.stats import chi2
 
 
  def compute_chi2_test(
- neighborhoods: np.ndarray, annotations: np.ndarray, null_distribution: str = "network"
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
+ null_distribution: str = "network",
  ) -> Dict[str, Any]:
  """Compute chi-squared test for enrichment and depletion in neighborhoods with selectable null distribution.
 
  Args:
- neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
- annotations (np.ndarray): Binary matrix representing annotations.
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
 
  Returns:
  Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
  """
- # Get the total number of nodes in the network
+ # Total number of nodes in the network
  total_node_count = neighborhoods.shape[0]
 
  if null_distribution == "network":
  # Case 1: Use all nodes as the background
  background_population = total_node_count
- neighborhood_sums = np.sum(
- neighborhoods, axis=0, keepdims=True
- ).T # Column sums of neighborhoods
- annotation_sums = np.sum(annotations, axis=0, keepdims=True) # Column sums of annotations
+ neighborhood_sums = neighborhoods.sum(axis=0) # Column sums of neighborhoods
+ annotation_sums = annotations.sum(axis=0) # Column sums of annotations
  elif null_distribution == "annotations":
  # Case 2: Only consider nodes with at least one annotation
- annotated_nodes = np.sum(annotations, axis=1) > 0
- background_population = np.sum(annotated_nodes)
- neighborhood_sums = np.sum(neighborhoods[annotated_nodes], axis=0, keepdims=True).T
- annotation_sums = np.sum(annotations[annotated_nodes], axis=0, keepdims=True)
+ annotated_nodes = (
+ np.ravel(annotations.sum(axis=1)) > 0
+ ) # Row-wise sum to filter nodes with annotations
+ background_population = annotated_nodes.sum() # Total number of annotated nodes
+ neighborhood_sums = neighborhoods[annotated_nodes].sum(
+ axis=0
+ ) # Neighborhood sums for annotated nodes
+ annotation_sums = annotations[annotated_nodes].sum(
+ axis=0
+ ) # Annotation sums for annotated nodes
  else:
  raise ValueError(
  "Invalid null_distribution value. Choose either 'network' or 'annotations'."
  )
 
+ # Convert to dense arrays for downstream computations
+ neighborhood_sums = np.asarray(neighborhood_sums).reshape(-1, 1) # Ensure column vector shape
+ annotation_sums = np.asarray(annotation_sums).reshape(1, -1) # Ensure row vector shape
+
  # Observed values: number of annotated nodes in each neighborhood
  observed = neighborhoods.T @ annotations # Shape: (neighborhoods, annotations)
  # Expected values under the null
  expected = (neighborhood_sums @ annotation_sums) / background_population
  # Chi-squared statistic: sum((observed - expected)^2 / expected)
  with np.errstate(divide="ignore", invalid="ignore"): # Handle divide-by-zero
- chi2_stat = np.where(expected > 0, (observed - expected) ** 2 / expected, 0)
+ chi2_stat = np.where(expected > 0, np.power(observed - expected, 2) / expected, 0)
 
  # Compute p-values for enrichment (upper tail) and depletion (lower tail)
  enrichment_pvals = chi2.sf(chi2_stat, df=1) # Survival function for upper tail
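The expected counts follow the classic contingency-table construction: an outer product of the two marginal sums divided by the background population. A dense toy check (illustrative numbers only):

    import numpy as np
    from scipy.stats import chi2

    neighborhood_sums = np.array([[2], [3]])  # column vector: nodes per neighborhood
    annotation_sums = np.array([[2, 1]])      # row vector: nodes per annotation term
    background_population = 4
    observed = np.array([[2, 0], [1, 1]])

    expected = (neighborhood_sums @ annotation_sums) / background_population
    chi2_stat = np.where(expected > 0, (observed - expected) ** 2 / expected, 0)
    enrichment_pvals = chi2.sf(chi2_stat, df=1)  # upper tail, df=1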
risk/stats/hypergeom.py CHANGED
@@ -6,44 +6,54 @@ risk/stats/hypergeom
  from typing import Any, Dict
 
  import numpy as np
+ from scipy.sparse import csr_matrix
  from scipy.stats import hypergeom
 
 
  def compute_hypergeom_test(
- neighborhoods: np.ndarray, annotations: np.ndarray, null_distribution: str = "network"
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
+ null_distribution: str = "network",
  ) -> Dict[str, Any]:
- """Compute hypergeometric test for enrichment and depletion in neighborhoods with selectable null distribution.
+ """
+ Compute hypergeometric test for enrichment and depletion in neighborhoods with selectable null distribution.
 
  Args:
- neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
- annotations (np.ndarray): Binary matrix representing annotations.
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
 
  Returns:
  Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
  """
  # Get the total number of nodes in the network
- total_node_count = neighborhoods.shape[0]
+ total_nodes = neighborhoods.shape[1]
+
+ # Compute sums
+ neighborhood_sums = neighborhoods.sum(axis=0).A.flatten() # Convert to dense array
+ annotation_sums = annotations.sum(axis=0).A.flatten() # Convert to dense array
 
  if null_distribution == "network":
- # Case 1: Use all nodes as the background
- background_population = total_node_count
- neighborhood_sums = np.sum(neighborhoods, axis=0, keepdims=True).T
- annotation_sums = np.sum(annotations, axis=0, keepdims=True)
+ background_population = total_nodes
  elif null_distribution == "annotations":
- # Case 2: Only consider nodes with at least one annotation
- annotated_nodes = np.sum(annotations, axis=1) > 0
- background_population = np.sum(annotated_nodes)
- neighborhood_sums = np.sum(neighborhoods[annotated_nodes], axis=0, keepdims=True).T
- annotation_sums = np.sum(annotations[annotated_nodes], axis=0, keepdims=True)
+ annotated_nodes = annotations.sum(axis=1).A.flatten() > 0 # Boolean mask
+ background_population = annotated_nodes.sum()
+ neighborhood_sums = neighborhoods[annotated_nodes].sum(axis=0).A.flatten()
+ annotation_sums = annotations[annotated_nodes].sum(axis=0).A.flatten()
  else:
  raise ValueError(
  "Invalid null_distribution value. Choose either 'network' or 'annotations'."
  )
 
- # Matrix multiplication for annotated nodes in each neighborhood
- annotated_in_neighborhood = neighborhoods.T @ annotations
- # Calculate depletion and enrichment p-values using the hypergeometric distribution
+ # Observed counts
+ annotated_in_neighborhood = neighborhoods.T @ annotations # Sparse result
+ annotated_in_neighborhood = annotated_in_neighborhood.toarray() # Convert to dense
+ # Align shapes for broadcasting
+ neighborhood_sums = neighborhood_sums.reshape(-1, 1)
+ annotation_sums = annotation_sums.reshape(1, -1)
+ background_population = np.array(background_population).reshape(1, 1)
+
+ # Compute hypergeometric p-values
  depletion_pvals = hypergeom.cdf(
  annotated_in_neighborhood, background_population, annotation_sums, neighborhood_sums
  )
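scipy.stats.hypergeom broadcasts its arguments, which is why the reshapes above matter: a column of neighborhood sizes against a row of annotation totals yields a full matrix of p-values in one call. Toy illustration (made-up counts):

    import numpy as np
    from scipy.stats import hypergeom

    k = np.array([[2, 0], [1, 1]])  # observed annotated nodes per neighborhood/term
    M = 10                          # background population size
    n = np.array([[4, 3]])          # annotated nodes per term (row vector)
    N = np.array([[3], [5]])        # neighborhood sizes (column vector)

    depletion_pvals = hypergeom.cdf(k, M, n, N)      # P(X <= k)
    enrichment_pvals = hypergeom.sf(k - 1, M, n, N)  # P(X >= k)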
risk/stats/permutation/permutation.py CHANGED
@@ -3,11 +3,12 @@ risk/stats/permutation/permutation
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  """
 
- from multiprocessing import get_context, shared_memory, Manager
+ from multiprocessing import get_context, Manager
  from multiprocessing.managers import ValueProxy
  from typing import Any, Callable, Dict, List, Tuple, Union
 
  import numpy as np
+ from scipy.sparse import csr_matrix
  from threadpoolctl import threadpool_limits
  from tqdm import tqdm
 
@@ -15,8 +16,8 @@ from risk.stats.permutation.test_functions import DISPATCH_TEST_FUNCTIONS
 
 
  def compute_permutation_test(
- neighborhoods: np.ndarray,
- annotations: np.ndarray,
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
  score_metric: str = "sum",
  null_distribution: str = "network",
  num_permutations: int = 1000,
@@ -26,9 +27,9 @@ def compute_permutation_test(
  """Compute permutation test for enrichment and depletion in neighborhoods.
 
  Args:
- neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
- annotations (np.ndarray): Binary matrix representing annotations.
- score_metric (str, optional): Metric to use for scoring ('sum', 'mean', etc.). Defaults to "sum".
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
+ score_metric (str, optional): Metric to use for scoring ('sum' or 'stdev'). Defaults to "sum".
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
  num_permutations (int, optional): Number of permutations to run. Defaults to 1000.
  random_seed (int, optional): Seed for random number generation. Defaults to 888.
@@ -66,19 +67,19 @@ def compute_permutation_test(
 
 
  def _run_permutation_test(
- neighborhoods: np.ndarray,
- annotations: np.ndarray,
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
  neighborhood_score_func: Callable,
  null_distribution: str = "network",
  num_permutations: int = 1000,
  random_seed: int = 888,
  max_workers: int = 4,
  ) -> tuple:
- """Run a permutation test to calculate enrichment and depletion counts.
+ """Run the permutation test to calculate depletion and enrichment counts.
 
  Args:
- neighborhoods (np.ndarray): The neighborhood matrix.
- annotations (np.ndarray): The annotation matrix.
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
  neighborhood_score_func (Callable): Function to calculate neighborhood scores.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
  num_permutations (int, optional): Number of permutations. Defaults to 1000.
@@ -100,8 +101,8 @@ def _run_permutation_test(
  "Invalid null_distribution value. Choose either 'network' or 'annotations'."
  )
 
- # Replace NaNs with zeros in the annotations matrix
- annotations[np.isnan(annotations)] = 0
+ # Replace NaNs with zeros in the sparse annotations matrix
+ annotations.data[np.isnan(annotations.data)] = 0
  annotation_matrix_obsv = annotations[idxs]
  neighborhoods_matrix_obsv = neighborhoods.T[idxs].T
  # Calculate observed neighborhood scores
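A CSR matrix keeps only its stored entries in the .data array, so the NaN scrub above touches just those values instead of scanning a full dense grid. Toy illustration (not package code):

    import numpy as np
    from scipy.sparse import csr_matrix

    m = csr_matrix(np.array([[1.0, np.nan], [0.0, 2.0]]))
    m.data[np.isnan(m.data)] = 0  # only stored entries are examined
    m.eliminate_zeros()           # optionally drop the entries that became zero
    print(m.toarray())            # [[1. 0.] [0. 2.]]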
@@ -131,45 +132,33 @@ def _run_permutation_test(
  permutations[i * batch_size : (i + 1) * batch_size] for i in range(max_workers)
  ]
 
- # Create shared memory for annotations
- shm_annotations = shared_memory.SharedMemory(create=True, size=annotations.nbytes)
- shared_annotations = np.ndarray(
- annotations.shape, dtype=annotations.dtype, buffer=shm_annotations.buf
- )
- np.copyto(shared_annotations, annotations)
-
  # Execute the permutation test using multiprocessing
- try:
- with ctx.Pool(max_workers) as pool:
- with tqdm(total=total_progress, desc="Total progress", position=0) as progress:
- # Prepare parameters for multiprocessing
- params_list = [
- (
- permutation_batches[i], # Pass the batch of precomputed permutations
- annotations,
- neighborhoods_matrix_obsv,
- observed_neighborhood_scores,
- neighborhood_score_func,
- num_permutations,
- progress_counter,
- max_workers,
- )
- for i in range(max_workers)
- ]
-
- # Start the permutation process in parallel
- results = pool.starmap_async(_permutation_process_batch, params_list, chunksize=1)
-
- # Update progress bar based on progress_counter
- while not results.ready():
- progress.update(progress_counter.value - progress.n)
- results.wait(0.1) # Wait for 100ms
- # Ensure progress bar reaches 100%
- progress.update(total_progress - progress.n)
- finally:
- # Clean up shared memory
- shm_annotations.close()
- shm_annotations.unlink()
+ with ctx.Pool(max_workers) as pool:
+ with tqdm(total=total_progress, desc="Total progress", position=0) as progress:
+ # Prepare parameters for multiprocessing
+ params_list = [
+ (
+ permutation_batches[i], # Pass the batch of precomputed permutations
+ annotations,
+ neighborhoods_matrix_obsv,
+ observed_neighborhood_scores,
+ neighborhood_score_func,
+ num_permutations,
+ progress_counter,
+ max_workers,
+ )
+ for i in range(max_workers)
+ ]
+
+ # Start the permutation process in parallel
+ results = pool.starmap_async(_permutation_process_batch, params_list, chunksize=1)
+
+ # Update progress bar based on progress_counter
+ while not results.ready():
+ progress.update(progress_counter.value - progress.n)
+ results.wait(0.1) # Wait for 100ms
+ # Ensure progress bar reaches 100%
+ progress.update(total_progress - progress.n)
 
  # Accumulate results from each worker
  for local_counts_depletion, local_counts_enrichment in results.get():
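The shared-memory scaffolding was only worthwhile when annotations was a large dense buffer; a CSR matrix is compact and pickles cleanly, so it can be passed straight into the worker arguments. A minimal sketch of that dispatch shape (toy worker, illustrative names, not the package's functions):

    from multiprocessing import get_context

    from scipy.sparse import eye

    def count_nonzero(matrix):
        # Stand-in for _permutation_process_batch: the CSR matrix arrives pickled
        return matrix.nnz

    if __name__ == "__main__":
        annotations = eye(1000, format="csr")
        with get_context("spawn").Pool(2) as pool:
            print(pool.map(count_nonzero, [annotations, annotations]))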
@@ -181,8 +170,8 @@ def _run_permutation_test(
 
  def _permutation_process_batch(
  permutations: Union[List, Tuple, np.ndarray],
- annotation_matrix: np.ndarray,
- neighborhoods_matrix_obsv: np.ndarray,
+ annotation_matrix: csr_matrix,
+ neighborhoods_matrix_obsv: csr_matrix,
  observed_neighborhood_scores: np.ndarray,
  neighborhood_score_func: Callable,
  num_permutations: int,
@@ -193,8 +182,8 @@ def _permutation_process_batch(
 
  Args:
  permutations (Union[List, Tuple, np.ndarray]): Permutation batch to process.
- annotation_matrix (np.ndarray): The annotation matrix.
- neighborhoods_matrix_obsv (np.ndarray): Observed neighborhoods matrix.
+ annotation_matrix (csr_matrix): Sparse binary matrix representing annotations.
+ neighborhoods_matrix_obsv (csr_matrix): Sparse binary matrix representing observed neighborhoods.
  observed_neighborhood_scores (np.ndarray): Observed neighborhood scores.
  neighborhood_score_func (Callable): Function to calculate neighborhood scores.
  num_permutations (int): Number of total permutations across all subsets.
risk/stats/permutation/test_functions.py CHANGED
@@ -4,6 +4,7 @@ risk/stats/permutation/test_functions
  """
 
  import numpy as np
+ from scipy.sparse import csr_matrix
 
  # Note: Cython optimizations provided minimal performance benefits.
  # The final version with Cython is archived in the `cython_permutation` branch.
@@ -11,46 +12,53 @@ import numpy as np
 
 
  def compute_neighborhood_score_by_sum(
- neighborhoods_matrix: np.ndarray, annotation_matrix: np.ndarray
+ neighborhoods_matrix: csr_matrix, annotation_matrix: csr_matrix
  ) -> np.ndarray:
- """Compute the sum of attribute values for each neighborhood.
+ """Compute the sum of attribute values for each neighborhood using sparse matrices.
 
  Args:
- neighborhoods_matrix (np.ndarray): Binary matrix representing neighborhoods.
- annotation_matrix (np.ndarray): Matrix representing annotation values.
+ neighborhoods_matrix (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotation_matrix (csr_matrix): Sparse matrix representing annotation values.
 
  Returns:
- np.ndarray: Sum of attribute values for each neighborhood.
+ np.ndarray: Dense array of summed attribute values for each neighborhood.
  """
  # Calculate the neighborhood score as the dot product of neighborhoods and annotations
- neighborhood_sum = np.dot(neighborhoods_matrix, annotation_matrix)
- return neighborhood_sum
+ neighborhood_score = neighborhoods_matrix @ annotation_matrix # Sparse matrix multiplication
+ # Convert the result to a dense array for downstream calculations
+ neighborhood_score_dense = neighborhood_score.toarray()
+ return neighborhood_score_dense
 
 
  def compute_neighborhood_score_by_stdev(
- neighborhoods_matrix: np.ndarray, annotation_matrix: np.ndarray
+ neighborhoods_matrix: csr_matrix, annotation_matrix: csr_matrix
  ) -> np.ndarray:
- """Compute the standard deviation of neighborhood scores.
+ """Compute the standard deviation of neighborhood scores for sparse matrices.
 
  Args:
- neighborhoods_matrix (np.ndarray): Binary matrix representing neighborhoods.
- annotation_matrix (np.ndarray): Matrix representing annotation values.
+ neighborhoods_matrix (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotation_matrix (csr_matrix): Sparse matrix representing annotation values.
 
  Returns:
  np.ndarray: Standard deviation of the neighborhood scores.
  """
  # Calculate the neighborhood score as the dot product of neighborhoods and annotations
- neighborhood_score = np.dot(neighborhoods_matrix, annotation_matrix)
- # Calculate the number of elements in each neighborhood
- N = np.sum(neighborhoods_matrix, axis=1)
+ neighborhood_score = neighborhoods_matrix @ annotation_matrix # Sparse matrix multiplication
+ # Calculate the number of elements in each neighborhood (sum of rows)
+ N = neighborhoods_matrix.sum(axis=1).A.flatten() # Convert to 1D array
+ # Avoid division by zero by replacing zeros in N with np.nan temporarily
+ N[N == 0] = np.nan
  # Compute the mean of the neighborhood scores
- M = neighborhood_score / N[:, None]
+ M = neighborhood_score.multiply(1 / N[:, None]).toarray() # Sparse element-wise division
  # Compute the mean of squares (EXX) directly using squared annotation matrix
- EXX = np.dot(neighborhoods_matrix, annotation_matrix**2) / N[:, None]
+ annotation_squared = annotation_matrix.multiply(annotation_matrix) # Element-wise squaring
+ EXX = (neighborhoods_matrix @ annotation_squared).multiply(1 / N[:, None]).toarray()
  # Calculate variance as EXX - M^2
- variance = EXX - M**2
+ variance = EXX - np.power(M, 2)
  # Compute the standard deviation as the square root of the variance
  neighborhood_stdev = np.sqrt(variance)
+ # Replace np.nan back with zeros in case N was 0 (no elements in the neighborhood)
+ neighborhood_stdev[np.isnan(neighborhood_stdev)] = 0
  return neighborhood_stdev
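The stdev scorer relies on the identity Var(X) = E[X^2] - E[X]^2, so only two sparse products are needed per neighborhood. A dense one-neighborhood check (illustrative values):

    import numpy as np

    values = np.array([1.0, 2.0, 3.0])  # annotation values in one neighborhood
    N = values.size
    M = values.sum() / N                # E[X]
    EXX = (values ** 2).sum() / N       # E[X^2]
    assert np.isclose(np.sqrt(EXX - M ** 2), values.std())  # population stdev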
risk/stats/poisson.py CHANGED
@@ -6,39 +6,45 @@ risk/stats/poisson
  from typing import Any, Dict
 
  import numpy as np
+ from scipy.sparse import csr_matrix
  from scipy.stats import poisson
 
 
  def compute_poisson_test(
- neighborhoods: np.ndarray, annotations: np.ndarray, null_distribution: str = "network"
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
+ null_distribution: str = "network",
  ) -> Dict[str, Any]:
- """Compute Poisson test for enrichment and depletion in neighborhoods with selectable null distribution.
+ """
+ Compute Poisson test for enrichment and depletion in neighborhoods with selectable null distribution.
 
  Args:
- neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
- annotations (np.ndarray): Binary matrix representing annotations.
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
 
  Returns:
  Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
  """
  # Matrix multiplication to get the number of annotated nodes in each neighborhood
- annotated_in_neighborhood = neighborhoods @ annotations
+ annotated_in_neighborhood = neighborhoods @ annotations # Sparse result
+ # Convert annotated counts to dense for downstream calculations
+ annotated_in_neighborhood_dense = annotated_in_neighborhood.toarray()
 
  # Compute lambda_expected based on the chosen null distribution
  if null_distribution == "network":
  # Use the mean across neighborhoods (axis=1)
- lambda_expected = np.mean(annotated_in_neighborhood, axis=1, keepdims=True)
+ lambda_expected = np.mean(annotated_in_neighborhood_dense, axis=1, keepdims=True)
  elif null_distribution == "annotations":
  # Use the mean across annotations (axis=0)
- lambda_expected = np.mean(annotated_in_neighborhood, axis=0, keepdims=True)
+ lambda_expected = np.mean(annotated_in_neighborhood_dense, axis=0, keepdims=True)
  else:
  raise ValueError(
  "Invalid null_distribution value. Choose either 'network' or 'annotations'."
  )
 
  # Compute p-values for enrichment and depletion using Poisson distribution
- enrichment_pvals = 1 - poisson.cdf(annotated_in_neighborhood - 1, lambda_expected)
- depletion_pvals = poisson.cdf(annotated_in_neighborhood, lambda_expected)
+ enrichment_pvals = 1 - poisson.cdf(annotated_in_neighborhood_dense - 1, lambda_expected)
+ depletion_pvals = poisson.cdf(annotated_in_neighborhood_dense, lambda_expected)
 
  return {"enrichment_pvals": enrichment_pvals, "depletion_pvals": depletion_pvals}
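Both Poisson tails come from one dense matrix of counts and a per-row (or per-column) rate. The toy version below mirrors the 'network' branch (made-up counts):

    import numpy as np
    from scipy.stats import poisson

    counts = np.array([[3, 0], [1, 2]])                  # annotated nodes per neighborhood/term
    lam = counts.mean(axis=1, keepdims=True)             # expected rate under the 'network' null
    enrichment_pvals = 1 - poisson.cdf(counts - 1, lam)  # P(X >= observed)
    depletion_pvals = poisson.cdf(counts, lam)           # P(X <= observed)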
risk/stats/zscore.py CHANGED
@@ -6,55 +6,61 @@ risk/stats/zscore
  from typing import Any, Dict
 
  import numpy as np
+ from scipy.sparse import csr_matrix
  from scipy.stats import norm
 
 
  def compute_zscore_test(
- neighborhoods: np.ndarray, annotations: np.ndarray, null_distribution: str = "network"
+ neighborhoods: csr_matrix,
+ annotations: csr_matrix,
+ null_distribution: str = "network",
  ) -> Dict[str, Any]:
- """Compute Z-score test for enrichment and depletion in neighborhoods with selectable null distribution.
+ """
+ Compute Z-score test for enrichment and depletion in neighborhoods with selectable null distribution.
 
  Args:
- neighborhoods (np.ndarray): Binary matrix representing neighborhoods.
- annotations (np.ndarray): Binary matrix representing annotations.
+ neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+ annotations (csr_matrix): Sparse binary matrix representing annotations.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
 
  Returns:
  Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
  """
- # Get the total number of nodes in the network
- total_node_count = neighborhoods.shape[0]
+ # Total number of nodes in the network
+ total_node_count = neighborhoods.shape[1]
 
+ # Compute sums
  if null_distribution == "network":
- # Case 1: Use all nodes as the background
  background_population = total_node_count
- neighborhood_sums = np.sum(neighborhoods, axis=0, keepdims=True).T
- annotation_sums = np.sum(annotations, axis=0, keepdims=True)
+ neighborhood_sums = neighborhoods.sum(axis=0).A.flatten() # Dense column sums
+ annotation_sums = annotations.sum(axis=0).A.flatten() # Dense row sums
  elif null_distribution == "annotations":
- # Case 2: Only consider nodes with at least one annotation
- annotated_nodes = np.sum(annotations, axis=1) > 0
- background_population = np.sum(annotated_nodes)
- neighborhood_sums = np.sum(neighborhoods[annotated_nodes], axis=0, keepdims=True).T
- annotation_sums = np.sum(annotations[annotated_nodes], axis=0, keepdims=True)
+ annotated_nodes = annotations.sum(axis=1).A.flatten() > 0 # Dense boolean mask
+ background_population = annotated_nodes.sum()
+ neighborhood_sums = neighborhoods[annotated_nodes].sum(axis=0).A.flatten()
+ annotation_sums = annotations[annotated_nodes].sum(axis=0).A.flatten()
  else:
  raise ValueError(
  "Invalid null_distribution value. Choose either 'network' or 'annotations'."
  )
 
- # Matrix multiplication for annotated nodes in each neighborhood
- observed = neighborhoods.T @ annotations
- # Compute expected values under the null distribution
+ # Observed values
+ observed = (neighborhoods.T @ annotations).toarray() # Convert sparse result to dense
+ # Expected values under the null
+ neighborhood_sums = neighborhood_sums.reshape(-1, 1) # Ensure correct shape
+ annotation_sums = annotation_sums.reshape(1, -1) # Ensure correct shape
  expected = (neighborhood_sums @ annotation_sums) / background_population
- # Compute standard deviation under the null distribution
+
+ # Standard deviation under the null
  std_dev = np.sqrt(
  expected
  * (1 - annotation_sums / background_population)
  * (1 - neighborhood_sums / background_population)
  )
- # Avoid division by zero
- std_dev[std_dev == 0] = np.nan # Mark invalid computations
+ std_dev[std_dev == 0] = np.nan # Avoid division by zero
  # Compute Z-scores
  z_scores = (observed - expected) / std_dev
+
  # Convert Z-scores to depletion and enrichment p-values
  enrichment_pvals = norm.sf(z_scores) # Upper tail
  depletion_pvals = norm.cdf(z_scores) # Lower tail
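Once observed, expected, and std_dev are dense, the two tails are direct normal probabilities. A toy check of the final step (illustrative numbers):

    import numpy as np
    from scipy.stats import norm

    observed = np.array([[3.0, 1.0]])
    expected = np.array([[1.5, 1.5]])
    std_dev = np.array([[1.0, 1.0]])

    z_scores = (observed - expected) / std_dev
    enrichment_pvals = norm.sf(z_scores)   # upper tail, P(Z >= z)
    depletion_pvals = norm.cdf(z_scores)   # lower tail, P(Z <= z)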
risk_network-0.0.9b24.dist-info/METADATA → risk_network-0.0.9b25.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: risk-network
- Version: 0.0.9b24
+ Version: 0.0.9b25
  Summary: A Python package for biological network analysis
  Author: Ira Horecka
  Author-email: Ira Horecka <ira89@icloud.com>
risk_network-0.0.9b25.dist-info/RECORD ADDED
@@ -0,0 +1,44 @@
+ risk/__init__.py,sha256=Rgsnt64VMhbyTleA_DXr5VcFJG6748i86zd0VZHy9r4,127
+ risk/constants.py,sha256=XInRaH78Slnw_sWgAsBFbUHkyA0h0jL0DKGuQNbOvjM,550
+ risk/risk.py,sha256=s827_lRknFseOP9O4zW8sP-IcCd2EzrpV_tnVY_tz5s,1104
+ risk/annotations/__init__.py,sha256=parsbcux1U4urpUqh9AdzbDWuLj9HlMidycMPkpSQFo,179
+ risk/annotations/annotations.py,sha256=XmVuLL5NFAj6F30fZY22N8nb4LK6sig7fE0NXL1iZp8,14497
+ risk/annotations/io.py,sha256=z1AJySsU-KL_IYuHa7j3nvuczmOHgK3WfaQ4TRunvrA,10499
+ risk/log/__init__.py,sha256=7LxDysQu7doi0LAvlY2YbjN6iJH0fNknqy8lSLgeljo,217
+ risk/log/console.py,sha256=PgjyEvyhYLUSHXPUKEqOmxsDsfrjPICIgqo_cAHq0N8,4575
+ risk/log/parameters.py,sha256=VtwfMzLU1xI4yji3-Ch5vHjH-KdwTfwaEMmi7hFQTs0,5716
+ risk/neighborhoods/__init__.py,sha256=Q74HwTH7okI-vaskJPy2bYwb5sNjGASTzJ6m8V8arCU,234
+ risk/neighborhoods/api.py,sha256=KdUouMHJPwvePJGdz7Ck1GWYhN96QDb_SuPyTt3KwAc,23515
+ risk/neighborhoods/community.py,sha256=VIDvB-SsMDDvWkUaYXf_E-gcg0HELMVv2MKshPwJAFQ,15480
+ risk/neighborhoods/domains.py,sha256=MufM4cbvP3HrJyESOuGT0wYD_cz3rjT0SGqEnbytkh8,12523
+ risk/neighborhoods/neighborhoods.py,sha256=bBUY7hXqcsOoAEkPdRoRNuj36WsllXicmz_LxZfEuyw,21186
+ risk/network/__init__.py,sha256=oVi3FA1XXKD84014Cykq-9bpX4_s0F3aAUfNOU-07Qw,73
+ risk/network/geometry.py,sha256=omyb9afSKMUtQ-RKVHUoRyxJifOW0ASenHjyCjg43kg,6836
+ risk/network/io.py,sha256=JV5hqf1oIwWUVw07BjhD0qACQGbtIeA8NSMDcFql88k,23465
+ risk/network/graph/__init__.py,sha256=ziGJew3yhtqvrb9LUuneDu_LwW2Wa9vd4UuhoL5l1CA,91
+ risk/network/graph/api.py,sha256=Ag4PjFTX6BUvmW7ZdfIgwdsr8URigX9jD9yEFRXUxrU,8220
+ risk/network/graph/network.py,sha256=KdIBM_-flHMWcBK4RUjU_QRfOZIf_yv9fv4L7AOLkqU,12199
+ risk/network/graph/summary.py,sha256=8IenFZfhyzcg5aGNJp7Zjb0Umy0mFNmJlfwXcO7y8MU,10311
+ risk/network/plotter/__init__.py,sha256=ixXQxpBVpNIz1y9tUHZ7CiJmGfewvbvjuB1LQ-AIf1s,93
+ risk/network/plotter/api.py,sha256=cLZHq-rn_5FJwIWM5hYlQMobPmaxCE-P2iqgxTDIOTQ,1860
+ risk/network/plotter/canvas.py,sha256=l-Se86DMDJMHh8Yn-_hsl0_ipoazHLJGRCqXcc9HK4M,13498
+ risk/network/plotter/contour.py,sha256=svi76suYlVYq2VoDQxXmun8Hmo0lI2CQRjAyHg0qdhk,15490
+ risk/network/plotter/labels.py,sha256=QesD1ybseA6ldLmWMqVaAqSPR34yVEgEzXzg1AKQD6o,45513
+ risk/network/plotter/network.py,sha256=wcBf1GaM1wPzW-iXTrLzOmlG2_9wwfll_hJUzUO2u2Y,19917
+ risk/network/plotter/utils/colors.py,sha256=EFlIUZ3MGSKoHeZi9cgR6uLKK5GGJ4QzE6lmnrHViLw,18967
+ risk/network/plotter/utils/layout.py,sha256=2P4Bqi1dGiX9KsriLYqiq1KlHpsMdZemAUza4WcYoNA,3634
+ risk/stats/__init__.py,sha256=1CPRtT1LDwudrvFgkVtSom8cp4cM7b4X6b4fHPaNHw0,405
+ risk/stats/binom.py,sha256=8Qwcxnq1u-AycwQs_sQxwuxgkgDpES-A-kIcj4fRc3g,2032
+ risk/stats/chi2.py,sha256=MGFNrWP40i9TxnMsZYbDgqdMrN_Fe0xFsnWU8xNsVSs,3046
+ risk/stats/hypergeom.py,sha256=VfQBtpgSGG826uBP1WyBMavP3ylZnhponUZ2rHFdGAE,2502
+ risk/stats/poisson.py,sha256=_KHe9g8XNRD4-Q486zx2UgHCO2QyvBOiHuX3hRZLEqc,2050
+ risk/stats/stats.py,sha256=y2DMJF3uKRIWRyYiCd2Kwxa-EqOzX5HsMBms_Vw6wK8,7322
+ risk/stats/zscore.py,sha256=Jx9cLKAHiDnrgW_Su9KZYYQiTVsuyJMC7vXBusnEI-c,2648
+ risk/stats/permutation/__init__.py,sha256=OLmYLm2uj96hPsSaUs0vUqFYw6Thwch_aHtpL7L0ZFw,127
+ risk/stats/permutation/permutation.py,sha256=693DyWPNz6L_wCL06F7gj2u1df0qVc4F3Na36jCLYMI,10577
+ risk/stats/permutation/test_functions.py,sha256=D3XMPM8CasUNytWSRce22TI6KK6XulYn5uGG4lWxaHs,3120
+ risk_network-0.0.9b25.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
+ risk_network-0.0.9b25.dist-info/METADATA,sha256=XJSNAooxsGNwoMnp-6Nx0YCnp1zBWVm9ej2yjtUUPDg,47627
+ risk_network-0.0.9b25.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ risk_network-0.0.9b25.dist-info/top_level.txt,sha256=NX7C2PFKTvC1JhVKv14DFlFAIFnKc6Lpsu1ZfxvQwVw,5
+ risk_network-0.0.9b25.dist-info/RECORD,,
risk/network/plot/__init__.py REMOVED
@@ -1,6 +0,0 @@
- """
- risk/network/plot
- ~~~~~~~~~~~~~~~~~
- """
-
- from risk.network.plot.io import PlotterIO
risk_network-0.0.9b24.dist-info/RECORD REMOVED
@@ -1,44 +0,0 @@
- risk/__init__.py,sha256=m6D5527XrsqQAj0y2VgVhiWu-5vdX8wBFcTqP397l7g,127
- risk/constants.py,sha256=XInRaH78Slnw_sWgAsBFbUHkyA0h0jL0DKGuQNbOvjM,550
- risk/risk.py,sha256=M7OxLNrkIwmI7yvbdMGkSACwIgJFRbPnB_L5ZaayhrE,1035
- risk/annotations/__init__.py,sha256=parsbcux1U4urpUqh9AdzbDWuLj9HlMidycMPkpSQFo,179
- risk/annotations/annotations.py,sha256=rekG32mUOyh9wQvVrye037vz9f6IiPT3JPFDzSV5ROg,14534
- risk/annotations/io.py,sha256=z1AJySsU-KL_IYuHa7j3nvuczmOHgK3WfaQ4TRunvrA,10499
- risk/log/__init__.py,sha256=7LxDysQu7doi0LAvlY2YbjN6iJH0fNknqy8lSLgeljo,217
- risk/log/console.py,sha256=PgjyEvyhYLUSHXPUKEqOmxsDsfrjPICIgqo_cAHq0N8,4575
- risk/log/parameters.py,sha256=VtwfMzLU1xI4yji3-Ch5vHjH-KdwTfwaEMmi7hFQTs0,5716
- risk/neighborhoods/__init__.py,sha256=GUS9uT4BIqTOs2_htYjKM_7MmyS_LtHnVqLWCNRkRrU,232
- risk/neighborhoods/community.py,sha256=VIDvB-SsMDDvWkUaYXf_E-gcg0HELMVv2MKshPwJAFQ,15480
- risk/neighborhoods/domains.py,sha256=MufM4cbvP3HrJyESOuGT0wYD_cz3rjT0SGqEnbytkh8,12523
- risk/neighborhoods/io.py,sha256=oL_Xn5qyqwdXapHHQQlyqcUshE3wb9fn1WWePiFNC2g,23429
- risk/neighborhoods/neighborhoods.py,sha256=bBUY7hXqcsOoAEkPdRoRNuj36WsllXicmz_LxZfEuyw,21186
- risk/network/__init__.py,sha256=70tppeoGooNknUwAlJudtwlnG8G0jZ_bOR7TZpLOvs0,152
- risk/network/geometry.py,sha256=omyb9afSKMUtQ-RKVHUoRyxJifOW0ASenHjyCjg43kg,6836
- risk/network/io.py,sha256=JV5hqf1oIwWUVw07BjhD0qACQGbtIeA8NSMDcFql88k,23465
- risk/network/graph/__init__.py,sha256=ljD51OEvJ4HrJQJbOZJ5kwyB_9oltJnc-Znfo87xgPM,89
- risk/network/graph/io.py,sha256=yundGlbKx9SCuE0vf5NeqY3ADyUHGp-h15xbKNEpGGs,8216
- risk/network/graph/network.py,sha256=KdIBM_-flHMWcBK4RUjU_QRfOZIf_yv9fv4L7AOLkqU,12199
- risk/network/graph/summary.py,sha256=8ukRlCPLZjkzvVc80QlNmXVRFY7A9yCnDWpfT3IV7ek,10067
- risk/network/plot/__init__.py,sha256=v_rKZgpq5tnOVNEMdOLhyZvBeWl7Mgc9ZXkZ9LQ8Mso,88
- risk/network/plot/canvas.py,sha256=sqLFUIwDRPb-va2oQMwp7mF-NefrA3pWUuajQgvSEug,13492
- risk/network/plot/contour.py,sha256=dO_zaVMaEY1WKaJXwulZrh9Ybvk3A3531ZQ6gXWMuuM,15487
- risk/network/plot/io.py,sha256=1_zJqbDvJra33PjJ0jMaA-WhqW0x1OXcf2kSPpaccOM,1853
- risk/network/plot/labels.py,sha256=re9hvwFG5fLjjtbKb6Vs_CkwqWeSWwyaPpDiwDERNqY,45507
- risk/network/plot/network.py,sha256=XUYuz8ER_ilvs7ZvRzsYfrQafsCD5P8f3EqWuWijWnU,19902
- risk/network/plot/utils/colors.py,sha256=EFlIUZ3MGSKoHeZi9cgR6uLKK5GGJ4QzE6lmnrHViLw,18967
- risk/network/plot/utils/layout.py,sha256=2P4Bqi1dGiX9KsriLYqiq1KlHpsMdZemAUza4WcYoNA,3634
- risk/stats/__init__.py,sha256=1CPRtT1LDwudrvFgkVtSom8cp4cM7b4X6b4fHPaNHw0,405
- risk/stats/binom.py,sha256=b70_C4-AnXJiHfjMOas0tDP8N3-M4_0zYYaZkDKVPxU,2015
- risk/stats/chi2.py,sha256=mhdppli1KI_F-dIvX6MALgnWpjS_89aZVENofa6u41o,2604
- risk/stats/hypergeom.py,sha256=oc39f02ViB1vQ-uaDrxG_tzAT6dxQBRjc88EK2EGn78,2282
- risk/stats/poisson.py,sha256=polLgwS08MTCNzupYdmMUoEUYrJOjAbcYtYwjlfeE5Y,1803
- risk/stats/stats.py,sha256=y2DMJF3uKRIWRyYiCd2Kwxa-EqOzX5HsMBms_Vw6wK8,7322
- risk/stats/zscore.py,sha256=D_v1tBnhLbmrRG0Ats3e1L75GKsen_ja0uTv6xEpWGA,2570
- risk/stats/permutation/__init__.py,sha256=OLmYLm2uj96hPsSaUs0vUqFYw6Thwch_aHtpL7L0ZFw,127
- risk/stats/permutation/permutation.py,sha256=UBR0j6sqFowwR0tLWWr0bLtCWgRx7LLv-0P0OPmA4a4,10943
- risk/stats/permutation/test_functions.py,sha256=lftOude6hee0pyR80HlBD32522JkDoN5hrKQ9VEbuoY,2345
- risk_network-0.0.9b24.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
- risk_network-0.0.9b24.dist-info/METADATA,sha256=vkkt_9ILy_6UdguZ8AXwMi_3MeQoHJ4eySAR7pSs-7o,47627
- risk_network-0.0.9b24.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- risk_network-0.0.9b24.dist-info/top_level.txt,sha256=NX7C2PFKTvC1JhVKv14DFlFAIFnKc6Lpsu1ZfxvQwVw,5
- risk_network-0.0.9b24.dist-info/RECORD,,