risk-network 0.0.8b26-py3-none-any.whl → 0.0.9b26-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. risk/__init__.py +2 -2
  2. risk/annotations/__init__.py +2 -2
  3. risk/annotations/annotations.py +74 -47
  4. risk/annotations/io.py +47 -31
  5. risk/log/__init__.py +4 -2
  6. risk/log/{config.py → console.py} +5 -3
  7. risk/log/{params.py → parameters.py} +17 -42
  8. risk/neighborhoods/__init__.py +3 -5
  9. risk/neighborhoods/api.py +446 -0
  10. risk/neighborhoods/community.py +255 -77
  11. risk/neighborhoods/domains.py +62 -31
  12. risk/neighborhoods/neighborhoods.py +156 -160
  13. risk/network/__init__.py +1 -3
  14. risk/network/geometry.py +65 -57
  15. risk/network/graph/__init__.py +6 -0
  16. risk/network/graph/api.py +194 -0
  17. risk/network/{graph.py → graph/network.py} +87 -37
  18. risk/network/graph/summary.py +254 -0
  19. risk/network/io.py +56 -47
  20. risk/network/plotter/__init__.py +6 -0
  21. risk/network/plotter/api.py +54 -0
  22. risk/network/{plot → plotter}/canvas.py +7 -4
  23. risk/network/{plot → plotter}/contour.py +22 -19
  24. risk/network/{plot → plotter}/labels.py +69 -74
  25. risk/network/{plot → plotter}/network.py +170 -34
  26. risk/network/{plot/utils/color.py → plotter/utils/colors.py} +104 -112
  27. risk/network/{plot → plotter}/utils/layout.py +8 -5
  28. risk/risk.py +11 -500
  29. risk/stats/__init__.py +8 -4
  30. risk/stats/binom.py +51 -0
  31. risk/stats/chi2.py +69 -0
  32. risk/stats/hypergeom.py +27 -17
  33. risk/stats/permutation/__init__.py +1 -1
  34. risk/stats/permutation/permutation.py +44 -38
  35. risk/stats/permutation/test_functions.py +25 -17
  36. risk/stats/poisson.py +15 -9
  37. risk/stats/stats.py +15 -13
  38. risk/stats/zscore.py +68 -0
  39. {risk_network-0.0.8b26.dist-info → risk_network-0.0.9b26.dist-info}/METADATA +9 -5
  40. risk_network-0.0.9b26.dist-info/RECORD +44 -0
  41. {risk_network-0.0.8b26.dist-info → risk_network-0.0.9b26.dist-info}/WHEEL +1 -1
  42. risk/network/plot/__init__.py +0 -6
  43. risk/network/plot/plotter.py +0 -137
  44. risk_network-0.0.8b26.dist-info/RECORD +0 -37
  45. {risk_network-0.0.8b26.dist-info → risk_network-0.0.9b26.dist-info}/LICENSE +0 -0
  46. {risk_network-0.0.8b26.dist-info → risk_network-0.0.9b26.dist-info}/top_level.txt +0 -0
risk/risk.py CHANGED
@@ -3,35 +3,21 @@ risk/risk
 ~~~~~~~~~
 """
 
-import copy
-from typing import Any, Dict, List, Tuple, Union
+from risk.network import NetworkIO
+from risk.annotations import AnnotationsIO
+from risk.neighborhoods import NeighborhoodsAPI
+from risk.network.graph import GraphAPI
+from risk.network.plotter import PlotterAPI
 
-import networkx as nx
-import numpy as np
-import pandas as pd
+from risk.log import params, set_global_verbosity
 
-from risk.annotations import AnnotationsIO, define_top_annotations
-from risk.log import params, logger, log_header, set_global_verbosity
-from risk.neighborhoods import (
-    define_domains,
-    get_network_neighborhoods,
-    process_neighborhoods,
-    trim_domains_and_top_annotations,
-)
-from risk.network import NetworkIO, NetworkGraph, NetworkPlotter
-from risk.stats import (
-    calculate_significance_matrices,
-    compute_hypergeom_test,
-    compute_permutation_test,
-    compute_poisson_test,
-)
 
-
-class RISK(NetworkIO, AnnotationsIO):
+class RISK(NetworkIO, AnnotationsIO, NeighborhoodsAPI, GraphAPI, PlotterAPI):
     """RISK: A class for network analysis and visualization.
 
     The RISK class integrates functionalities for loading networks, processing annotations,
-    and performing network-based statistical analysis, such as neighborhood significance testing.
+    performing network-based statistical analysis to quantify neighborhood relationships,
+    and visualizing networks and their properties.
     """
 
     def __init__(self, verbose: bool = True):
@@ -42,481 +28,6 @@ class RISK(NetworkIO, AnnotationsIO):
         """
         # Set global verbosity for logging
        set_global_verbosity(verbose)
-        # Initialize and log network parameters
-        params.initialize()
+        # Provide public access to network parameters
+        self.params = params
         super().__init__()
-
-    @property
-    def params(self) -> params:
-        """Access the logged network parameters.
-
-        Returns:
-            Params: An instance of the Params class with logged parameters and methods to access or update them.
-        """
-        return params
-
-    def load_neighborhoods_by_hypergeom(
-        self,
-        network: nx.Graph,
-        annotations: Dict[str, Any],
-        distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
-        louvain_resolution: float = 0.1,
-        edge_length_threshold: Union[float, List, Tuple, np.ndarray] = 0.5,
-        null_distribution: str = "network",
-        random_seed: int = 888,
-    ) -> Dict[str, Any]:
-        """Load significant neighborhoods for the network using the hypergeometric test.
-
-        Args:
-            network (nx.Graph): The network graph.
-            annotations (Dict[str, Any]): The annotations associated with the network.
-            distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
-                metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
-                'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
-            louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
-            edge_length_threshold (float, List, Tuple, or np.ndarray, optional): Edge length threshold(s) for creating subgraphs.
-                Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
-                Defaults to 0.5.
-            null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
-            random_seed (int, optional): Seed for random number generation. Defaults to 888.
-
-        Returns:
-            Dict[str, Any]: Computed significance of neighborhoods.
-        """
-        log_header("Running hypergeometric test")
-        # Log neighborhood analysis parameters
-        params.log_neighborhoods(
-            distance_metric=distance_metric,
-            louvain_resolution=louvain_resolution,
-            edge_length_threshold=edge_length_threshold,
-            statistical_test_function="hypergeom",
-            null_distribution=null_distribution,
-            random_seed=random_seed,
-        )
-
-        # Make a copy of the network to avoid modifying the original
-        network = copy.deepcopy(network)
-
-        # Load neighborhoods based on the network and distance metric
-        neighborhoods = self._load_neighborhoods(
-            network,
-            distance_metric,
-            louvain_resolution=louvain_resolution,
-            edge_length_threshold=edge_length_threshold,
-            random_seed=random_seed,
-        )
-        # Run hypergeometric test to compute neighborhood significance
-        neighborhood_significance = compute_hypergeom_test(
-            neighborhoods=neighborhoods,
-            annotations=annotations["matrix"],
-            null_distribution=null_distribution,
-        )
-
-        # Return the computed neighborhood significance
-        return neighborhood_significance
-
-    def load_neighborhoods_by_poisson(
-        self,
-        network: nx.Graph,
-        annotations: Dict[str, Any],
-        distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
-        louvain_resolution: float = 0.1,
-        edge_length_threshold: Union[float, List, Tuple, np.ndarray] = 0.5,
-        null_distribution: str = "network",
-        random_seed: int = 888,
-    ) -> Dict[str, Any]:
-        """Load significant neighborhoods for the network using the Poisson test.
-
-        Args:
-            network (nx.Graph): The network graph.
-            annotations (Dict[str, Any]): The annotations associated with the network.
-            distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
-                metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
-                'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
-            louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
-            edge_length_threshold (float, List, Tuple, or np.ndarray, optional): Edge length threshold(s) for creating subgraphs.
-                Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
-                Defaults to 0.5.
-            null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
-            random_seed (int, optional): Seed for random number generation. Defaults to 888.
-
-        Returns:
-            Dict[str, Any]: Computed significance of neighborhoods.
-        """
-        log_header("Running Poisson test")
-        # Log neighborhood analysis parameters
-        params.log_neighborhoods(
-            distance_metric=distance_metric,
-            louvain_resolution=louvain_resolution,
-            edge_length_threshold=edge_length_threshold,
-            statistical_test_function="poisson",
-            null_distribution=null_distribution,
-            random_seed=random_seed,
-        )
-
-        # Make a copy of the network to avoid modifying the original
-        network = copy.deepcopy(network)
-
-        # Load neighborhoods based on the network and distance metric
-        neighborhoods = self._load_neighborhoods(
-            network,
-            distance_metric,
-            louvain_resolution=louvain_resolution,
-            edge_length_threshold=edge_length_threshold,
-            random_seed=random_seed,
-        )
-        # Run Poisson test to compute neighborhood significance
-        neighborhood_significance = compute_poisson_test(
-            neighborhoods=neighborhoods,
-            annotations=annotations["matrix"],
-            null_distribution=null_distribution,
-        )
-
-        # Return the computed neighborhood significance
-        return neighborhood_significance
-
-    def load_neighborhoods_by_permutation(
-        self,
-        network: nx.Graph,
-        annotations: Dict[str, Any],
-        distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
-        louvain_resolution: float = 0.1,
-        edge_length_threshold: Union[float, List, Tuple, np.ndarray] = 0.5,
-        score_metric: str = "sum",
-        null_distribution: str = "network",
-        num_permutations: int = 1000,
-        random_seed: int = 888,
-        max_workers: int = 1,
-    ) -> Dict[str, Any]:
-        """Load significant neighborhoods for the network using the permutation test.
-
-        Args:
-            network (nx.Graph): The network graph.
-            annotations (Dict[str, Any]): The annotations associated with the network.
-            distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
-                metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
-                'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
-            louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
-            edge_length_threshold (float, List, Tuple, or np.ndarray, optional): Edge length threshold(s) for creating subgraphs.
-                Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
-                Defaults to 0.5.
-            score_metric (str, optional): Scoring metric for neighborhood significance. Defaults to "sum".
-            null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
-            num_permutations (int, optional): Number of permutations for significance testing. Defaults to 1000.
-            random_seed (int, optional): Seed for random number generation. Defaults to 888.
-            max_workers (int, optional): Maximum number of workers for parallel computation. Defaults to 1.
-
-        Returns:
-            Dict[str, Any]: Computed significance of neighborhoods.
-        """
-        log_header("Running permutation test")
-        # Log neighborhood analysis parameters
-        params.log_neighborhoods(
-            distance_metric=distance_metric,
-            louvain_resolution=louvain_resolution,
-            edge_length_threshold=edge_length_threshold,
-            statistical_test_function="permutation",
-            score_metric=score_metric,
-            null_distribution=null_distribution,
-            num_permutations=num_permutations,
-            random_seed=random_seed,
-            max_workers=max_workers,
-        )
-
-        # Make a copy of the network to avoid modifying the original
-        network = copy.deepcopy(network)
-
-        # Load neighborhoods based on the network and distance metric
-        neighborhoods = self._load_neighborhoods(
-            network,
-            distance_metric,
-            louvain_resolution=louvain_resolution,
-            edge_length_threshold=edge_length_threshold,
-            random_seed=random_seed,
-        )
-
-        # Log and display permutation test settings
-        logger.debug(f"Neighborhood scoring metric: '{score_metric}'")
-        logger.debug(f"Null distribution: '{null_distribution}'")
-        logger.debug(f"Number of permutations: {num_permutations}")
-        logger.debug(f"Maximum workers: {max_workers}")
-        # Run permutation test to compute neighborhood significance
-        neighborhood_significance = compute_permutation_test(
-            neighborhoods=neighborhoods,
-            annotations=annotations["matrix"],
-            score_metric=score_metric,
-            null_distribution=null_distribution,
-            num_permutations=num_permutations,
-            random_seed=random_seed,
-            max_workers=max_workers,
-        )
-
-        # Return the computed neighborhood significance
-        return neighborhood_significance
-
-    def load_graph(
-        self,
-        network: nx.Graph,
-        annotations: Dict[str, Any],
-        neighborhoods: Dict[str, Any],
-        tail: str = "right",  # OPTIONS: "right" (enrichment), "left" (depletion), "both"
-        pval_cutoff: float = 0.01,  # OPTIONS: Any value between 0 to 1
-        fdr_cutoff: float = 0.9999,  # OPTIONS: Any value between 0 to 1
-        impute_depth: int = 0,
-        prune_threshold: float = 0.0,
-        linkage_criterion: str = "distance",
-        linkage_method: str = "average",
-        linkage_metric: str = "yule",
-        min_cluster_size: int = 5,
-        max_cluster_size: int = 1000,
-    ) -> NetworkGraph:
-        """Load and process the network graph, defining top annotations and domains.
-
-        Args:
-            network (nx.Graph): The network graph.
-            annotations (pd.DataFrame): DataFrame containing annotation data for the network.
-            neighborhoods (Dict[str, Any]): Neighborhood enrichment data.
-            tail (str, optional): Type of significance tail ("right", "left", "both"). Defaults to "right".
-            pval_cutoff (float, optional): p-value cutoff for significance. Defaults to 0.01.
-            fdr_cutoff (float, optional): FDR cutoff for significance. Defaults to 0.9999.
-            impute_depth (int, optional): Depth for imputing neighbors. Defaults to 0.
-            prune_threshold (float, optional): Distance threshold for pruning neighbors. Defaults to 0.0.
-            linkage_criterion (str, optional): Clustering criterion for defining domains. Defaults to "distance".
-            linkage_method (str, optional): Clustering method to use. Defaults to "average".
-            linkage_metric (str, optional): Metric to use for calculating distances. Defaults to "yule".
-            min_cluster_size (int, optional): Minimum size for clusters. Defaults to 5.
-            max_cluster_size (int, optional): Maximum size for clusters. Defaults to 1000.
-
-        Returns:
-            NetworkGraph: A fully initialized and processed NetworkGraph object.
-        """
-        # Log the parameters and display headers
-        log_header("Finding significant neighborhoods")
-        params.log_graph(
-            tail=tail,
-            pval_cutoff=pval_cutoff,
-            fdr_cutoff=fdr_cutoff,
-            impute_depth=impute_depth,
-            prune_threshold=prune_threshold,
-            linkage_criterion=linkage_criterion,
-            linkage_method=linkage_method,
-            linkage_metric=linkage_metric,
-            min_cluster_size=min_cluster_size,
-            max_cluster_size=max_cluster_size,
-        )
-
-        # Make a copy of the network to avoid modifying the original
-        network = copy.deepcopy(network)
-
-        logger.debug(f"p-value cutoff: {pval_cutoff}")
-        logger.debug(f"FDR BH cutoff: {fdr_cutoff}")
-        logger.debug(
-            f"Significance tail: '{tail}' ({'enrichment' if tail == 'right' else 'depletion' if tail == 'left' else 'both'})"
-        )
-        # Calculate significant neighborhoods based on the provided parameters
-        significant_neighborhoods = calculate_significance_matrices(
-            neighborhoods["depletion_pvals"],
-            neighborhoods["enrichment_pvals"],
-            tail=tail,
-            pval_cutoff=pval_cutoff,
-            fdr_cutoff=fdr_cutoff,
-        )
-
-        log_header("Processing neighborhoods")
-        # Process neighborhoods by imputing and pruning based on the given settings
-        processed_neighborhoods = process_neighborhoods(
-            network=network,
-            neighborhoods=significant_neighborhoods,
-            impute_depth=impute_depth,
-            prune_threshold=prune_threshold,
-        )
-
-        log_header("Finding top annotations")
-        logger.debug(f"Min cluster size: {min_cluster_size}")
-        logger.debug(f"Max cluster size: {max_cluster_size}")
-        # Define top annotations based on processed neighborhoods
-        top_annotations = self._define_top_annotations(
-            network=network,
-            annotations=annotations,
-            neighborhoods=processed_neighborhoods,
-            min_cluster_size=min_cluster_size,
-            max_cluster_size=max_cluster_size,
-        )
-
-        log_header("Optimizing distance threshold for domains")
-        # Define domains in the network using the specified clustering settings
-        domains = self._define_domains(
-            neighborhoods=processed_neighborhoods,
-            top_annotations=top_annotations,
-            linkage_criterion=linkage_criterion,
-            linkage_method=linkage_method,
-            linkage_metric=linkage_metric,
-        )
-        # Trim domains and top annotations based on cluster size constraints
-        top_annotations, domains, trimmed_domains = trim_domains_and_top_annotations(
-            domains=domains,
-            top_annotations=top_annotations,
-            min_cluster_size=min_cluster_size,
-            max_cluster_size=max_cluster_size,
-        )
-
-        # Prepare node mapping and enrichment sums for the final NetworkGraph object
-        ordered_nodes = annotations["ordered_nodes"]
-        node_label_to_id = dict(zip(ordered_nodes, range(len(ordered_nodes))))
-        node_enrichment_sums = processed_neighborhoods["node_enrichment_sums"]
-
-        # Return the fully initialized NetworkGraph object
-        return NetworkGraph(
-            network=network,
-            top_annotations=top_annotations,
-            domains=domains,
-            trimmed_domains=trimmed_domains,
-            node_label_to_node_id_map=node_label_to_id,
-            node_enrichment_sums=node_enrichment_sums,
-        )
-
-    def load_plotter(
-        self,
-        graph: NetworkGraph,
-        figsize: Union[List, Tuple, np.ndarray] = (10, 10),
-        background_color: str = "white",
-        background_alpha: Union[float, None] = 1.0,
-        pad: float = 0.3,
-    ) -> NetworkPlotter:
-        """Get a NetworkPlotter object for plotting.
-
-        Args:
-            graph (NetworkGraph): The graph to plot.
-            figsize (List, Tuple, or np.ndarray, optional): Size of the figure. Defaults to (10, 10).
-            background_color (str, optional): Background color of the plot. Defaults to "white".
-            background_alpha (float, None, optional): Transparency level of the background color. If provided, it overrides
-                any existing alpha values found in background_color. Defaults to 1.0.
-            pad (float, optional): Padding value to adjust the axis limits. Defaults to 0.3.
-
-        Returns:
-            NetworkPlotter: A NetworkPlotter object configured with the given parameters.
-        """
-        log_header("Loading plotter")
-
-        # Initialize and return a NetworkPlotter object
-        return NetworkPlotter(
-            graph,
-            figsize=figsize,
-            background_color=background_color,
-            background_alpha=background_alpha,
-            pad=pad,
-        )
-
-    def _load_neighborhoods(
-        self,
-        network: nx.Graph,
-        distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
-        louvain_resolution: float = 0.1,
-        edge_length_threshold: Union[float, List, Tuple, np.ndarray] = 0.5,
-        random_seed: int = 888,
-    ) -> np.ndarray:
-        """Load significant neighborhoods for the network.
-
-        Args:
-            network (nx.Graph): The network graph.
-            annotations (pd.DataFrame): The matrix of annotations associated with the network.
-            distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
-                metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
-                'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
-            louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
-            edge_length_threshold (float, List, Tuple, or np.ndarray, optional): Edge length threshold(s) for creating subgraphs.
-                Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
-                Defaults to 0.5.
-            random_seed (int, optional): Seed for random number generation. Defaults to 888.
-
-        Returns:
-            np.ndarray: Neighborhood matrix calculated based on the selected distance metric.
-        """
-        # Display the chosen distance metric
-        if distance_metric == "louvain":
-            for_print_distance_metric = f"louvain (resolution={louvain_resolution})"
-        else:
-            for_print_distance_metric = distance_metric
-        # Log and display neighborhood settings
-        logger.debug(f"Distance metric: '{for_print_distance_metric}'")
-        logger.debug(f"Edge length threshold: {edge_length_threshold}")
-        logger.debug(f"Random seed: {random_seed}")
-
-        # Compute neighborhoods based on the network and distance metric
-        neighborhoods = get_network_neighborhoods(
-            network,
-            distance_metric,
-            edge_length_threshold,
-            louvain_resolution=louvain_resolution,
-            random_seed=random_seed,
-        )
-
-        # Return the computed neighborhoods
-        return neighborhoods
-
-    def _define_top_annotations(
-        self,
-        network: nx.Graph,
-        annotations: Dict[str, Any],
-        neighborhoods: Dict[str, Any],
-        min_cluster_size: int = 5,
-        max_cluster_size: int = 1000,
-    ) -> pd.DataFrame:
-        """Define top annotations for the network.
-
-        Args:
-            network (nx.Graph): The network graph.
-            annotations (Dict[str, Any]): Annotations data for the network.
-            neighborhoods (Dict[str, Any]): Neighborhood enrichment data.
-            min_cluster_size (int, optional): Minimum size for clusters. Defaults to 5.
-            max_cluster_size (int, optional): Maximum size for clusters. Defaults to 1000.
-
-        Returns:
-            Dict[str, Any]: Top annotations identified within the network.
-        """
-        # Extract necessary data from annotations and neighborhoods
-        ordered_annotations = annotations["ordered_annotations"]
-        neighborhood_enrichment_sums = neighborhoods["neighborhood_enrichment_counts"]
-        significant_enrichment_matrix = neighborhoods["significant_enrichment_matrix"]
-        significant_binary_enrichment_matrix = neighborhoods["significant_binary_enrichment_matrix"]
-        # Call external function to define top annotations
-        return define_top_annotations(
-            network=network,
-            ordered_annotation_labels=ordered_annotations,
-            neighborhood_enrichment_sums=neighborhood_enrichment_sums,
-            significant_enrichment_matrix=significant_enrichment_matrix,
-            significant_binary_enrichment_matrix=significant_binary_enrichment_matrix,
-            min_cluster_size=min_cluster_size,
-            max_cluster_size=max_cluster_size,
-        )
-
-    def _define_domains(
-        self,
-        neighborhoods: Dict[str, Any],
-        top_annotations: pd.DataFrame,
-        linkage_criterion: str,
-        linkage_method: str,
-        linkage_metric: str,
-    ) -> pd.DataFrame:
-        """Define domains in the network based on enrichment data.
-
-        Args:
-            neighborhoods (Dict[str, Any]): Enrichment data for neighborhoods.
-            top_annotations (pd.DataFrame): Enrichment matrix for top annotations.
-            linkage_criterion (str): Clustering criterion for defining domains.
-            linkage_method (str): Clustering method to use.
-            linkage_metric (str): Metric to use for calculating distances.
-
-        Returns:
-            pd.DataFrame: Matrix of defined domains.
-        """
-        # Extract the significant enrichment matrix from the neighborhoods data
-        significant_neighborhoods_enrichment = neighborhoods["significant_enrichment_matrix"]
-        # Call external function to define domains based on the extracted data
-        return define_domains(
-            top_annotations=top_annotations,
-            significant_neighborhoods_enrichment=significant_neighborhoods_enrichment,
-            linkage_criterion=linkage_criterion,
-            linkage_method=linkage_method,
-            linkage_metric=linkage_metric,
-        )
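The net effect of this change is that RISK shrinks from a ~500-line class holding every workflow to a thin facade over five per-concern API classes. A minimal, self-contained sketch of that composition pattern follows; the class and method names below are illustrative stand-ins, since the 0.0.9 method signatures themselves are not shown in this diff:

# Illustrative mixin composition, mirroring RISK(NetworkIO, AnnotationsIO,
# NeighborhoodsAPI, GraphAPI, PlotterAPI). Names here are hypothetical.
class NetworkLoader:
    def load_network(self) -> str:
        return "network loaded"

class NeighborhoodAnalyzer:
    def analyze_neighborhoods(self) -> str:
        return "neighborhoods analyzed"

class Plotter:
    def plot(self) -> str:
        return "plotted"

class Facade(NetworkLoader, NeighborhoodAnalyzer, Plotter):
    """Thin entry point: inherits one method set per concern."""

facade = Facade()
print(facade.load_network(), facade.analyze_neighborhoods(), facade.plot())

Each concern can now be tested and versioned independently, while callers still interact with a single object.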
risk/stats/__init__.py CHANGED
@@ -3,7 +3,11 @@ risk/stats
 ~~~~~~~~~~
 """
 
-from .hypergeom import compute_hypergeom_test
-from .permutation import compute_permutation_test
-from .poisson import compute_poisson_test
-from .stats import calculate_significance_matrices
+from risk.stats.binom import compute_binom_test
+from risk.stats.chi2 import compute_chi2_test
+from risk.stats.hypergeom import compute_hypergeom_test
+from risk.stats.permutation import compute_permutation_test
+from risk.stats.poisson import compute_poisson_test
+from risk.stats.zscore import compute_zscore_test
+
+from risk.stats.stats import calculate_significance_matrices
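With the 0.0.9 wheel installed, the widened public surface can be checked directly; the names below are taken verbatim from the import list above:

# Import smoke test for the expanded risk.stats API.
from risk.stats import (
    calculate_significance_matrices,
    compute_binom_test,
    compute_chi2_test,
    compute_hypergeom_test,
    compute_permutation_test,
    compute_poisson_test,
    compute_zscore_test,
)

for test in (compute_binom_test, compute_chi2_test, compute_zscore_test):
    print(test.__name__)  # the three statistical tests new in 0.0.9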
risk/stats/binom.py ADDED
@@ -0,0 +1,51 @@
+"""
+risk/stats/binomial
+~~~~~~~~~~~~~~~~~~~
+"""
+
+from typing import Any, Dict
+
+from scipy.sparse import csr_matrix
+from scipy.stats import binom
+
+
+def compute_binom_test(
+    neighborhoods: csr_matrix,
+    annotations: csr_matrix,
+    null_distribution: str = "network",
+) -> Dict[str, Any]:
+    """Compute Binomial test for enrichment and depletion in neighborhoods with selectable null distribution.
+
+    Args:
+        neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+        annotations (csr_matrix): Sparse binary matrix representing annotations.
+        null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
+
+    Returns:
+        Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
+    """
+    # Get the total number of nodes in the network
+    total_nodes = neighborhoods.shape[1]
+
+    # Compute sums (remain sparse here)
+    neighborhood_sizes = neighborhoods.sum(axis=1)  # Row sums
+    annotation_totals = annotations.sum(axis=0)  # Column sums
+    # Compute probabilities (convert to dense)
+    if null_distribution == "network":
+        p_values = (annotation_totals / total_nodes).A.flatten()  # Dense 1D array
+    elif null_distribution == "annotations":
+        p_values = (annotation_totals / annotations.sum()).A.flatten()  # Dense 1D array
+    else:
+        raise ValueError(
+            "Invalid null_distribution value. Choose either 'network' or 'annotations'."
+        )
+
+    # Observed counts (sparse matrix multiplication)
+    annotated_counts = neighborhoods @ annotations  # Sparse result
+    annotated_counts_dense = annotated_counts.toarray()  # Convert for dense operations
+
+    # Compute enrichment and depletion p-values
+    enrichment_pvals = 1 - binom.cdf(annotated_counts_dense - 1, neighborhood_sizes.A, p_values)
+    depletion_pvals = binom.cdf(annotated_counts_dense, neighborhood_sizes.A, p_values)
+
+    return {"enrichment_pvals": enrichment_pvals, "depletion_pvals": depletion_pvals}
risk/stats/chi2.py ADDED
@@ -0,0 +1,69 @@
+"""
+risk/stats/chi2
+~~~~~~~~~~~~~~~
+"""
+
+from typing import Any, Dict
+
+import numpy as np
+from scipy.sparse import csr_matrix
+from scipy.stats import chi2
+
+
+def compute_chi2_test(
+    neighborhoods: csr_matrix,
+    annotations: csr_matrix,
+    null_distribution: str = "network",
+) -> Dict[str, Any]:
+    """Compute chi-squared test for enrichment and depletion in neighborhoods with selectable null distribution.
+
+    Args:
+        neighborhoods (csr_matrix): Sparse binary matrix representing neighborhoods.
+        annotations (csr_matrix): Sparse binary matrix representing annotations.
+        null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
+
+    Returns:
+        Dict[str, Any]: Dictionary containing depletion and enrichment p-values.
+    """
+    # Total number of nodes in the network
+    total_node_count = neighborhoods.shape[0]
+
+    if null_distribution == "network":
+        # Case 1: Use all nodes as the background
+        background_population = total_node_count
+        neighborhood_sums = neighborhoods.sum(axis=0)  # Column sums of neighborhoods
+        annotation_sums = annotations.sum(axis=0)  # Column sums of annotations
+    elif null_distribution == "annotations":
+        # Case 2: Only consider nodes with at least one annotation
+        annotated_nodes = (
+            np.ravel(annotations.sum(axis=1)) > 0
+        )  # Row-wise sum to filter nodes with annotations
+        background_population = annotated_nodes.sum()  # Total number of annotated nodes
+        neighborhood_sums = neighborhoods[annotated_nodes].sum(
+            axis=0
+        )  # Neighborhood sums for annotated nodes
+        annotation_sums = annotations[annotated_nodes].sum(
+            axis=0
+        )  # Annotation sums for annotated nodes
+    else:
+        raise ValueError(
+            "Invalid null_distribution value. Choose either 'network' or 'annotations'."
+        )
+
+    # Convert to dense arrays for downstream computations
+    neighborhood_sums = np.asarray(neighborhood_sums).reshape(-1, 1)  # Ensure column vector shape
+    annotation_sums = np.asarray(annotation_sums).reshape(1, -1)  # Ensure row vector shape
+
+    # Observed values: number of annotated nodes in each neighborhood
+    observed = neighborhoods.T @ annotations  # Shape: (neighborhoods, annotations)
+    # Expected values under the null
+    expected = (neighborhood_sums @ annotation_sums) / background_population
+    # Chi-squared statistic: sum((observed - expected)^2 / expected)
+    with np.errstate(divide="ignore", invalid="ignore"):  # Handle divide-by-zero
+        chi2_stat = np.where(expected > 0, np.power(observed - expected, 2) / expected, 0)
+
+    # Compute p-values for enrichment (upper tail) and depletion (lower tail)
+    enrichment_pvals = chi2.sf(chi2_stat, df=1)  # Survival function for upper tail
+    depletion_pvals = chi2.cdf(chi2_stat, df=1)  # Cumulative distribution for lower tail
+
+    return {"depletion_pvals": depletion_pvals, "enrichment_pvals": enrichment_pvals}