risk-network 0.0.9b14__py3-none-any.whl → 0.0.9b16__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
risk/__init__.py CHANGED
@@ -2,9 +2,9 @@
  risk
  ~~~~
 
- RISK: RISK Infers Spatial Kinships
+ RISK: Regional Inference of Significant Kinships
  """
 
  from risk.risk import RISK
 
- __version__ = "0.0.9-beta.14"
+ __version__ = "0.0.9-beta.16"
risk/neighborhoods/community.py CHANGED
@@ -13,21 +13,21 @@ from networkx.algorithms.community import greedy_modularity_communities
 
 
  def calculate_greedy_modularity_neighborhoods(
- network: nx.Graph, edge_rank_percentile: float = 1.0
+ network: nx.Graph, fraction_shortest_edges: float = 1.0
  ) -> np.ndarray:
  """Calculate neighborhoods using the Greedy Modularity method.
 
  Args:
  network (nx.Graph): The network graph.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
 
  Returns:
  np.ndarray: A binary neighborhood matrix where nodes in the same community have 1, and others have 0.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Detect communities using the Greedy Modularity method
  communities = greedy_modularity_communities(subnetwork)
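Every clustering helper in this module changes the same way: the keyword `edge_rank_percentile` becomes `fraction_shortest_edges`, with identical semantics (the default 1.0 keeps every edge). A minimal before/after sketch of a direct call, assuming these helpers are importable from `risk.neighborhoods.community` as the RECORD below suggests and that the graph's edges carry the `length` attribute the subgraph filter ranks by:

```python
import networkx as nx
from risk.neighborhoods.community import calculate_greedy_modularity_neighborhoods

# Toy graph with the 'length' edge attribute expected by the edge filter (illustrative only).
G = nx.karate_club_graph()
for u, v in G.edges():
    G[u][v]["length"] = 1.0

# 0.0.9b14: calculate_greedy_modularity_neighborhoods(G, edge_rank_percentile=0.75)
# 0.0.9b16: same call, renamed keyword
neighborhoods = calculate_greedy_modularity_neighborhoods(G, fraction_shortest_edges=0.75)
```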
@@ -52,21 +52,21 @@ def calculate_greedy_modularity_neighborhoods(
 
 
  def calculate_label_propagation_neighborhoods(
- network: nx.Graph, edge_rank_percentile: float = 1.0
+ network: nx.Graph, fraction_shortest_edges: float = 1.0
  ) -> np.ndarray:
  """Apply Label Propagation to the network to detect communities.
 
  Args:
  network (nx.Graph): The network graph.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
 
  Returns:
  np.ndarray: A binary neighborhood matrix on Label Propagation.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Apply Label Propagation for community detection
  communities = nx.algorithms.community.label_propagation.label_propagation_communities(
@@ -93,7 +93,7 @@ def calculate_label_propagation_neighborhoods(
  def calculate_leiden_neighborhoods(
  network: nx.Graph,
  resolution: float = 1.0,
- edge_rank_percentile: float = 1.0,
+ fraction_shortest_edges: float = 1.0,
  random_seed: int = 888,
  ) -> np.ndarray:
  """Calculate neighborhoods using the Leiden method.
@@ -101,16 +101,16 @@ def calculate_leiden_neighborhoods(
  Args:
  network (nx.Graph): The network graph.
  resolution (float, optional): Resolution parameter for the Leiden method. Defaults to 1.0.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
  random_seed (int, optional): Random seed for reproducibility. Defaults to 888.
 
  Returns:
  np.ndarray: A binary neighborhood matrix where nodes in the same community have 1, and others have 0.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Convert NetworkX graph to iGraph
  igraph_network = ig.Graph.from_networkx(subnetwork)
@@ -142,7 +142,7 @@ def calculate_leiden_neighborhoods(
  def calculate_louvain_neighborhoods(
  network: nx.Graph,
  resolution: float = 0.1,
- edge_rank_percentile: float = 1.0,
+ fraction_shortest_edges: float = 1.0,
  random_seed: int = 888,
  ) -> np.ndarray:
  """Calculate neighborhoods using the Louvain method.
@@ -150,16 +150,16 @@ def calculate_louvain_neighborhoods(
  Args:
  network (nx.Graph): The network graph.
  resolution (float, optional): Resolution parameter for the Louvain method. Defaults to 0.1.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
  random_seed (int, optional): Random seed for reproducibility. Defaults to 888.
 
  Returns:
  np.ndarray: A binary neighborhood matrix on the Louvain method.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Apply Louvain method to partition the network
  partition = community_louvain.best_partition(
@@ -189,21 +189,21 @@ def calculate_louvain_neighborhoods(
 
 
  def calculate_markov_clustering_neighborhoods(
- network: nx.Graph, edge_rank_percentile: float = 1.0
+ network: nx.Graph, fraction_shortest_edges: float = 1.0
  ) -> np.ndarray:
  """Apply Markov Clustering (MCL) to the network and return a binary neighborhood matrix.
 
  Args:
  network (nx.Graph): The network graph.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
 
  Returns:
  np.ndarray: A binary neighborhood matrix on Markov Clustering.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Step 1: Convert the subnetwork to an adjacency matrix
  subnetwork_nodes = list(subnetwork.nodes())
@@ -234,21 +234,21 @@ def calculate_markov_clustering_neighborhoods(
 
 
  def calculate_spinglass_neighborhoods(
- network: nx.Graph, edge_rank_percentile: float = 1.0
+ network: nx.Graph, fraction_shortest_edges: float = 1.0
  ) -> np.ndarray:
  """Apply Spinglass Community Detection to the network, handling disconnected components.
 
  Args:
  network (nx.Graph): The network graph.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
 
  Returns:
  np.ndarray: A binary neighborhood matrix based on Spinglass communities.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Step 1: Find connected components in the graph
  components = list(nx.connected_components(subnetwork))
@@ -288,21 +288,21 @@ def calculate_spinglass_neighborhoods(
 
 
  def calculate_walktrap_neighborhoods(
- network: nx.Graph, edge_rank_percentile: float = 1.0
+ network: nx.Graph, fraction_shortest_edges: float = 1.0
  ) -> np.ndarray:
  """Apply Walktrap Community Detection to the network.
 
  Args:
  network (nx.Graph): The network graph.
- edge_rank_percentile (float, optional): Shortest edge rank percentile threshold for creating
+ fraction_shortest_edges (float, optional): Shortest edge rank fraction threshold for creating
  subgraphs before clustering.
 
  Returns:
  np.ndarray: A binary neighborhood matrix on Walktrap communities.
  """
- # Create a subgraph with the shortest edges based on the rank percentile
+ # Create a subgraph with the shortest edges based on the rank fraction
  subnetwork = _create_percentile_limited_subgraph(
- network, edge_rank_percentile=edge_rank_percentile
+ network, fraction_shortest_edges=fraction_shortest_edges
  )
  # Convert NetworkX graph to iGraph
  igraph_network = ig.Graph.from_networkx(subnetwork)
@@ -326,17 +326,17 @@ def calculate_walktrap_neighborhoods(
  return neighborhoods
 
 
- def _create_percentile_limited_subgraph(G: nx.Graph, edge_rank_percentile: float) -> nx.Graph:
- """Create a subgraph containing the shortest edges based on the specified rank percentile
+ def _create_percentile_limited_subgraph(G: nx.Graph, fraction_shortest_edges: float) -> nx.Graph:
+ """Create a subgraph containing the shortest edges based on the specified rank fraction
  of all edge lengths in the input graph.
 
  Args:
  G (nx.Graph): The input graph with 'length' attributes on edges.
- edge_rank_percentile (float): The rank percentile (between 0 and 1) to filter edges.
+ fraction_shortest_edges (float): The rank fraction (between 0 and 1) to filter edges.
 
  Returns:
  nx.Graph: A subgraph with nodes and edges where the edges are within the shortest
- specified rank percentile.
+ specified rank fraction.
  """
  # Step 1: Extract edges with their lengths
  edges_with_length = [(u, v, d) for u, v, d in G.edges(data=True) if "length" in d]
@@ -347,12 +347,12 @@ def _create_percentile_limited_subgraph(G: nx.Graph, edge_rank_percentile: float
 
  # Step 2: Sort edges by length in ascending order
  edges_with_length.sort(key=lambda x: x[2]["length"])
- # Step 3: Calculate the cutoff index for the given rank percentile
- cutoff_index = int(edge_rank_percentile * len(edges_with_length))
+ # Step 3: Calculate the cutoff index for the given rank fraction
+ cutoff_index = int(fraction_shortest_edges * len(edges_with_length))
  if cutoff_index == 0:
- raise ValueError("The rank percentile is too low, resulting in no edges being included.")
+ raise ValueError("The rank fraction is too low, resulting in no edges being included.")
 
- # Step 4: Create the subgraph by selecting only the shortest edges within the rank percentile
+ # Step 4: Create the subgraph by selecting only the shortest edges within the rank fraction
  subgraph = nx.Graph()
  subgraph.add_nodes_from(G.nodes(data=True)) # Retain all nodes from the original graph
  subgraph.add_edges_from(edges_with_length[:cutoff_index])
@@ -360,8 +360,6 @@ def _create_percentile_limited_subgraph(G: nx.Graph, edge_rank_percentile: float
  subgraph.remove_nodes_from(list(nx.isolates(subgraph)))
  # Step 6: Check if the resulting subgraph has no edges and issue a warning
  if subgraph.number_of_edges() == 0:
- raise Warning(
- "The resulting subgraph has no edges. Consider adjusting the rank percentile."
- )
+ raise Warning("The resulting subgraph has no edges. Consider adjusting the rank fraction.")
 
  return subgraph
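For orientation, the filtering that all of the helpers above delegate to is unchanged apart from the names: edges are ranked by their `length` attribute and only the shortest `int(fraction_shortest_edges * num_edges)` are kept before clustering. A self-contained sketch of that idea (an illustrative re-implementation, not the package's private `_create_percentile_limited_subgraph`):

```python
import networkx as nx

def keep_shortest_edge_fraction(G: nx.Graph, fraction: float) -> nx.Graph:
    """Illustrative: keep only the shortest `fraction` of edges, ranked by 'length'."""
    edges = [(u, v, d) for u, v, d in G.edges(data=True) if "length" in d]
    edges.sort(key=lambda e: e[2]["length"])       # ascending edge length
    cutoff = int(fraction * len(edges))            # e.g. 0.5 keeps the shortest half
    sub = nx.Graph()
    sub.add_nodes_from(G.nodes(data=True))         # retain all nodes
    sub.add_edges_from(edges[:cutoff])
    sub.remove_nodes_from(list(nx.isolates(sub)))  # drop nodes left without edges
    return sub
```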
risk/neighborhoods/neighborhoods.py CHANGED
@@ -30,7 +30,7 @@ warnings.filterwarnings(action="ignore", category=DataConversionWarning)
  def get_network_neighborhoods(
  network: nx.Graph,
  distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
- edge_rank_percentile: Union[float, List, Tuple, np.ndarray] = 1.0,
+ fraction_shortest_edges: Union[float, List, Tuple, np.ndarray] = 1.0,
  louvain_resolution: float = 0.1,
  leiden_resolution: float = 1.0,
  random_seed: int = 888,
@@ -40,7 +40,7 @@ def get_network_neighborhoods(
  Args:
  network (nx.Graph): The network graph.
  distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use.
- edge_rank_percentile (float, List, Tuple, or np.ndarray, optional): Shortest edge rank percentile threshold(s) for creating subgraphs.
+ fraction_shortest_edges (float, List, Tuple, or np.ndarray, optional): Shortest edge rank fraction threshold(s) for creating subgraphs.
  louvain_resolution (float, optional): Resolution parameter for the Louvain method.
  leiden_resolution (float, optional): Resolution parameter for the Leiden method.
  random_seed (int, optional): Random seed for methods requiring random initialization.
@@ -55,11 +55,11 @@ def get_network_neighborhoods(
  # Ensure distance_metric is a list/tuple for multi-algorithm handling
  if isinstance(distance_metric, (str, np.ndarray)):
  distance_metric = [distance_metric]
- # Ensure edge_rank_percentile is a list/tuple for multi-threshold handling
- if isinstance(edge_rank_percentile, (float, int)):
- edge_rank_percentile = [edge_rank_percentile] * len(distance_metric)
+ # Ensure fraction_shortest_edges is a list/tuple for multi-threshold handling
+ if isinstance(fraction_shortest_edges, (float, int)):
+ fraction_shortest_edges = [fraction_shortest_edges] * len(distance_metric)
  # Check that the number of distance metrics matches the number of edge length thresholds
- if len(distance_metric) != len(edge_rank_percentile):
+ if len(distance_metric) != len(fraction_shortest_edges):
  raise ValueError(
  "The number of distance metrics must match the number of edge length thresholds."
  )
@@ -68,42 +68,42 @@ def get_network_neighborhoods(
  num_nodes = network.number_of_nodes()
  combined_neighborhoods = np.zeros((num_nodes, num_nodes), dtype=int)
 
- # Loop through each distance metric and corresponding edge rank percentile
- for metric, percentile in zip(distance_metric, edge_rank_percentile):
+ # Loop through each distance metric and corresponding edge rank fraction
+ for metric, percentile in zip(distance_metric, fraction_shortest_edges):
  # Call the appropriate neighborhood function based on the metric
  if metric == "greedy_modularity":
  neighborhoods = calculate_greedy_modularity_neighborhoods(
- network, edge_rank_percentile=percentile
+ network, fraction_shortest_edges=percentile
  )
  elif metric == "label_propagation":
  neighborhoods = calculate_label_propagation_neighborhoods(
- network, edge_rank_percentile=percentile
+ network, fraction_shortest_edges=percentile
  )
  elif metric == "leiden":
  neighborhoods = calculate_leiden_neighborhoods(
  network,
  resolution=leiden_resolution,
- edge_rank_percentile=percentile,
+ fraction_shortest_edges=percentile,
  random_seed=random_seed,
  )
  elif metric == "louvain":
  neighborhoods = calculate_louvain_neighborhoods(
  network,
  resolution=louvain_resolution,
- edge_rank_percentile=percentile,
+ fraction_shortest_edges=percentile,
  random_seed=random_seed,
  )
  elif metric == "markov_clustering":
  neighborhoods = calculate_markov_clustering_neighborhoods(
- network, edge_rank_percentile=percentile
+ network, fraction_shortest_edges=percentile
  )
  elif metric == "spinglass":
  neighborhoods = calculate_spinglass_neighborhoods(
- network, edge_rank_percentile=percentile
+ network, fraction_shortest_edges=percentile
  )
  elif metric == "walktrap":
  neighborhoods = calculate_walktrap_neighborhoods(
- network, edge_rank_percentile=percentile
+ network, fraction_shortest_edges=percentile
  )
  else:
  raise ValueError(
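Since `distance_metric` and `fraction_shortest_edges` both accept sequences, and a single float is broadcast across all metrics, combined neighborhoods from several algorithms can be requested in one call. A hedged sketch (the import path follows the RECORD below; the `length` edge attributes are assumptions carried over from the subgraph filter):

```python
import networkx as nx
from risk.neighborhoods.neighborhoods import get_network_neighborhoods

# Toy network with 'length' edge attributes (illustrative only).
network = nx.karate_club_graph()
for u, v in network.edges():
    network[u][v]["length"] = 1.0

# One fraction per metric; a single float would be broadcast to both metrics,
# and mismatched list lengths raise the ValueError shown above.
combined = get_network_neighborhoods(
    network,
    distance_metric=["louvain", "walktrap"],
    fraction_shortest_edges=[0.5, 0.75],
    louvain_resolution=0.1,
    random_seed=888,
)
```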
@@ -262,14 +262,15 @@ def _impute_neighbors_with_similarity(
  # Iterate over all significant nodes
  for row_index in range(significant_binary_significance_matrix.shape[0]):
  if significant_binary_significance_matrix[row_index].sum() != 0:
- significance_matrix, significant_binary_significance_matrix = (
- _process_node_imputation(
- row_index,
- network,
- significance_matrix,
- significant_binary_significance_matrix,
- depth,
- )
+ (
+ significance_matrix,
+ significant_binary_significance_matrix,
+ ) = _process_node_imputation(
+ row_index,
+ network,
+ significance_matrix,
+ significant_binary_significance_matrix,
+ depth,
  )
 
  # Update rows to impute for the next iteration
@@ -476,7 +477,7 @@ def _calculate_threshold(median_distances: List, distance_threshold: float) -> f
  """
  # Sort the median distances
  sorted_distances = np.sort(median_distances)
- # Compute the rank percentiles for the sorted distances
+ # Compute the rank fractions for the sorted distances
  rank_percentiles = np.linspace(0, 1, len(sorted_distances))
  # Interpolating the ranks to 1000 evenly spaced percentiles
  interpolated_percentiles = np.linspace(0, 1, 1000)
risk/network/graph/summary.py CHANGED
@@ -147,6 +147,22 @@ class AnalysisSummary:
  .reset_index(drop=True)
  )
 
+ # Convert annotations list to a DataFrame for comparison
+ ordered_annotations = pd.DataFrame({"Annotation": self.annotations["ordered_annotations"]})
+ # Merge to ensure all annotations are present, filling missing rows with defaults
+ results = pd.merge(ordered_annotations, results, on="Annotation", how="left").fillna(
+ {
+ "Domain ID": -1,
+ "Annotation Members in Network": "",
+ "Annotation Members in Network Count": 0,
+ "Summed Significance Score": 0,
+ "Enrichment P-value": 1.0,
+ "Enrichment Q-value": 1.0,
+ "Depletion P-value": 1.0,
+ "Depletion Q-value": 1.0,
+ }
+ )
+
  return results
 
  @staticmethod
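The new block guarantees that every annotation in `ordered_annotations` appears in the summary exactly once, with neutral defaults (p- and q-values of 1.0, counts of 0, Domain ID -1) for annotations that produced no result rows. The pandas pattern in isolation, with made-up data rather than the package's internals:

```python
import pandas as pd

ordered = pd.DataFrame({"Annotation": ["ribosome", "proteasome", "spliceosome"]})
results = pd.DataFrame(
    {"Annotation": ["ribosome"], "Enrichment P-value": [0.003], "Enrichment Q-value": [0.012]}
)

# Left-merge on the full annotation list, then fill the rows that had no results
# with neutral defaults so every annotation shows up exactly once downstream.
full = pd.merge(ordered, results, on="Annotation", how="left").fillna(
    {"Enrichment P-value": 1.0, "Enrichment Q-value": 1.0}
)
# "ribosome" keeps its computed values; the other two rows get the 1.0 defaults.
print(full)
```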
risk/network/plot/contour.py CHANGED
@@ -270,8 +270,20 @@ class Contour:
  )
 
  # Set linewidth for the contour lines to 0 for levels other than the base level
- for i in range(1, len(contour_levels)):
- c.collections[i].set_linewidth(0)
+ c.set_linewidth(0)
+ try:
+ # Try setting linewidth directly on the QuadContourSet
+ c.set_linewidth(0)
+ except AttributeError:
+ # Fallback: Iterate over collections if the direct method fails
+ if hasattr(c, "collections"):
+ for i, collection in enumerate(c.collections):
+ collection.set_linewidth(0)
+ else:
+ # Raise an error if 'collections' is also not available
+ raise AttributeError(
+ "'QuadContourSet' object has neither 'set_linewidth' nor 'collections'"
+ )
 
  def get_annotated_contour_colors(
  self,
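This change is a Matplotlib compatibility shim: in recent Matplotlib releases a contour set is a single artist with its own `set_linewidth()`, while older releases only expose per-level `collections` (since deprecated). A standalone sketch of the same pattern outside RISK:

```python
import numpy as np
import matplotlib.pyplot as plt

x = y = np.linspace(-2.0, 2.0, 50)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X**2 + Y**2))

fig, ax = plt.subplots()
c = ax.contour(X, Y, Z, levels=5)

# Hide the contour lines on any Matplotlib version: newer releases support
# set_linewidth() on the contour set itself, older ones need the per-level fallback.
try:
    c.set_linewidth(0)
except AttributeError:
    for collection in c.collections:
        collection.set_linewidth(0)
```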
risk/risk.py CHANGED
@@ -53,7 +53,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
  louvain_resolution: float = 0.1,
  leiden_resolution: float = 1.0,
- edge_rank_percentile: Union[float, List, Tuple, np.ndarray] = 0.5,
+ fraction_shortest_edges: Union[float, List, Tuple, np.ndarray] = 0.5,
  null_distribution: str = "network",
  random_seed: int = 888,
  ) -> Dict[str, Any]:
@@ -63,11 +63,11 @@ class RISK(NetworkIO, AnnotationsIO):
  network (nx.Graph): The network graph.
  annotations (Dict[str, Any]): The annotations associated with the network.
  distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
- metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
+ metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'leiden', 'label_propagation',
  'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
  louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
  leiden_resolution (float, optional): Resolution parameter for Leiden clustering. Defaults to 1.0.
- edge_rank_percentile (float, List, Tuple, or np.ndarray, optional): Shortest edge rank percentile threshold(s) for creating subgraphs.
+ fraction_shortest_edges (float, List, Tuple, or np.ndarray, optional): Shortest edge rank fraction threshold(s) for creating subgraphs.
  Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
  Defaults to 0.5.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
@@ -82,7 +82,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric=distance_metric,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
- edge_rank_percentile=edge_rank_percentile,
+ fraction_shortest_edges=fraction_shortest_edges,
  statistical_test_function="hypergeom",
  null_distribution=null_distribution,
  random_seed=random_seed,
@@ -97,7 +97,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
- edge_rank_percentile=edge_rank_percentile,
+ fraction_shortest_edges=fraction_shortest_edges,
  random_seed=random_seed,
  )
  # Run hypergeometric test to compute neighborhood significance
@@ -117,7 +117,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
  louvain_resolution: float = 0.1,
  leiden_resolution: float = 1.0,
- edge_rank_percentile: Union[float, List, Tuple, np.ndarray] = 0.5,
+ fraction_shortest_edges: Union[float, List, Tuple, np.ndarray] = 0.5,
  null_distribution: str = "network",
  random_seed: int = 888,
  ) -> Dict[str, Any]:
@@ -127,11 +127,11 @@ class RISK(NetworkIO, AnnotationsIO):
  network (nx.Graph): The network graph.
  annotations (Dict[str, Any]): The annotations associated with the network.
  distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
- metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
+ metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'leiden', 'label_propagation',
  'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
  louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
  leiden_resolution (float, optional): Resolution parameter for Leiden clustering. Defaults to 1.0.
- edge_rank_percentile (float, List, Tuple, or np.ndarray, optional): Shortest edge rank percentile threshold(s) for creating subgraphs.
+ fraction_shortest_edges (float, List, Tuple, or np.ndarray, optional): Shortest edge rank fraction threshold(s) for creating subgraphs.
  Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
  Defaults to 0.5.
  null_distribution (str, optional): Type of null distribution ('network' or 'annotations'). Defaults to "network".
@@ -146,7 +146,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric=distance_metric,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
- edge_rank_percentile=edge_rank_percentile,
+ fraction_shortest_edges=fraction_shortest_edges,
  statistical_test_function="poisson",
  null_distribution=null_distribution,
  random_seed=random_seed,
@@ -161,7 +161,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
- edge_rank_percentile=edge_rank_percentile,
+ fraction_shortest_edges=fraction_shortest_edges,
  random_seed=random_seed,
  )
  # Run Poisson test to compute neighborhood significance
@@ -181,7 +181,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
  louvain_resolution: float = 0.1,
  leiden_resolution: float = 1.0,
- edge_rank_percentile: Union[float, List, Tuple, np.ndarray] = 0.5,
+ fraction_shortest_edges: Union[float, List, Tuple, np.ndarray] = 0.5,
  score_metric: str = "sum",
  null_distribution: str = "network",
  num_permutations: int = 1000,
@@ -194,11 +194,11 @@ class RISK(NetworkIO, AnnotationsIO):
  network (nx.Graph): The network graph.
  annotations (Dict[str, Any]): The annotations associated with the network.
  distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
- metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
+ metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'leiden', 'label_propagation',
  'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
  louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
  leiden_resolution (float, optional): Resolution parameter for Leiden clustering. Defaults to 1.0.
- edge_rank_percentile (float, List, Tuple, or np.ndarray, optional): Shortest edge rank percentile threshold(s) for creating subgraphs.
+ fraction_shortest_edges (float, List, Tuple, or np.ndarray, optional): Shortest edge rank fraction threshold(s) for creating subgraphs.
  Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
  Defaults to 0.5.
  score_metric (str, optional): Scoring metric for neighborhood significance. Defaults to "sum".
@@ -216,7 +216,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric=distance_metric,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
- edge_rank_percentile=edge_rank_percentile,
+ fraction_shortest_edges=fraction_shortest_edges,
  statistical_test_function="permutation",
  score_metric=score_metric,
  null_distribution=null_distribution,
@@ -234,7 +234,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
- edge_rank_percentile=edge_rank_percentile,
+ fraction_shortest_edges=fraction_shortest_edges,
  random_seed=random_seed,
  )
 
@@ -421,7 +421,7 @@ class RISK(NetworkIO, AnnotationsIO):
  distance_metric: Union[str, List, Tuple, np.ndarray] = "louvain",
  louvain_resolution: float = 0.1,
  leiden_resolution: float = 1.0,
- edge_rank_percentile: Union[float, List, Tuple, np.ndarray] = 0.5,
+ fraction_shortest_edges: Union[float, List, Tuple, np.ndarray] = 0.5,
  random_seed: int = 888,
  ) -> np.ndarray:
  """Load significant neighborhoods for the network.
@@ -430,11 +430,11 @@ class RISK(NetworkIO, AnnotationsIO):
  network (nx.Graph): The network graph.
  annotations (pd.DataFrame): The matrix of annotations associated with the network.
  distance_metric (str, List, Tuple, or np.ndarray, optional): The distance metric(s) to use. Can be a string for one
- metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'label_propagation',
+ metric or a list/tuple/ndarray of metrics ('greedy_modularity', 'louvain', 'leiden', 'label_propagation',
  'markov_clustering', 'walktrap', 'spinglass'). Defaults to 'louvain'.
  louvain_resolution (float, optional): Resolution parameter for Louvain clustering. Defaults to 0.1.
  leiden_resolution (float, optional): Resolution parameter for Leiden clustering. Defaults to 1.0.
- edge_rank_percentile (float, List, Tuple, or np.ndarray, optional): Shortest edge rank percentile threshold(s) for creating subgraphs.
+ fraction_shortest_edges (float, List, Tuple, or np.ndarray, optional): Shortest edge rank fraction threshold(s) for creating subgraphs.
  Can be a single float for one threshold or a list/tuple of floats corresponding to multiple thresholds.
  Defaults to 0.5.
  random_seed (int, optional): Seed for random number generation. Defaults to 888.
@@ -451,14 +451,14 @@ class RISK(NetworkIO, AnnotationsIO):
  for_print_distance_metric = distance_metric
  # Log and display neighborhood settings
  logger.debug(f"Distance metric: '{for_print_distance_metric}'")
- logger.debug(f"Edge length threshold: {edge_rank_percentile}")
+ logger.debug(f"Edge length threshold: {fraction_shortest_edges}")
  logger.debug(f"Random seed: {random_seed}")
 
  # Compute neighborhoods based on the network and distance metric
  neighborhoods = get_network_neighborhoods(
  network,
  distance_metric,
- edge_rank_percentile,
+ fraction_shortest_edges,
  louvain_resolution=louvain_resolution,
  leiden_resolution=leiden_resolution,
  random_seed=random_seed,
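At the public API level the rename surfaces in each of the RISK loader methods above; scripts that passed the threshold by keyword need a one-word change. A hedged sketch of a caller (the method name `load_neighborhoods_by_hypergeom` and the bare `RISK()` constructor are assumptions not shown in these hunks; `network` and `annotations` stand in for objects loaded with RISK's own NetworkIO/AnnotationsIO helpers):

```python
import networkx as nx
from risk import RISK

def run_hypergeom(network: nx.Graph, annotations: dict) -> dict:
    """Illustrative only: shows the renamed keyword at the RISK API level."""
    risk = RISK()  # assumed no-argument constructor
    # 0.0.9b14 keyword: edge_rank_percentile=0.5
    # 0.0.9b16 keyword: fraction_shortest_edges=0.5
    return risk.load_neighborhoods_by_hypergeom(
        network,
        annotations,
        distance_metric="leiden",        # 'leiden' is now listed among the documented metrics
        fraction_shortest_edges=0.5,
        random_seed=888,
    )
```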
risk_network-0.0.9b14.dist-info/METADATA → risk_network-0.0.9b16.dist-info/METADATA CHANGED
@@ -1,10 +1,10 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: risk-network
- Version: 0.0.9b14
+ Version: 0.0.9b16
  Summary: A Python package for biological network analysis
  Author: Ira Horecka
  Author-email: Ira Horecka <ira89@icloud.com>
- License: GNU GENERAL PUBLIC LICENSE
+ License: GNU GENERAL PUBLIC LICENSE
  Version 3, 29 June 2007
 
  Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
@@ -696,7 +696,7 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: ipywidgets
  Requires-Dist: leidenalg
- Requires-Dist: markov-clustering
+ Requires-Dist: markov_clustering
  Requires-Dist: matplotlib
  Requires-Dist: networkx
  Requires-Dist: nltk==3.8.1
@@ -710,6 +710,8 @@ Requires-Dist: scipy
  Requires-Dist: statsmodels
  Requires-Dist: threadpoolctl
  Requires-Dist: tqdm
+ Dynamic: author
+ Dynamic: requires-python
 
  # RISK Network
 
@@ -726,7 +728,7 @@ Requires-Dist: tqdm
  ![Downloads](https://img.shields.io/pypi/dm/risk-network)
  ![Platforms](https://img.shields.io/badge/platform-linux%20%7C%20macos%20%7C%20windows-lightgrey)
 
- **RISK (RISK Infers Spatial Kinships)** is a next-generation tool designed to streamline the analysis of biological and non-biological networks. RISK enhances network analysis with its modular architecture, extensive file format support, and advanced clustering algorithms. It simplifies the creation of publication-quality figures, making it an important tool for researchers across disciplines.
+ **RISK** (Regional Inference of Significant Kinships) is a next-generation tool designed to streamline the analysis of biological and non-biological networks. RISK enhances network analysis with its modular architecture, extensive file format support, and advanced clustering algorithms. It simplifies the creation of publication-quality figures, making it an important tool for researchers across disciplines.
 
  ## Documentation and Tutorial
 
risk_network-0.0.9b14.dist-info/RECORD → risk_network-0.0.9b16.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
- risk/__init__.py,sha256=Iidh958zzqC-3UdCs4vJpPixUJXUPBHxeQJYNHO4IHI,113
+ risk/__init__.py,sha256=Y97538nwTi67cMHhP4b1tYJYhXdZHwHd4V63ET61HzA,127
  risk/constants.py,sha256=XInRaH78Slnw_sWgAsBFbUHkyA0h0jL0DKGuQNbOvjM,550
- risk/risk.py,sha256=MXu8T93NUgMDl3NaZDbm0j9c4KWwzx-kmp9Rd1ax0N4,23534
+ risk/risk.py,sha256=nodTixy31O3VK7ld-g74Fb9YEiyvKs_Vbor2lE2ADJ8,23632
  risk/annotations/__init__.py,sha256=kXgadEXaCh0z8OyhOhTj7c3qXGmWgOhaSZ4gSzSb59U,147
  risk/annotations/annotations.py,sha256=WVT9wzTm8lTpMw_3SnbyljWR77yExo0rb1zVgJza8nw,14284
  risk/annotations/io.py,sha256=tk1dAsxIwW5oLxB294ppiuZd4__Y5pj8se8KhitRSNA,10554
@@ -8,18 +8,18 @@ risk/log/__init__.py,sha256=gy7C5L6D222AYUChq5lkc0LsCJ_QMQPaFiBJKbecdac,201
  risk/log/console.py,sha256=C52s3FgQ2e9kQWcXL8m7rs_pnKXt5Yy8PBHmQkOTiNo,4537
  risk/log/parameters.py,sha256=o4StqYCa0kt7_Ht4mKa1DwwvhGUwkC_dGBaiUIc0GB0,5683
  risk/neighborhoods/__init__.py,sha256=C-SD0G-9skSLjLFdAB6v6lAjO8la2v6Fqy63h2MY28k,186
- risk/neighborhoods/community.py,sha256=hsWr6sNW3lCZn9L2f8oYBVmIANnJpoAL9194fg6K1eQ,15408
+ risk/neighborhoods/community.py,sha256=3H_PiACvuAy85PV7Jp2b2VAZYFXBVDQb91NU_SHkm4o,15437
  risk/neighborhoods/domains.py,sha256=t91xSpx9Ty9hSlhRq2_XwyPpBP7sjKhovcPPvkwWtf0,11398
- risk/neighborhoods/neighborhoods.py,sha256=XB2Gd0xghKKBNkwp1H-1138NegTlAiyOqAkv_vaLEZM,21150
+ risk/neighborhoods/neighborhoods.py,sha256=bBUY7hXqcsOoAEkPdRoRNuj36WsllXicmz_LxZfEuyw,21186
  risk/network/__init__.py,sha256=iEPeJdZfqp0toxtbElryB8jbz9_t_k4QQ3iDvKE8C_0,126
  risk/network/geometry.py,sha256=gFtYUj9j9aul4paKq_qSGJn39Nazxu_MXv8m-tYYtrk,6840
  risk/network/io.py,sha256=Dl-Xwv1gWdmvDkZ77581s_JYO5ujZjvmHG7wq_-ApZU,23239
  risk/network/graph/__init__.py,sha256=H0YEiwqZ02LBTkH4blPwUjQ-DOUnhaTTNHM0BcXii6U,81
  risk/network/graph/network.py,sha256=JzYbrgJLiNWFyPIR6_qNSjMtmXmfzRv2FwWSdyg8HjY,12205
- risk/network/graph/summary.py,sha256=h2bpUjfwI1NMflkKwplGQEGPswfAtunormdTIEQYbvs,8987
+ risk/network/graph/summary.py,sha256=7w5gNYNspv02SscDAfHxdx-D1CWKmelSonyBnB9hbrE,9737
  risk/network/plot/__init__.py,sha256=MfmaXJgAZJgXZ2wrhK8pXwzETlcMaLChhWXKAozniAo,98
  risk/network/plot/canvas.py,sha256=W8dFv4XYTzCWXBchgsc0esOQRn4usM4LkwNGPSDMobE,13357
- risk/network/plot/contour.py,sha256=VONX9l6owrZvWtR0mWQ6z2GSd1YXIv5wV_sf5ROQLT4,15581
+ risk/network/plot/contour.py,sha256=1Didhg9Z5seqdEITxWo7Av-Jg_TAuIHv_rPTMQZpkqo,16123
  risk/network/plot/labels.py,sha256=aU_ClDGVPHyQ3H5E_ygx8hsMhrpJB0i9Cn65PlLmw7s,45679
  risk/network/plot/network.py,sha256=0j7pAZgt9PBfFCnOz4QwXnYWTlnLjTrtMm-50I_1G8o,14028
  risk/network/plot/plotter.py,sha256=eS1vHqvOA2O001Rq7WiDcgqcehJ3fg4OPfvkezH4erw,5771
@@ -32,8 +32,8 @@ risk/stats/stats.py,sha256=z8NrhiVj4BzJ250bVLfytpmfC7RzYu7mBuIZD_l0aCA,7222
  risk/stats/permutation/__init__.py,sha256=neJp7FENC-zg_CGOXqv-iIvz1r5XUKI9Ruxhmq7kDOI,105
  risk/stats/permutation/permutation.py,sha256=meBNSrbRa9P8WJ54n485l0H7VQJlMSfHqdN4aCKYCtQ,10105
  risk/stats/permutation/test_functions.py,sha256=lftOude6hee0pyR80HlBD32522JkDoN5hrKQ9VEbuoY,2345
- risk_network-0.0.9b14.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
- risk_network-0.0.9b14.dist-info/METADATA,sha256=-H8Qrl5r36ZNGNJO3RHOzUOi_CzgplZf061rYGcIfJI,47552
- risk_network-0.0.9b14.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- risk_network-0.0.9b14.dist-info/top_level.txt,sha256=NX7C2PFKTvC1JhVKv14DFlFAIFnKc6Lpsu1ZfxvQwVw,5
- risk_network-0.0.9b14.dist-info/RECORD,,
+ risk_network-0.0.9b16.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
+ risk_network-0.0.9b16.dist-info/METADATA,sha256=pGpChS9mrqjv7i21ReWmVdOK0VyuLoB9HmhDrRupWSY,47627
+ risk_network-0.0.9b16.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ risk_network-0.0.9b16.dist-info/top_level.txt,sha256=NX7C2PFKTvC1JhVKv14DFlFAIFnKc6Lpsu1ZfxvQwVw,5
+ risk_network-0.0.9b16.dist-info/RECORD,,
risk_network-0.0.9b14.dist-info/WHEEL → risk_network-0.0.9b16.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.3.0)
+ Generator: setuptools (75.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 