risk-network 0.0.6b10.tar.gz → 0.0.7b1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/PKG-INFO +1 -1
  2. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/__init__.py +1 -1
  3. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/annotations/annotations.py +19 -14
  4. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/neighborhoods/domains.py +13 -14
  5. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/neighborhoods/neighborhoods.py +133 -52
  6. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/network/plot.py +3 -1
  7. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/risk.py +2 -2
  8. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk_network.egg-info/PKG-INFO +1 -1
  9. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/LICENSE +0 -0
  10. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/MANIFEST.in +0 -0
  11. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/README.md +0 -0
  12. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/pyproject.toml +0 -0
  13. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/annotations/__init__.py +0 -0
  14. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/annotations/io.py +0 -0
  15. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/constants.py +0 -0
  16. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/log/__init__.py +0 -0
  17. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/log/console.py +0 -0
  18. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/log/params.py +0 -0
  19. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/neighborhoods/__init__.py +0 -0
  20. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/neighborhoods/community.py +0 -0
  21. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/network/__init__.py +0 -0
  22. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/network/geometry.py +0 -0
  23. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/network/graph.py +0 -0
  24. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/network/io.py +0 -0
  25. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/__init__.py +0 -0
  26. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/fisher_exact.py +0 -0
  27. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/hypergeom.py +0 -0
  28. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/permutation/__init__.py +0 -0
  29. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/permutation/permutation.py +0 -0
  30. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/permutation/test_functions.py +0 -0
  31. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/stats/stats.py +0 -0
  32. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk_network.egg-info/SOURCES.txt +0 -0
  33. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk_network.egg-info/dependency_links.txt +0 -0
  34. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk_network.egg-info/requires.txt +0 -0
  35. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk_network.egg-info/top_level.txt +0 -0
  36. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/setup.cfg +0 -0
  37. {risk_network-0.0.6b10 → risk_network-0.0.7b1}/setup.py +0 -0
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: risk-network
-Version: 0.0.6b10
+Version: 0.0.7b1
 Summary: A Python package for biological network analysis
 Author: Ira Horecka
 Author-email: Ira Horecka <ira89@icloud.com>
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/__init__.py
@@ -7,4 +7,4 @@ RISK: RISK Infers Spatial Kinships
 
 from risk.risk import RISK
 
-__version__ = "0.0.6-beta.10"
+__version__ = "0.0.7-beta.1"
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/annotations/annotations.py
@@ -132,27 +132,32 @@ def define_top_annotations(
             nx.connected_components(enriched_network), key=len, reverse=True
         )
         size_connected_components = np.array([len(c) for c in connected_components])
+
+        # Filter the size of connected components by min_cluster_size and max_cluster_size
+        filtered_size_connected_components = size_connected_components[
+            (size_connected_components >= min_cluster_size)
+            & (size_connected_components <= max_cluster_size)
+        ]
+        # Calculate the number of connected components and large connected components
         num_connected_components = len(connected_components)
-        num_large_connected_components = np.sum(
-            np.logical_and(
-                size_connected_components >= min_cluster_size,
-                size_connected_components <= max_cluster_size,
-            )
-        )
+        num_large_connected_components = len(filtered_size_connected_components)
+
+        # Assign the number of connected components
         annotations_enrichment_matrix.loc[attribute, "num connected components"] = (
             num_connected_components
         )
-        annotations_enrichment_matrix.at[attribute, "size connected components"] = (
-            size_connected_components
-        )
+        # Filter out attributes with more than one connected component
+        annotations_enrichment_matrix.loc[
+            annotations_enrichment_matrix["num connected components"] > 1, "top attributes"
+        ] = False
+        # Assign the number of large connected components
         annotations_enrichment_matrix.loc[attribute, "num large connected components"] = (
            num_large_connected_components
        )
-
-        # Filter out attributes with more than one connected component
-        annotations_enrichment_matrix.loc[
-            annotations_enrichment_matrix["num connected components"] > 1, "top attributes"
-        ] = False
+        # Assign the size of connected components, ensuring it is always a list
+        annotations_enrichment_matrix.at[attribute, "size connected components"] = (
+            filtered_size_connected_components.tolist()
+        )
 
     return annotations_enrichment_matrix
 
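Note: the rewritten block above replaces the `np.sum(np.logical_and(...))` count with a reusable boolean mask, so the same filtered array feeds both the count and the stored list. A minimal standalone sketch of that filtering, with made-up component sizes and cutoffs:

```python
# Sketch of the new component-size filtering; sizes and cutoffs are hypothetical.
import numpy as np

size_connected_components = np.array([2, 5, 40, 1200])
min_cluster_size, max_cluster_size = 5, 1000

# Boolean masks combined with & keep only sizes inside the allowed range
filtered = size_connected_components[
    (size_connected_components >= min_cluster_size)
    & (size_connected_components <= max_cluster_size)
]
print(filtered.tolist())  # [5, 40] -- stored as a list, as in the new code
print(len(filtered))      # 2 -> "num large connected components"
```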
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/neighborhoods/domains.py
@@ -23,7 +23,8 @@ def define_domains(
     linkage_method: str,
     linkage_metric: str,
 ) -> pd.DataFrame:
-    """Define domains and assign nodes to these domains based on their enrichment scores and clustering.
+    """Define domains and assign nodes to these domains based on their enrichment scores and clustering,
+    handling errors by assigning unique domains when clustering fails.
 
     Args:
         top_annotations (pd.DataFrame): DataFrame of top annotations data for the network nodes.
@@ -35,31 +36,29 @@ def define_domains(
     Returns:
         pd.DataFrame: DataFrame with the primary domain for each node.
     """
-    # Check if there's more than one column in significant_neighborhoods_enrichment
-    if significant_neighborhoods_enrichment.shape[1] == 1:
-        print("Single annotation detected. Skipping clustering.")
-        top_annotations["domain"] = 1  # Assign a default domain or handle appropriately
-    else:
-        # Perform hierarchical clustering on the binary enrichment matrix
+    try:
+        # Transpose the matrix to cluster annotations
         m = significant_neighborhoods_enrichment[:, top_annotations["top attributes"]].T
         best_linkage, best_metric, best_threshold = _optimize_silhouette_across_linkage_and_metrics(
             m, linkage_criterion, linkage_method, linkage_metric
         )
-        try:
-            Z = linkage(m, method=best_linkage, metric=best_metric)
-        except ValueError as e:
-            raise ValueError("No significant annotations found.") from e
-
+        # Perform hierarchical clustering
+        Z = linkage(m, method=best_linkage, metric=best_metric)
        print(
            f"Linkage criterion: '{linkage_criterion}'\nLinkage method: '{best_linkage}'\nLinkage metric: '{best_metric}'"
        )
        print(f"Optimal linkage threshold: {round(best_threshold, 3)}")
-
+        # Calculate the optimal threshold for clustering
        max_d_optimal = np.max(Z[:, 2]) * best_threshold
-        domains = fcluster(Z, max_d_optimal, criterion=linkage_criterion)
        # Assign domains to the annotations matrix
+        domains = fcluster(Z, max_d_optimal, criterion=linkage_criterion)
        top_annotations["domain"] = 0
        top_annotations.loc[top_annotations["top attributes"], "domain"] = domains
+    except ValueError:
+        # If a ValueError is encountered, handle it by assigning unique domains
+        n_rows = len(top_annotations)
+        print(f"Error encountered. Skipping clustering and assigning {n_rows} unique domains.")
+        top_annotations["domain"] = range(1, n_rows + 1)  # Assign unique domains
 
     # Create DataFrames to store domain information
     node_to_enrichment = pd.DataFrame(
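Note: `define_domains` now wraps the entire clustering path in a `try/except ValueError` instead of special-casing a single annotation up front, so any clustering failure falls back to one domain per annotation. A self-contained sketch of the same fallback pattern, with synthetic data and a fixed 0.5 threshold standing in for the optimized one:

```python
# Cluster-with-fallback sketch; the data and the 0.5 threshold are made up.
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage, fcluster

top_annotations = pd.DataFrame({"top attributes": [True, True, True]})
m = np.random.rand(3, 10)  # rows = annotations to cluster (already transposed)

try:
    Z = linkage(m, method="average", metric="euclidean")
    max_d = np.max(Z[:, 2]) * 0.5  # stand-in for the silhouette-optimized threshold
    top_annotations["domain"] = fcluster(Z, max_d, criterion="distance")
except ValueError:
    # e.g. too few observations to cluster: assign unique domains instead
    top_annotations["domain"] = range(1, len(top_annotations) + 1)
print(top_annotations)
```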
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/neighborhoods/neighborhoods.py
@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Tuple
 import networkx as nx
 import numpy as np
 from sklearn.exceptions import DataConversionWarning
+from sklearn.metrics.pairwise import cosine_similarity
 
 from risk.neighborhoods.community import (
     calculate_dijkstra_neighborhoods,
@@ -93,7 +94,7 @@ def _create_percentile_limited_subgraph(G: nx.Graph, edge_length_percentile: flo
 def process_neighborhoods(
     network: nx.Graph,
     neighborhoods: Dict[str, Any],
-    impute_depth: int = 1,
+    impute_depth: int = 0,
     prune_threshold: float = 0.0,
 ) -> Dict[str, Any]:
     """Process neighborhoods based on the imputation and pruning settings.
@@ -101,7 +102,7 @@ def process_neighborhoods(
     Args:
         network (nx.Graph): The network data structure used for imputing and pruning neighbors.
         neighborhoods (dict): Dictionary containing 'enrichment_matrix', 'binary_enrichment_matrix', and 'significant_enrichment_matrix'.
-        impute_depth (int, optional): Depth for imputing neighbors. Defaults to 1.
+        impute_depth (int, optional): Depth for imputing neighbors. Defaults to 0.
        prune_threshold (float, optional): Distance threshold for pruning neighbors. Defaults to 0.0.
 
    Returns:
@@ -167,55 +168,135 @@ def _impute_neighbors(
         - np.ndarray: The imputed alpha threshold matrix.
         - np.ndarray: The significant enrichment matrix with non-significant entries set to zero.
     """
-    # Calculate shortest distances for each node to determine the distance threshold
-    shortest_distances = []
-    for node in network.nodes():
-        try:
-            neighbors = [
-                n for n in network.neighbors(node) if binary_enrichment_matrix[n].sum() != 0
-            ]
-        except IndexError as e:
-            raise IndexError(
-                f"Failed to find neighbors for node '{node}': Ensure that the node exists in the network and that the binary enrichment matrix is correctly indexed."
-            ) from e
-
-        # Calculate the shortest distance to a neighbor
-        if neighbors:
-            shortest_distance = min([_get_euclidean_distance(node, n, network) for n in neighbors])
-            shortest_distances.append(shortest_distance)
+    # Calculate the distance threshold value based on the shortest distances
+    enrichment_matrix, binary_enrichment_matrix = _impute_neighbors_with_similarity(
+        network, enrichment_matrix, binary_enrichment_matrix, max_depth=max_depth
+    )
+    # Create a matrix where non-significant entries are set to zero
+    significant_enrichment_matrix = np.where(binary_enrichment_matrix == 1, enrichment_matrix, 0)
+
+    return enrichment_matrix, binary_enrichment_matrix, significant_enrichment_matrix
+
+
+def _impute_neighbors_with_similarity(
+    network: nx.Graph,
+    enrichment_matrix: np.ndarray,
+    binary_enrichment_matrix: np.ndarray,
+    max_depth: int = 3,
+) -> Tuple[np.ndarray, np.ndarray]:
+    """Impute non-enriched nodes based on the closest enriched neighbors' profiles and their similarity.
 
+    Args:
+        network (nx.Graph): The network graph with nodes having IDs matching the matrix indices.
+        enrichment_matrix (np.ndarray): The enrichment matrix with rows to be imputed.
+        binary_enrichment_matrix (np.ndarray): The alpha threshold matrix to be imputed similarly.
+        max_depth (int): Maximum depth of nodes to traverse for imputing values.
+
+    Returns:
+        Tuple[np.ndarray, np.ndarray]: A tuple containing:
+        - The imputed enrichment matrix.
+        - The imputed alpha threshold matrix.
+    """
     depth = 1
     rows_to_impute = np.where(binary_enrichment_matrix.sum(axis=1) == 0)[0]
     while len(rows_to_impute) and depth <= max_depth:
         next_rows_to_impute = []
-        for row_index in rows_to_impute:
-            neighbors = nx.single_source_shortest_path_length(network, row_index, cutoff=depth)
-            valid_neighbors = [
-                n
-                for n in neighbors
-                if n != row_index
-                and binary_enrichment_matrix[n].sum() != 0
-                and enrichment_matrix[n].sum() != 0
-            ]
-            if valid_neighbors:
-                closest_neighbor = min(
-                    valid_neighbors, key=lambda n: _get_euclidean_distance(row_index, n, network)
+        # Iterate over all enriched nodes
+        for row_index in range(binary_enrichment_matrix.shape[0]):
+            if binary_enrichment_matrix[row_index].sum() != 0:
+                enrichment_matrix, binary_enrichment_matrix = _process_node_imputation(
+                    row_index, network, enrichment_matrix, binary_enrichment_matrix, depth
                 )
-                # Impute the row with the closest valid neighbor's data
-                enrichment_matrix[row_index] = enrichment_matrix[closest_neighbor]
-                binary_enrichment_matrix[row_index] = binary_enrichment_matrix[
-                    closest_neighbor
-                ] / np.sqrt(depth + 1)
-            else:
-                next_rows_to_impute.append(row_index)
 
-        rows_to_impute = next_rows_to_impute
+        # Update rows to impute for the next iteration
+        rows_to_impute = np.where(binary_enrichment_matrix.sum(axis=1) == 0)[0]
        depth += 1
 
-    # Create a matrix where non-significant entries are set to zero
-    significant_enrichment_matrix = np.where(binary_enrichment_matrix == 1, enrichment_matrix, 0)
+    return enrichment_matrix, binary_enrichment_matrix
 
-    return enrichment_matrix, binary_enrichment_matrix, significant_enrichment_matrix
+
+def _process_node_imputation(
+    row_index: int,
+    network: nx.Graph,
+    enrichment_matrix: np.ndarray,
+    binary_enrichment_matrix: np.ndarray,
+    depth: int,
+) -> Tuple[np.ndarray, np.ndarray]:
+    """Process the imputation for a single node based on its enriched neighbors.
+
+    Args:
+        row_index (int): The index of the enriched node being processed.
+        network (nx.Graph): The network graph with nodes having IDs matching the matrix indices.
+        enrichment_matrix (np.ndarray): The enrichment matrix with rows to be imputed.
+        binary_enrichment_matrix (np.ndarray): The alpha threshold matrix to be imputed similarly.
+        depth (int): Current depth for traversal.
+
+    Returns:
+        Tuple[np.ndarray, np.ndarray]: The modified enrichment matrix and binary threshold matrix.
+    """
+    # Check neighbors at the current depth
+    neighbors = nx.single_source_shortest_path_length(network, row_index, cutoff=depth)
+    # Filter annotated neighbors (already enriched)
+    annotated_neighbors = [
+        n
+        for n in neighbors
+        if n != row_index
+        and binary_enrichment_matrix[n].sum() != 0
+        and enrichment_matrix[n].sum() != 0
+    ]
+    # Filter non-enriched neighbors
+    valid_neighbors = [
+        n
+        for n in neighbors
+        if n != row_index
+        and binary_enrichment_matrix[n].sum() == 0
+        and enrichment_matrix[n].sum() == 0
+    ]
+    # If there are valid non-enriched neighbors
+    if valid_neighbors and annotated_neighbors:
+        # Calculate distances to annotated neighbors
+        distances_to_annotated = [
+            _get_euclidean_distance(row_index, n, network) for n in annotated_neighbors
+        ]
+        # Calculate the IQR to identify outliers
+        q1, q3 = np.percentile(distances_to_annotated, [25, 75])
+        iqr = q3 - q1
+        lower_bound = q1 - 1.5 * iqr
+        upper_bound = q3 + 1.5 * iqr
+        # Filter valid non-enriched neighbors that fall within the IQR bounds
+        valid_neighbors_within_iqr = [
+            n
+            for n in valid_neighbors
+            if lower_bound <= _get_euclidean_distance(row_index, n, network) <= upper_bound
+        ]
+        # If there are any valid neighbors within the IQR
+        if valid_neighbors_within_iqr:
+            # If more than one valid neighbor is within the IQR, compute pairwise cosine similarities
+            if len(valid_neighbors_within_iqr) > 1:
+                # Find the most similar neighbor based on pairwise cosine similarities
+                def sum_pairwise_cosine_similarities(neighbor):
+                    return sum(
+                        cosine_similarity(
+                            enrichment_matrix[neighbor].reshape(1, -1),
+                            enrichment_matrix[other_neighbor].reshape(1, -1),
+                        )[0][0]
+                        for other_neighbor in valid_neighbors_within_iqr
+                        if other_neighbor != neighbor
+                    )
+
+                most_similar_neighbor = max(
+                    valid_neighbors_within_iqr, key=sum_pairwise_cosine_similarities
+                )
+            else:
+                most_similar_neighbor = valid_neighbors_within_iqr[0]
+
+            # Impute the most similar non-enriched neighbor with the enriched node's data, scaled by depth
+            enrichment_matrix[most_similar_neighbor] = enrichment_matrix[row_index] / np.sqrt(
+                depth + 1
+            )
+            binary_enrichment_matrix[most_similar_neighbor] = binary_enrichment_matrix[row_index]
+
+    return enrichment_matrix, binary_enrichment_matrix
 
 
 def _prune_neighbors(
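Note: the selection rule inside the new `_process_node_imputation` picks, among the candidate rows that survive the IQR filter, the one with the largest summed cosine similarity to the other candidates. A standalone sketch of that rule with a synthetic enrichment matrix:

```python
# Candidate selection by summed pairwise cosine similarity; values are synthetic.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

enrichment_matrix = np.array(
    [[1.0, 0.0, 0.0],
     [0.9, 0.1, 0.0],
     [0.0, 0.0, 1.0]]
)
candidates = [0, 1, 2]  # hypothetical non-enriched neighbors within the IQR bounds

def sum_pairwise_cosine_similarities(neighbor):
    # Total similarity of this candidate's profile to all other candidates
    return sum(
        cosine_similarity(
            enrichment_matrix[neighbor].reshape(1, -1),
            enrichment_matrix[other].reshape(1, -1),
        )[0][0]
        for other in candidates
        if other != neighbor
    )

most_similar = max(candidates, key=sum_pairwise_cosine_similarities)
print(most_similar)  # 0: rows 0 and 1 align; row 2 is orthogonal to both
```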
@@ -240,27 +321,27 @@ def _prune_neighbors(
     """
     # Identify indices with non-zero rows in the binary enrichment matrix
     non_zero_indices = np.where(binary_enrichment_matrix.sum(axis=1) != 0)[0]
-    average_distances = []
+    median_distances = []
     for node in non_zero_indices:
         neighbors = [n for n in network.neighbors(node) if binary_enrichment_matrix[n].sum() != 0]
         if neighbors:
-            average_distance = np.mean(
+            median_distance = np.median(
                 [_get_euclidean_distance(node, n, network) for n in neighbors]
             )
-            average_distances.append(average_distance)
+            median_distances.append(median_distance)
 
     # Calculate the distance threshold value based on rank
-    distance_threshold_value = _calculate_threshold(average_distances, 1 - distance_threshold)
+    distance_threshold_value = _calculate_threshold(median_distances, 1 - distance_threshold)
     # Prune nodes that are outliers based on the distance threshold
     for row_index in non_zero_indices:
         neighbors = [
             n for n in network.neighbors(row_index) if binary_enrichment_matrix[n].sum() != 0
         ]
         if neighbors:
-            average_distance = np.mean(
+            median_distance = np.median(
                 [_get_euclidean_distance(row_index, n, network) for n in neighbors]
             )
-            if average_distance >= distance_threshold_value:
+            if median_distance >= distance_threshold_value:
                 enrichment_matrix[row_index] = 0
                 binary_enrichment_matrix[row_index] = 0
 
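Note: `_prune_neighbors` now summarizes each node's neighbor distances with the median rather than the mean, which makes the pruning decision robust to a single long edge. A quick numeric illustration (distances are made up):

```python
# One outlier edge inflates the mean but barely moves the median.
import numpy as np

neighbor_distances = [1.0, 1.2, 1.1, 9.0]  # hypothetical edge lengths
print(np.mean(neighbor_distances))    # 3.075 -- dragged up by the outlier
print(np.median(neighbor_distances))  # 1.15  -- typical spacing, outlier-resistant
```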
@@ -305,18 +386,18 @@ def _get_node_position(network: nx.Graph, node: Any) -> np.ndarray:
     )
 
 
-def _calculate_threshold(average_distances: List, distance_threshold: float) -> float:
-    """Calculate the distance threshold based on the given average distances and a percentile threshold.
+def _calculate_threshold(median_distances: List, distance_threshold: float) -> float:
+    """Calculate the distance threshold based on the given median distances and a percentile threshold.
 
     Args:
-        average_distances (list): An array of average distances.
+        median_distances (list): An array of median distances.
         distance_threshold (float): A percentile threshold (0 to 1) used to determine the distance cutoff.
 
     Returns:
         float: The calculated distance threshold value.
     """
-    # Sort the average distances
-    sorted_distances = np.sort(average_distances)
+    # Sort the median distances
+    sorted_distances = np.sort(median_distances)
     # Compute the rank percentiles for the sorted distances
     rank_percentiles = np.linspace(0, 1, len(sorted_distances))
     # Interpolating the ranks to 1000 evenly spaced percentiles
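Note: the hunk cuts off at the interpolation step, so the exact lookup in `_calculate_threshold` is not shown. A hedged sketch of the rank-percentile idea using `np.interp` (the 1000-point interpolation and any boundary handling in the real function are assumptions):

```python
# Rank-based cutoff sketch: sort the per-node median distances, map them onto
# rank percentiles, and read off the distance at the requested percentile.
import numpy as np

median_distances = [1.1, 0.9, 1.3, 4.0, 1.0]  # synthetic values
distance_threshold = 0.8                       # keep roughly the closest 80%

sorted_distances = np.sort(median_distances)
rank_percentiles = np.linspace(0, 1, len(sorted_distances))
# Distance value whose rank sits at the requested percentile
cutoff = np.interp(distance_threshold, rank_percentiles, sorted_distances)
print(round(cutoff, 3))  # 1.84: rows with median distance >= cutoff get pruned
```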
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/network/plot.py
@@ -1123,7 +1123,9 @@ def _to_rgba(
     """
     # Handle single color case (string, RGB, or RGBA)
     if isinstance(color, str) or (
-        isinstance(color, (list, tuple, np.ndarray)) and len(color) in [3, 4]
+        isinstance(color, (list, tuple, np.ndarray))
+        and len(color) in [3, 4]
+        and not any(isinstance(c, (list, tuple, np.ndarray)) for c in color)
    ):
        rgba_color = np.array(mcolors.to_rgba(color))
        # Only set alpha if the input is an RGB color or a string (not RGBA)
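Note: the added `not any(isinstance(...))` clause distinguishes a single RGB/RGBA value from a length-3 or length-4 *list of colors*, which the old check misread as one color. A minimal sketch of the sharpened test (the helper name is hypothetical):

```python
# A 3- or 4-length sequence counts as one color only if none of its
# elements is itself a sequence.
import numpy as np

def is_single_color(color):
    if isinstance(color, str):
        return True
    return (
        isinstance(color, (list, tuple, np.ndarray))
        and len(color) in [3, 4]
        and not any(isinstance(c, (list, tuple, np.ndarray)) for c in color)
    )

print(is_single_color((1.0, 0.0, 0.0)))                    # True: one RGB color
print(is_single_color([(1, 0, 0), (0, 1, 0), (0, 0, 1)]))  # False: three colors
```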
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk/risk.py
@@ -237,7 +237,7 @@ class RISK(NetworkIO, AnnotationsIO):
         tail: str = "right",  # OPTIONS: "right" (enrichment), "left" (depletion), "both"
         pval_cutoff: float = 0.01,  # OPTIONS: Any value between 0 to 1
         fdr_cutoff: float = 0.9999,  # OPTIONS: Any value between 0 to 1
-        impute_depth: int = 1,
+        impute_depth: int = 0,
         prune_threshold: float = 0.0,
         linkage_criterion: str = "distance",
         linkage_method: str = "average",
@@ -254,7 +254,7 @@ class RISK(NetworkIO, AnnotationsIO):
            tail (str, optional): Type of significance tail ("right", "left", "both"). Defaults to "right".
            pval_cutoff (float, optional): p-value cutoff for significance. Defaults to 0.01.
            fdr_cutoff (float, optional): FDR cutoff for significance. Defaults to 0.9999.
-            impute_depth (int, optional): Depth for imputing neighbors. Defaults to 1.
+            impute_depth (int, optional): Depth for imputing neighbors. Defaults to 0.
            prune_threshold (float, optional): Distance threshold for pruning neighbors. Defaults to 0.0.
            linkage_criterion (str, optional): Clustering criterion for defining domains. Defaults to "distance".
            linkage_method (str, optional): Clustering method to use. Defaults to "average".
{risk_network-0.0.6b10 → risk_network-0.0.7b1}/risk_network.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: risk-network
-Version: 0.0.6b10
+Version: 0.0.7b1
 Summary: A Python package for biological network analysis
 Author: Ira Horecka
 Author-email: Ira Horecka <ira89@icloud.com>