risk-network 0.0.14b2.tar.gz → 0.0.14b3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. {risk_network-0.0.14b2/src/risk_network.egg-info → risk_network-0.0.14b3}/PKG-INFO +1 -1
  2. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/__init__.py +1 -1
  3. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_domains.py +77 -26
  4. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_neighborhoods.py +45 -23
  5. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_graph/_summary.py +18 -20
  6. {risk_network-0.0.14b2 → risk_network-0.0.14b3/src/risk_network.egg-info}/PKG-INFO +1 -1
  7. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_graph.py +88 -0
  8. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/LICENSE +0 -0
  9. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/README.md +0 -0
  10. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/pyproject.toml +0 -0
  11. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/setup.cfg +0 -0
  12. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_annotation/__init__.py +0 -0
  13. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_annotation/_annotation.py +0 -0
  14. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_annotation/_io.py +0 -0
  15. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_annotation/_nltk_setup.py +0 -0
  16. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_log/__init__.py +0 -0
  17. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_log/_console.py +0 -0
  18. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_log/_parameters.py +0 -0
  19. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/__init__.py +0 -0
  20. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_api.py +0 -0
  21. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_community.py +0 -0
  22. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_stats/__init__.py +0 -0
  23. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_stats/_permutation/__init__.py +0 -0
  24. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_stats/_permutation/_permutation.py +0 -0
  25. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_stats/_permutation/_test_functions.py +0 -0
  26. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_stats/_tests.py +0 -0
  27. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/__init__.py +0 -0
  28. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_graph/__init__.py +0 -0
  29. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_graph/_api.py +0 -0
  30. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_graph/_graph.py +0 -0
  31. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_graph/_stats.py +0 -0
  32. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_io.py +0 -0
  33. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/__init__.py +0 -0
  34. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_api.py +0 -0
  35. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_canvas.py +0 -0
  36. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_contour.py +0 -0
  37. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_labels.py +0 -0
  38. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_network.py +0 -0
  39. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_plotter.py +0 -0
  40. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_utils/__init__.py +0 -0
  41. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_utils/_colors.py +0 -0
  42. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_plotter/_utils/_layout.py +0 -0
  43. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_risk.py +0 -0
  44. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk_network.egg-info/SOURCES.txt +0 -0
  45. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk_network.egg-info/dependency_links.txt +0 -0
  46. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk_network.egg-info/requires.txt +0 -0
  47. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk_network.egg-info/top_level.txt +0 -0
  48. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_annotation.py +0 -0
  49. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_io_combinations.py +0 -0
  50. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_neighborhoods.py +0 -0
  51. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_network.py +0 -0
  52. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_plotter.py +0 -0
  53. {risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_log.py +0 -0
{risk_network-0.0.14b2/src/risk_network.egg-info → risk_network-0.0.14b3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: risk-network
-Version: 0.0.14b2
+Version: 0.0.14b3
 Summary: A Python package for scalable network analysis and high-quality visualization.
 Author-email: Ira Horecka <ira89@icloud.com>
 License: GPL-3.0-or-later
{risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/__init__.py
@@ -8,4 +8,4 @@ RISK: Regional Inference of Significant Kinships
 from ._risk import RISK
 
 __all__ = ["RISK"]
-__version__ = "0.0.14-beta.2"
+__version__ = "0.0.14-beta.3"
{risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_domains.py
@@ -54,37 +54,48 @@ def define_domains(
     Raises:
         ValueError: If the clustering criterion is set to "off" or if an error occurs during clustering.
     """
-    try:
-        if linkage_criterion == "off":
-            raise ValueError("Clustering is turned off.")
+    # Validate args first; let user mistakes raise immediately
+    clustering_off = _validate_clustering_args(
+        linkage_criterion, linkage_method, linkage_metric, linkage_threshold
+    )
 
+    # If clustering is turned off, assign unique domains and skip
+    if clustering_off:
+        n_rows = len(top_annotation)
+        logger.warning("Clustering is turned off. Skipping clustering.")
+        top_annotation["domain"] = range(1, n_rows + 1)
+    else:
         # Transpose the matrix to cluster annotations
         m = significant_neighborhoods_significance[:, top_annotation["significant_annotation"]].T
         # Safeguard the matrix by replacing NaN, Inf, and -Inf values
         m = _safeguard_matrix(m)
-        # Optimize silhouette score across different linkage methods and distance metrics
-        best_linkage, best_metric, best_threshold = _optimize_silhouette_across_linkage_and_metrics(
-            m, linkage_criterion, linkage_method, linkage_metric, linkage_threshold
-        )
-        # Perform hierarchical clustering
-        Z = linkage(m, method=best_linkage, metric=best_metric)
-        logger.warning(
-            f"Linkage criterion: '{linkage_criterion}'\nLinkage method: '{best_linkage}'\nLinkage metric: '{best_metric}'\nLinkage threshold: {round(best_threshold, 3)}"
-        )
-        # Calculate the optimal threshold for clustering
-        max_d_optimal = np.max(Z[:, 2]) * best_threshold
-        # Assign domains to the annotation matrix
-        domains = fcluster(Z, max_d_optimal, criterion=linkage_criterion)
-        top_annotation["domain"] = 0
-        top_annotation.loc[top_annotation["significant_annotation"], "domain"] = domains
-    except (ValueError, LinAlgError):
-        # If a ValueError is encountered, handle it by assigning unique domains
-        n_rows = len(top_annotation)
-        if linkage_criterion == "off":
-            logger.warning("Clustering is turned off. Skipping clustering.")
-        else:
-            logger.error("Error encountered. Skipping clustering.")
-        top_annotation["domain"] = range(1, n_rows + 1)  # Assign unique domains
+        try:
+            # Optimize silhouette score across different linkage methods and distance metrics
+            (
+                best_linkage,
+                best_metric,
+                best_threshold,
+            ) = _optimize_silhouette_across_linkage_and_metrics(
+                m, linkage_criterion, linkage_method, linkage_metric, linkage_threshold
+            )
+            # Perform hierarchical clustering
+            Z = linkage(m, method=best_linkage, metric=best_metric)
+            logger.warning(
+                f"Linkage criterion: '{linkage_criterion}'\nLinkage method: '{best_linkage}'\nLinkage metric: '{best_metric}'\nLinkage threshold: {round(best_threshold, 3)}"
+            )
+            # Calculate the optimal threshold for clustering
+            max_d_optimal = np.max(Z[:, 2]) * best_threshold
+            # Assign domains to the annotation matrix
+            domains = fcluster(Z, max_d_optimal, criterion=linkage_criterion)
+            top_annotation["domain"] = 0
+            top_annotation.loc[top_annotation["significant_annotation"], "domain"] = domains
+        except (LinAlgError, ValueError):
+            # Numerical errors or degenerate input are handled gracefully (not user error)
+            n_rows = len(top_annotation)
+            logger.error(
+                "Clustering failed due to numerical or data degeneracy. Assigning unique domains."
+            )
+            top_annotation["domain"] = range(1, n_rows + 1)
 
     # Create DataFrames to store domain information
     node_to_significance = pd.DataFrame(
@@ -184,6 +195,46 @@ def trim_domains(
     return valid_domains, valid_trimmed_domains_matrix
 
 
+def _validate_clustering_args(
+    linkage_criterion: str,
+    linkage_method: str,
+    linkage_metric: str,
+    linkage_threshold: Union[float, str],
+) -> bool:
+    """
+    Validate user-provided clustering arguments.
+
+    Returns:
+        bool: True if clustering is turned off (criterion == 'off'); False otherwise.
+
+    Raises:
+        ValueError: If any argument is invalid (user error).
+    """
+    # Allow opting out of clustering without raising
+    if linkage_criterion == "off":
+        return True
+    # Validate linkage method (allow "auto")
+    if linkage_method != "auto" and linkage_method not in LINKAGE_METHODS:
+        raise ValueError(
+            f"Invalid linkage_method '{linkage_method}'. Allowed values are 'auto' or one of: {sorted(LINKAGE_METHODS)}"
+        )
+    # Validate linkage metric (allow "auto")
+    if linkage_metric != "auto" and linkage_metric not in LINKAGE_METRICS:
+        raise ValueError(
+            f"Invalid linkage_metric '{linkage_metric}'. Allowed values are 'auto' or one of: {sorted(LINKAGE_METRICS)}"
+        )
+    # Validate linkage threshold (allow "auto"; otherwise must be float in (0, 1])
+    if linkage_threshold != "auto":
+        try:
+            lt = float(linkage_threshold)
+        except (TypeError, ValueError):
+            raise ValueError("linkage_threshold must be 'auto' or a float in the interval (0, 1].")
+        if not (0.0 < lt <= 1.0):
+            raise ValueError(f"linkage_threshold must be within (0, 1]. Received: {lt}")
+
+    return False
+
+
 def _safeguard_matrix(matrix: np.ndarray) -> np.ndarray:
    """
    Safeguard the matrix by replacing NaN, Inf, and -Inf values.
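
Together, these two hunks change the error contract of define_domains: invalid user arguments now raise ValueError up front, linkage_criterion="off" becomes an explicit opt-out instead of an internally raised error, and only numerical failures during clustering fall back to assigning one domain per annotation. A minimal, self-contained sketch of the new validation behavior follows; the LINKAGE_METHODS and LINKAGE_METRICS sets here are illustrative placeholders, since the module's real constants are not part of this diff.

# Standalone sketch of the new validation contract (not the package's code).
# LINKAGE_METHODS / LINKAGE_METRICS stand in for the module's real constants.
LINKAGE_METHODS = {"single", "complete", "average", "weighted", "centroid", "median", "ward"}
LINKAGE_METRICS = {"euclidean", "cosine", "hamming", "yule"}

def validate_clustering_args_sketch(criterion, method, metric, threshold):
    """Return True when clustering is off; raise ValueError on bad user input."""
    if criterion == "off":
        return True  # explicit opt-out, never an error
    if method != "auto" and method not in LINKAGE_METHODS:
        raise ValueError(f"Invalid linkage_method '{method}'")
    if metric != "auto" and metric not in LINKAGE_METRICS:
        raise ValueError(f"Invalid linkage_metric '{metric}'")
    if threshold != "auto":
        try:
            lt = float(threshold)
        except (TypeError, ValueError):
            raise ValueError("linkage_threshold must be 'auto' or a float in (0, 1].")
        if not (0.0 < lt <= 1.0):
            raise ValueError(f"linkage_threshold must be within (0, 1]. Received: {lt}")
    return False

assert validate_clustering_args_sketch("off", "not_a_method", "not_a_metric", "bad") is True
assert validate_clustering_args_sketch("distance", "average", "yule", 0.2) is False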
{risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_neighborhoods/_neighborhoods.py
@@ -394,34 +394,33 @@ def _prune_neighbors(
     # Identify indices with non-zero rows in the binary significance matrix
     non_zero_indices = np.where(significant_binary_significance_matrix.sum(axis=1) != 0)[0]
     median_distances = []
+    distance_lookup = {}
     for node in non_zero_indices:
-        neighbors = [
-            n
-            for n in network.neighbors(node)
-            if significant_binary_significance_matrix[n].sum() != 0
-        ]
-        if neighbors:
-            median_distance = np.median(
-                [_get_euclidean_distance(node, n, network) for n in neighbors]
-            )
-            median_distances.append(median_distance)
+        dist = _median_distance_to_significant_neighbors(
+            node, network, significant_binary_significance_matrix
+        )
+        if dist is not None:
+            median_distances.append(dist)
+            distance_lookup[node] = dist
+
+    if not median_distances:
+        logger.warning("No significant neighbors found for pruning.")
+        significant_significance_matrix = np.where(
+            significant_binary_significance_matrix == 1, significance_matrix, 0
+        )
+        return (
+            significance_matrix,
+            significant_binary_significance_matrix,
+            significant_significance_matrix,
+        )
 
     # Calculate the distance threshold value based on rank
     distance_threshold_value = _calculate_threshold(median_distances, 1 - distance_threshold)
     # Prune nodes that are outliers based on the distance threshold
-    for row_index in non_zero_indices:
-        neighbors = [
-            n
-            for n in network.neighbors(row_index)
-            if significant_binary_significance_matrix[n].sum() != 0
-        ]
-        if neighbors:
-            median_distance = np.median(
-                [_get_euclidean_distance(row_index, n, network) for n in neighbors]
-            )
-            if median_distance >= distance_threshold_value:
-                significance_matrix[row_index] = 0
-                significant_binary_significance_matrix[row_index] = 0
+    for node, dist in distance_lookup.items():
+        if dist >= distance_threshold_value:
+            significance_matrix[node] = 0
+            significant_binary_significance_matrix[node] = 0
 
     # Create a matrix where non-significant entries are set to zero
     significant_significance_matrix = np.where(
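
The new early return guards _calculate_threshold(median_distances, 1 - distance_threshold) against an empty distance list. _calculate_threshold itself is unchanged and does not appear in this diff; purely for intuition, the quantile-based stand-in below illustrates why the second argument is 1 - distance_threshold: with distance_threshold=0.1, nodes whose median distance to significant neighbors lands at or above the 90th percentile would be pruned.

import numpy as np

# Illustrative stand-in for _calculate_threshold (not shown in this diff):
# a rank-based cutoff at the given quantile of the median distances.
def calculate_threshold_sketch(median_distances, rank_fraction):
    return float(np.quantile(np.asarray(median_distances), rank_fraction))

cutoff = calculate_threshold_sketch([0.5, 0.7, 0.9, 3.2], 1 - 0.1)  # ~2.51
assert 3.2 >= cutoff  # the outlier node would be pruned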
@@ -435,6 +434,29 @@ def _prune_neighbors(
     )
 
 
+def _median_distance_to_significant_neighbors(
+    node, network, significance_mask
+) -> Union[float, None]:
+    """
+    Calculate the median distance from a node to its significant neighbors.
+
+    Args:
+        node (Any): The node for which the median distance is being calculated.
+        network (nx.Graph): The network graph containing the nodes.
+        significance_mask (np.ndarray): Binary matrix indicating significant nodes.
+
+    Returns:
+        Union[float, None]: The median distance to significant neighbors, or None if no significant neighbors exist.
+    """
+    neighbors = [n for n in network.neighbors(node) if significance_mask[n].sum() != 0]
+    if not neighbors:
+        return None
+    # Calculate distances to significant neighbors
+    distances = [_get_euclidean_distance(node, n, network) for n in neighbors]
+
+    return np.median(distances)
+
+
 def _get_euclidean_distance(node1: Any, node2: Any, network: nx.Graph) -> float:
     """
     Calculate the Euclidean distance between two nodes in the network.
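
Extracting this helper removes the duplicated neighbor scan from _prune_neighbors, and its None return for nodes without significant neighbors is what drives both the distance_lookup cache and the empty-list early return above. Below is a toy run of the same logic on a three-node path graph; _get_euclidean_distance appears in this diff only by signature, so the distance function here is an assumed stand-in that reads positions off the graph's node attributes.

import networkx as nx
import numpy as np

# Assumed stand-in for _get_euclidean_distance (only its signature is shown above).
def euclidean_distance_sketch(node1, node2, network):
    p1 = np.asarray(network.nodes[node1]["pos"])
    p2 = np.asarray(network.nodes[node2]["pos"])
    return float(np.linalg.norm(p1 - p2))

G = nx.path_graph(3)  # edges: 0-1, 1-2
nx.set_node_attributes(G, {0: (0.0, 0.0), 1: (1.0, 0.0), 2: (3.0, 0.0)}, "pos")
mask = np.array([[1], [1], [1]])  # every node significant in this toy example

# Mirror _median_distance_to_significant_neighbors for node 1:
neighbors = [n for n in G.neighbors(1) if mask[n].sum() != 0]  # -> [0, 2]
distances = [euclidean_distance_sketch(1, n, G) for n in neighbors]  # -> [1.0, 2.0]
assert np.median(distances) == 1.5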
{risk_network-0.0.14b2 → risk_network-0.0.14b3}/src/risk/_network/_graph/_summary.py
@@ -84,7 +84,7 @@ class Summary:
 
         Returns:
             pd.DataFrame: Processed DataFrame containing significance scores, p-values, q-values,
-                and annotation member information.
+                and matched annotation members information.
         """
         log_header("Loading analysis summary")
         # Calculate significance and depletion q-values from p-value matrices in annotation
@@ -109,9 +109,9 @@ class Summary:
         # Add minimum p-values and q-values to DataFrame
         results[
             [
-                "Enrichment P-Value",
+                "Enrichment P-value",
                 "Enrichment Q-value",
-                "Depletion P-Value",
+                "Depletion P-value",
                 "Depletion Q-value",
             ]
         ] = results.apply(
@@ -126,13 +126,13 @@ class Summary:
             axis=1,
             result_type="expand",
         )
-        # Add annotation members and their counts
-        results["Annotation Members in Network"] = results["Annotation"].apply(
+        # Add matched annotation members and their counts
+        results["Matched Members"] = results["Annotation"].apply(
             lambda desc: self._get_annotation_members(desc)
         )
-        results["Annotation Members in Network Count"] = results[
-            "Annotation Members in Network"
-        ].apply(lambda x: len(x.split(";")) if x else 0)
+        results["Matched Count"] = results["Matched Members"].apply(
+            lambda x: len(x.split(";")) if x else 0
+        )
 
         # Reorder columns and drop rows with NaN values
         results = (
@@ -140,12 +140,12 @@ class Summary:
             [
                 "Domain ID",
                 "Annotation",
-                "Annotation Members in Network",
-                "Annotation Members in Network Count",
+                "Matched Members",
+                "Matched Count",
                 "Summed Significance Score",
-                "Enrichment P-Value",
+                "Enrichment P-value",
                 "Enrichment Q-value",
-                "Depletion P-Value",
+                "Depletion P-value",
                 "Depletion Q-value",
             ]
         ]
@@ -159,20 +159,18 @@ class Summary:
         results = pd.merge(ordered_annotation, results, on="Annotation", how="left").fillna(
             {
                 "Domain ID": -1,
-                "Annotation Members in Network": "",
-                "Annotation Members in Network Count": 0,
+                "Matched Members": "",
+                "Matched Count": 0,
                 "Summed Significance Score": 0.0,
-                "Enrichment P-Value": 1.0,
+                "Enrichment P-value": 1.0,
                 "Enrichment Q-value": 1.0,
-                "Depletion P-Value": 1.0,
+                "Depletion P-value": 1.0,
                 "Depletion Q-value": 1.0,
             }
         )
-        # Convert "Domain ID" and "Annotation Members in Network Count" to integers
+        # Convert "Domain ID" and "Matched Count" to integers
         results["Domain ID"] = results["Domain ID"].astype(int)
-        results["Annotation Members in Network Count"] = results[
-            "Annotation Members in Network Count"
-        ].astype(int)
+        results["Matched Count"] = results["Matched Count"].astype(int)
 
         return results
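
The five hunks above amount to one coordinated rename in the summary DataFrame: "Annotation Members in Network" becomes "Matched Members", its count column becomes "Matched Count", and the P-value columns adopt the lowercase "P-value" spelling already used by the Q-value columns. Downstream code that indexes the old names will raise KeyError on 0.0.14b3; the hypothetical shim below restores the old names for such consumers (the mapping is taken from the hunks; the helper itself is not part of the package).

# Old (0.0.14b2) -> new (0.0.14b3) summary column names, per the hunks above.
SUMMARY_COLUMN_RENAMES = {
    "Annotation Members in Network": "Matched Members",
    "Annotation Members in Network Count": "Matched Count",
    "Enrichment P-Value": "Enrichment P-value",
    "Depletion P-Value": "Depletion P-value",
}

def to_legacy_columns(summary_df):
    """Rename a 0.0.14b3 summary DataFrame back to 0.0.14b2 column names."""
    inverse = {new: old for old, new in SUMMARY_COLUMN_RENAMES.items()}
    return summary_df.rename(columns=inverse)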
 
{risk_network-0.0.14b2 → risk_network-0.0.14b3/src/risk_network.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: risk-network
-Version: 0.0.14b2
+Version: 0.0.14b3
 Summary: A Python package for scalable network analysis and high-quality visualization.
 Author-email: Ira Horecka <ira89@icloud.com>
 License: GPL-3.0-or-later
{risk_network-0.0.14b2 → risk_network-0.0.14b3}/tests/test_load_graph.py
@@ -378,6 +378,94 @@ def test_pop_domain(graph):
     ), f"{domain_id_to_remove} should be removed from node_id_to_domain_ids_and_significance_map['significances']"
 
 
+@pytest.mark.parametrize(
+    "bad_kwargs",
+    [
+        {"linkage_method": "not_a_method"},
+        {"linkage_metric": "not_a_metric"},
+        {"linkage_threshold": "bad"},
+        {"linkage_threshold": 0.0},  # out of (0, 1]
+        {"linkage_threshold": 1.5},  # out of (0, 1]
+    ],
+)
+def test_invalid_clustering_args_raise(risk_obj, cytoscape_network, json_annotation, bad_kwargs):
+    """
+    Validate that invalid clustering options raise a ValueError (user error).
+
+    Args:
+        risk_obj: The RISK object instance used for loading neighborhoods and graphs.
+        cytoscape_network: The network object to be used for neighborhood and graph generation.
+        json_annotation: The JSON annotation associated with the network.
+        bad_kwargs: A dict containing an intentionally invalid clustering parameter.
+    """
+    neighborhoods = risk_obj.load_neighborhoods_binom(
+        network=cytoscape_network,
+        annotation=json_annotation,
+        distance_metric="louvain",
+        louvain_resolution=1.0,
+        fraction_shortest_edges=0.75,
+        null_distribution="network",
+        random_seed=888,
+    )
+
+    with pytest.raises(ValueError):
+        risk_obj.load_graph(
+            network=cytoscape_network,
+            annotation=json_annotation,
+            neighborhoods=neighborhoods,
+            tail="right",
+            pval_cutoff=0.05,
+            fdr_cutoff=1.0,
+            impute_depth=1,
+            prune_threshold=0.1,
+            linkage_criterion="distance",
+            linkage_method=bad_kwargs.get("linkage_method", "average"),
+            linkage_metric=bad_kwargs.get("linkage_metric", "yule"),
+            linkage_threshold=bad_kwargs.get("linkage_threshold", 0.2),
+            min_cluster_size=5,
+            max_cluster_size=1000,
+        )
+
+
+def test_off_criterion_bypasses_invalid_options(risk_obj, cytoscape_network, json_annotation):
+    """
+    Verify that setting linkage_criterion='off' cleanly bypasses clustering validation and does not raise.
+
+    Args:
+        risk_obj: The RISK object instance used for loading neighborhoods and graphs.
+        cytoscape_network: The network object to be used for neighborhood and graph generation.
+        json_annotation: The JSON annotation associated with the network.
+    """
+    neighborhoods = risk_obj.load_neighborhoods_binom(
+        network=cytoscape_network,
+        annotation=json_annotation,
+        distance_metric="louvain",
+        louvain_resolution=1.0,
+        fraction_shortest_edges=0.75,
+        null_distribution="network",
+        random_seed=888,
+    )
+
+    graph = risk_obj.load_graph(
+        network=cytoscape_network,
+        annotation=json_annotation,
+        neighborhoods=neighborhoods,
+        tail="right",
+        pval_cutoff=0.05,
+        fdr_cutoff=1.0,
+        impute_depth=1,
+        prune_threshold=0.1,
+        linkage_criterion="off",
+        linkage_method="not_a_method",
+        linkage_metric="not_a_metric",
+        linkage_threshold="bad",
+        min_cluster_size=5,
+        max_cluster_size=1000,
+    )
+
+    _validate_graph(graph)
+
+
 def _validate_graph(graph):
     """
     Validate that the graph is not None and contains nodes and edges.