cellects 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +155 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +31 -0
  7. cellects/core/cellects_threads.py +1451 -0
  8. cellects/core/motion_analysis.py +2010 -0
  9. cellects/core/one_image_analysis.py +1061 -0
  10. cellects/core/one_video_per_blob.py +540 -0
  11. cellects/core/program_organizer.py +1316 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +790 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +2066 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/image_segmentation.py +706 -0
  29. cellects/image_analysis/morphological_operations.py +1635 -0
  30. cellects/image_analysis/network_functions.py +1757 -0
  31. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  32. cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
  33. cellects/image_analysis/shape_descriptors.py +1016 -0
  34. cellects/utils/__init__.py +0 -0
  35. cellects/utils/decorators.py +14 -0
  36. cellects/utils/formulas.py +637 -0
  37. cellects/utils/load_display_save.py +1054 -0
  38. cellects/utils/utilitarian.py +490 -0
  39. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  40. cellects-0.1.2.dist-info/METADATA +132 -0
  41. cellects-0.1.2.dist-info/RECORD +44 -0
  42. cellects-0.1.2.dist-info/WHEEL +5 -0
  43. cellects-0.1.2.dist-info/entry_points.txt +2 -0
  44. cellects-0.1.2.dist-info/top_level.txt +1 -0
cellects/image_analysis/network_functions.py
@@ -0,0 +1,1757 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Network detection and skeleton analysis for images of biological networks (such as Physarum polycephalum).
4
+
5
+ This module provides tools for analyzing network structures in grayscale images of biological networks.
6
+ It implements vessel detection using Frangi/Sato filters, thresholding methods, and quality metrics to select optimal
7
+ network representations. Additional functionality includes pseudopod detection, skeletonization, loop removal,
8
+ edge identification, and network topology analysis through vertex/edge tracking.
9
+
10
+ Classes
11
+ -------
12
+ NetworkDetection : Detects vessels in images using multi-scale filters with parameter variations.
13
+ EdgeIdentification : Identifies edges between vertices in a skeletonized network structure.
14
+
15
+ Functions
16
+ ---------
17
+ get_skeleton_and_widths: Computes medial axis skeleton and distance transforms for networks.
18
+ remove_small_loops: Eliminates small loops from skeletons while preserving topology.
19
+ get_neighbor_comparisons: Analyzes pixel connectivity patterns in skeletons.
20
+ get_vertices_and_tips_from_skeleton: Identifies junctions and endpoints in network skeletons.
21
+ merge_network_with_pseudopods: Combines detected network structures with identified pseudopods.
22
+
23
+ Notes
24
+ -----
25
+ Uses morphological operations for network refinement, including hole closing, component labeling,
26
+ and distance transform analysis. Implements both Otsu thresholding and rolling window segmentation
27
+ methods for image processing workflows.
28
+ """
29
+
30
+ from cellects.image_analysis.morphological_operations import square_33, cross_33, rhombus_55, Ellipse, CompareNeighborsWithValue, get_contours, get_all_line_coordinates, close_holes, keep_one_connected_component
31
+ from cellects.utils.utilitarian import remove_coordinates
32
+ from cellects.utils.formulas import *
33
+ from cellects.utils.load_display_save import *
34
+ from cellects.image_analysis.image_segmentation import generate_color_space_combination, rolling_window_segmentation, binary_quality_index, find_threshold_given_mask
35
+ from numba.typed import Dict as TDict
36
+ from skimage import morphology
37
+ from skimage.filters import frangi, sato, threshold_otsu
38
+ from collections import deque
39
+ from scipy.spatial.distance import cdist
40
+ from scipy.ndimage import distance_transform_edt
41
+ import networkx as nx
42
+
43
+ # 8-connectivity neighbors
44
+ neighbors_8 = [(-1, -1), (-1, 0), (-1, 1),
45
+ (0, -1), (0, 1),
46
+ (1, -1), (1, 0), (1, 1)]
47
+ neighbors_4 = [(-1, 0), (0, -1), (0, 1), (1, 0)]
48
+
49
+
50
+
51
+ class NetworkDetection:
52
+ """
53
+ NetworkDetection
54
+
55
+ Class for detecting vessels in images using Frangi and Sato filters with various parameter sets.
56
+ It applies different thresholding methods, calculates quality metrics, and selects the best detection method.
57
+ """
58
+ def __init__(self, greyscale_image: NDArray[np.uint8], possibly_filled_pixels: NDArray[np.uint8], add_rolling_window: bool=False, origin_to_add: NDArray[np.uint8]=None, best_result: dict=None):
59
+ """
60
+ Initialize the object with given parameters.
61
+
62
+ Parameters
63
+ ----------
64
+ greyscale_image : NDArray[np.uint8]
65
+ The input greyscale image.
66
+ possibly_filled_pixels : NDArray[np.uint8]
67
+ Image containing possibly filled pixels.
68
+ add_rolling_window : bool, optional
69
+ Flag to add rolling window. Defaults to False.
70
+ origin_to_add : NDArray[np.uint8], optional
71
+ Origin to add. Defaults to None.
72
+ best_result : dict, optional
73
+ Best result dictionary. Defaults to None.
74
+ """
75
+ self.greyscale_image = greyscale_image
76
+ self.possibly_filled_pixels = possibly_filled_pixels
77
+ self.best_result = best_result
78
+ self.add_rolling_window = add_rolling_window
79
+ self.origin_to_add = origin_to_add
80
+ self.frangi_beta = 1.
81
+ self.frangi_gamma = 1.
82
+ self.black_ridges = True
83
+
84
+ def apply_frangi_variations(self) -> list:
85
+ """
86
+ Applies various Frangi filter variations with different sigma values and thresholding methods.
87
+
88
+ This method applies the Frangi vesselness filter with multiple sets of sigma values
89
+ to detect vessels at different scales. It applies both Otsu thresholding and rolling window
90
+ segmentation to the filtered results and calculates binary quality indices.
91
+
92
+ Returns
93
+ -------
94
+ results : list of dict
95
+ A list containing dictionaries with the method name, binary result, quality index,
96
+ filtered image, filter type, rolling window flag, and sigma values used.
97
+ """
98
+ results = []
99
+
100
+ # Parameter variations for Frangi filter
101
+ frangi_sigmas = {
102
+ 's_fine_vessels': [0.75],
103
+ 'fine_vessels': [0.5, 1.0], # Very fine capillaries, thin fibers
104
+ 'small_vessels': [1.0, 2.0], # Small vessels, fine structures
105
+ 'multi_scale_medium': [1.0, 2.0, 3.0], # Standard multi-scale
106
+ 'ultra_fine': [0.3, 0.5, 0.8], # Ultra-fine structures
107
+ 'comprehensive': [0.5, 1.0, 2.0, 4.0], # Multi-scale
108
+ 'retinal_vessels': [1.0, 2.0, 4.0, 8.0], # Optimized for retinal imaging
109
+ 'microscopy': [0.5, 1.0, 1.5, 2.5], # Microscopy applications
110
+ 'broad_spectrum': [0.5, 1.5, 3.0, 6.0, 10.0]
111
+ }
112
+
113
+ for i, (key, sigmas) in enumerate(frangi_sigmas.items()):
114
+ # Apply Frangi filter
115
+ frangi_result = frangi(self.greyscale_image, sigmas=sigmas, beta=self.frangi_beta, gamma=self.frangi_gamma, black_ridges=self.black_ridges)
116
+
117
+ # Apply both thresholding methods
118
+ # Method 1: Otsu thresholding
119
+ thresh_otsu = threshold_otsu(frangi_result)
120
+ binary_otsu = frangi_result > thresh_otsu
121
+ quality_otsu = binary_quality_index(self.possibly_filled_pixels * binary_otsu)
122
+
123
124
+
125
+ # Store results
126
+ results.append({
127
+ 'method': f'f_{sigmas}_thresh',
128
+ 'binary': binary_otsu,
129
+ 'quality': quality_otsu,
130
+ 'filtered': frangi_result,
131
+ 'filter': 'frangi',
132
+ 'rolling_window': False,
133
+ 'sigmas': sigmas
134
+ })
135
+ # Method 2: Rolling window thresholding
136
+ if self.add_rolling_window:
137
+ binary_rolling = rolling_window_segmentation(frangi_result, self.possibly_filled_pixels, patch_size=(10, 10))
138
+ quality_rolling = binary_quality_index(binary_rolling)
139
+ results.append({
140
+ 'method': f'f_{sigmas}_roll',
141
+ 'binary': binary_rolling,
142
+ 'quality': quality_rolling,
143
+ 'filtered': frangi_result,
144
+ 'filter': 'frangi',
145
+ 'rolling_window': True,
146
+ 'sigmas': sigmas
147
+ })
148
+
149
+ return results
150
+
151
+
152
+ def apply_sato_variations(self) -> list:
153
+ """
154
+ Apply various Sato filter variations to an image and store the results.
155
+
156
+ This function applies different parameter sets for the Sato vesselness
157
+ filter to an image, applies two thresholding methods (Otsu and rolling window),
158
+ and stores the results. The function supports optional rolling window
159
+ segmentation based on a configuration flag.
160
+
161
+ Returns
162
+ -------
163
+ list of dict
164
+ A list containing dictionaries with the results for each filter variation.
165
+ Each dictionary includes method name, binary image, quality index,
166
+ filtered result, filter type, rolling window flag, and sigma values.
167
+ """
168
+ results = []
169
+
170
+ # Parameter variations for the Sato filter
171
+ sato_sigmas = {
172
+ 'super_small_tubes': [0.01, 0.05, 0.1, 0.15], #
173
+ 'small_tubes': [0.1, 0.2, 0.4, 0.8], #
174
+ 's_thick_ridges': [0.25, 0.75], # Thick ridges/tubes
175
+ 'small_multi_scale': [0.1, 0.2, 0.4, 0.8, 1.6], #
176
+ 'fine_ridges': [0.8, 1.5], # Fine ridge detection
177
+ 'medium_ridges': [1.5, 3.0], # Medium ridge structures
178
+ 'multi_scale_fine': [0.8, 1.5, 2.5], # Multi-scale fine detection
179
+ 'multi_scale_standard': [1.0, 2.5, 5.0], # Standard multi-scale
180
+ 'edge_enhanced': [0.5, 1.0, 2.0], # Edge-enhanced detection
181
+ 'noise_robust': [1.5, 2.5, 4.0], # Robust to noise
182
+ 'fingerprint': [1.0, 1.5, 2.0, 3.0], # Fingerprint ridge detection
183
+ 'geological': [2.0, 5.0, 10.0, 15.0] # Geological structures
184
+ }
185
+
186
+ for i, (key, sigmas) in enumerate(sato_sigmas.items()):
187
+ # Apply sato filter
188
+ sato_result = sato(self.greyscale_image, sigmas=sigmas, black_ridges=self.black_ridges, mode='reflect')
189
+
190
+ # Apply both thresholding methods
191
+ # Method 1: Otsu thresholding
192
+ thresh_otsu = threshold_otsu(sato_result)
193
+ binary_otsu = sato_result > thresh_otsu
194
+ quality_otsu = binary_quality_index(self.possibly_filled_pixels * binary_otsu)
195
+
196
+
197
+ # Store results
198
+ results.append({
199
+ 'method': f's_{sigmas}_thresh',
200
+ 'binary': binary_otsu,
201
+ 'quality': quality_otsu,
202
+ 'filtered': sato_result,
203
+ 'filter': 'sato',
204
+ 'rolling_window': False,
205
+ 'sigmas': sigmas
206
+ })
207
+
208
+ # Method 2: Rolling window thresholding
209
+ if self.add_rolling_window:
210
+ binary_rolling = rolling_window_segmentation(sato_result, self.possibly_filled_pixels, patch_size=(10, 10))
211
+ quality_rolling = binary_quality_index(binary_rolling)
212
+
213
+ results.append({
214
+ 'method': f's_{sigmas}_roll',
215
+ 'binary': binary_rolling,
216
+ 'quality': quality_rolling,
217
+ 'filtered': sato_result,
218
+ 'filter': 'sato',
219
+ 'rolling_window': True,
220
+ 'sigmas': sigmas
221
+ })
222
+
223
+ return results
224
+
225
+ def get_best_network_detection_method(self):
226
+ """
227
+ Get the best network detection method based on quality metrics.
228
+
229
+ This function applies Frangi and Sato variations, combines their results,
230
+ calculates quality metrics for each result, and selects the best method.
231
+
232
+ Attributes
233
+ ----------
234
+ all_results : list of dicts
235
+ Combined results from Frangi and Sato variations.
236
+ quality_metrics : ndarray of float64
237
+ Quality metrics for each detection result.
238
+ best_idx : int
239
+ Index of the best detection method based on quality metrics.
240
+ best_result : dict
241
+ The best detection result from all possible methods.
242
+ incomplete_network : ndarray of bool
243
+ Binary representation of the best detection result.
244
+
245
+ Examples
246
+ ----------
247
+ >>> possibly_filled_pixels = np.zeros((9, 9), dtype=np.uint8)
248
+ >>> possibly_filled_pixels[3:6, 3:6] = 1
249
+ >>> possibly_filled_pixels[1:6, 3] = 1
250
+ >>> possibly_filled_pixels[6:-1, 5] = 1
251
+ >>> possibly_filled_pixels[4, 1:-1] = 1
252
+ >>> greyscale_image = possibly_filled_pixels.copy()
253
+ >>> greyscale_image[greyscale_image > 0] = np.random.randint(170, 255, possibly_filled_pixels.sum())
254
+ >>> greyscale_image[greyscale_image == 0] = np.random.randint(0, 120, possibly_filled_pixels.size - possibly_filled_pixels.sum())
255
+ >>> add_rolling_window=False
256
+ >>> origin_to_add = np.zeros((9, 9), dtype=np.uint8)
257
+ >>> origin_to_add[3:6, 3:6] = 1
258
+ >>> NetDet = NetworkDetection(greyscale_image, possibly_filled_pixels, add_rolling_window, origin_to_add)
259
+ >>> NetDet.get_best_network_detection_method()
260
+ >>> print(NetDet.best_result['method'])
261
+ >>> print(NetDet.best_result['binary'])
262
+ >>> print(NetDet.best_result['quality'])
263
+ >>> print(NetDet.best_result['filtered'])
264
+ >>> print(NetDet.best_result['filter'])
265
+ >>> print(NetDet.best_result['rolling_window'])
266
+ >>> print(NetDet.best_result['sigmas'])
267
268
+ """
269
+ frangi_res = self.apply_frangi_variations()
270
+ sato_res = self.apply_sato_variations()
271
+ self.all_results = frangi_res + sato_res
272
+ self.quality_metrics = np.array([result['quality'] for result in self.all_results])
273
+ self.best_idx = np.argmax(self.quality_metrics)
274
+ self.best_result = self.all_results[self.best_idx]
275
+ self.incomplete_network = self.best_result['binary'] * self.possibly_filled_pixels
276
+
277
+
278
+ def detect_network(self):
279
+ """
280
+ Process and detect network features in the greyscale image.
281
+
282
+ This method applies a Frangi or Sato filter based on the best result and
283
+ performs segmentation using either rolling window or Otsu's thresholding.
284
+ The final network detection result is stored in `self.incomplete_network`.
285
+ """
286
+ if self.best_result['filter'] == 'frangi':
287
+ filtered_result = frangi(self.greyscale_image, sigmas=self.best_result['sigmas'], beta=self.frangi_beta, gamma=self.frangi_gamma, black_ridges=self.black_ridges)
288
+ else:
289
+ filtered_result = sato(self.greyscale_image, sigmas=self.best_result['sigmas'], black_ridges=self.black_ridges, mode='reflect')
290
+
291
+ if self.best_result['rolling_window']:
292
+ binary_image = rolling_window_segmentation(filtered_result, self.possibly_filled_pixels, patch_size=(10, 10))
293
+ else:
294
+ thresh_otsu = threshold_otsu(filtered_result)
295
+ binary_image = filtered_result > thresh_otsu
296
+ self.incomplete_network = binary_image * self.possibly_filled_pixels
297
+
298
+ def change_greyscale(self, img: NDArray[np.uint8], c_space_dict: dict):
299
+ """
300
+ Change the image to greyscale using color space combinations.
301
+
302
+ This function converts an input image to greyscale by generating
303
+ and applying a combination of color spaces specified in the dictionary.
304
+ The resulting greyscale image is stored as an attribute of the instance.
305
+
306
+ Parameters
307
+ ----------
308
+ img : ndarray of uint8
309
+ The input image to be converted to greyscale.
310
+ c_space_dict : dict
311
+ A dictionary where keys are color space names and values
312
+ are parameters for those color spaces.
313
+
314
+ """
315
+ self.greyscale_image, g2 = generate_color_space_combination(img, list(c_space_dict.keys()), c_space_dict)
316
+
317
+ def detect_pseudopods(self, lighter_background: bool, pseudopod_min_width: int=5, pseudopod_min_size: int=50):
318
+ """
319
+ Detect and extract pseudopods from the image based on given parameters.
320
+
321
+ This method performs a series of morphological operations and distance
322
+ transformations to identify pseudopods in the image. It uses binary
323
+ dilation, connected components analysis, and thresholding to isolate
324
+ pseudopod structures.
325
+
326
+ Parameters
327
+ ----------
328
+ lighter_background : bool
329
+ Flag indicating whether the background is lighter than the foreground.
330
+ pseudopod_min_width : int, optional
331
+ Minimum width of pseudopods to be detected. Default is 5.
332
+ pseudopod_min_size : int, optional
333
+ Minimum size of pseudopods to be detected. Default is 50.
334
+
335
+ Attributes (modified)
336
+ ----------------------
337
+ self.pseudopods : ndarray
338
+ Updated to reflect the detected pseudopod regions.
339
+
340
+ Examples
341
+ --------
342
+ >>> dims = (20, 20)
+ >>> possibly_filled_pixels = np.random.randint(0, 2, dims, dtype=np.uint8)
343
+ >>> possibly_filled_pixels = keep_one_connected_component(possibly_filled_pixels)
344
+ >>> origin_to_add = np.zeros(dims, dtype=np.uint8)
345
+ >>> mid = dims[0] // 2
346
+ >>> ite = 2
347
+ >>> while not origin_to_add.any():
348
+ ...     ite += 1
349
+ ...     origin_to_add[mid - ite: mid + ite, mid - ite: mid + ite] = possibly_filled_pixels[mid - ite: mid + ite, mid - ite: mid + ite]
350
+ >>> greyscale_image = possibly_filled_pixels.copy()
351
+ >>> greyscale_image[greyscale_image > 0] = np.random.randint(200, 255, possibly_filled_pixels.sum())
352
+ >>> greyscale_image[greyscale_image == 0] = np.random.randint(0, 50, possibly_filled_pixels.size - possibly_filled_pixels.sum())
353
+ >>> add_rolling_window = False
354
+ >>> NetDet = NetworkDetection(greyscale_image, possibly_filled_pixels, add_rolling_window, origin_to_add)
355
+ >>> NetDet.get_best_network_detection_method()
356
+ >>> lighter_background = True
357
+ >>> pseudopod_min_width = 1
358
+ >>> pseudopod_min_size = 3
359
+ >>> NetDet.detect_pseudopods(lighter_background, pseudopod_min_width, pseudopod_min_size)
360
+ >>> print(NetDet.pseudopods)
361
+ """
362
+
363
+ closed_im = close_holes(self.possibly_filled_pixels)
364
+ dist_trans = distance_transform_edt(closed_im)
365
+ dist_trans = dist_trans.max() - dist_trans
366
+ # Add dilatation of bracket of distances from medial_axis to the multiplication
367
+ if lighter_background:
368
+ grey = self.greyscale_image.max() - self.greyscale_image
369
+ else:
370
+ grey = self.greyscale_image
371
+ if self.origin_to_add is not None:
372
+ dist_trans_ori = distance_transform_edt(1 - self.origin_to_add)
373
+ scored_im = dist_trans * dist_trans_ori * grey
374
+ else:
375
+ scored_im = (dist_trans**2) * grey
376
+ scored_im = bracket_to_uint8_image_contrast(scored_im)
377
+ thresh = threshold_otsu(scored_im)
378
+ thresh = find_threshold_given_mask(scored_im, self.possibly_filled_pixels, min_threshold=thresh)
379
+ high_int_in_periphery = (scored_im > thresh).astype(np.uint8) * self.possibly_filled_pixels
380
+
381
+ _, pseudopod_widths = morphology.medial_axis(high_int_in_periphery, return_distance=True, rng=0)
382
+ bin_im = pseudopod_widths >= pseudopod_min_width
383
+ dil_bin_im = cv2.dilate(bin_im.astype(np.uint8), kernel=Ellipse((7, 7)).create().astype(np.uint8), iterations=1)
384
+ bin_im = high_int_in_periphery * dil_bin_im
385
+ nb, shapes, stats, centro = cv2.connectedComponentsWithStats(bin_im)
386
+ true_pseudopods = np.nonzero(stats[:, 4] > pseudopod_min_size)[0][1:]
387
+ true_pseudopods = np.isin(shapes, true_pseudopods)
388
+
389
+ # Make sure that the tubes connecting two pseudopods belong to pseudopods if removing pseudopods cuts the network
390
+ complete_network = np.logical_or(true_pseudopods, self.incomplete_network).astype(np.uint8)
391
+ complete_network = keep_one_connected_component(complete_network)
392
+ without_pseudopods = complete_network.copy()
393
+ without_pseudopods[true_pseudopods] = 0
394
+ only_connected_network = keep_one_connected_component(without_pseudopods)
395
+ self.pseudopods = (1 - only_connected_network) * complete_network * self.possibly_filled_pixels
396
+
397
+ def merge_network_with_pseudopods(self):
398
+ """
399
+ Merge the incomplete network with pseudopods.
400
+
401
+ This method combines the incomplete network and pseudopods to form
402
+ the complete network. The incomplete network is updated by subtracting
403
+ areas where pseudopods are present.
404
+ """
405
+ self.complete_network = np.logical_or(self.incomplete_network, self.pseudopods).astype(np.uint8)
406
+ self.incomplete_network *= (1 - self.pseudopods)
407
+
408
+
409
+ def get_skeleton_and_widths(pad_network: NDArray[np.uint8], pad_origin: NDArray[np.uint8]=None, pad_origin_centroid: NDArray=None) -> Tuple[NDArray[np.uint8], NDArray[np.float64], NDArray[np.uint8]]:
410
+ """
411
+ Get skeleton and widths from a network.
412
+
413
+ This function computes the morphological skeleton of a network and calculates
414
+ the distances to the closest zero pixel for each non-zero pixel using medial_axis.
415
+ If pad_origin is provided, it adds a central contour. Finally, the function
416
+ removes small loops and keeps only one connected component.
417
+
418
+ Parameters
419
+ ----------
420
+ pad_network : ndarray of uint8
421
+ The binary pad network image.
422
+ pad_origin : ndarray of uint8, optional
423
+ An array indicating the origin for adding central contour.
424
+ pad_origin_centroid : ndarray, optional
425
+ The centroid of the pad origin. Defaults to None.
426
+
427
+ Returns
428
+ -------
429
+ out : tuple(ndarray of uint8, ndarray of float64, ndarray of uint8)
430
+ A tuple containing:
431
+ - pad_skeleton: The skeletonized image.
432
+ - pad_distances: The distances to the closest zero pixel.
433
+ - pad_origin_contours: The contours of the central origin, or None if not
434
+ used.
435
+
436
+ Examples
437
+ --------
438
+ >>> pad_network = np.array([[0, 1], [1, 0]])
439
+ >>> skeleton, distances, contours = get_skeleton_and_widths(pad_network)
440
+ >>> print(skeleton)
441
+ """
442
+ pad_skeleton, pad_distances = morphology.medial_axis(pad_network, return_distance=True, rng=0)
443
+ pad_skeleton = pad_skeleton.astype(np.uint8)
444
+ if pad_origin is not None:
445
+ pad_skeleton, pad_distances, pad_origin_contours = _add_central_contour(pad_skeleton, pad_distances, pad_origin, pad_network, pad_origin_centroid)
446
+ else:
447
+ pad_origin_contours = None
448
+ pad_skeleton, pad_distances = remove_small_loops(pad_skeleton, pad_distances)
449
+ pad_skeleton = keep_one_connected_component(pad_skeleton)
450
+ pad_distances *= pad_skeleton
451
+ return pad_skeleton, pad_distances, pad_origin_contours
452
+
453
+
454
+ def remove_small_loops(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64]=None):
455
+ """
456
+ Remove small loops from a skeletonized image.
457
+
458
+ This function identifies and removes small loops in a skeletonized image, returning the modified skeleton.
459
+ If distance information is provided, it updates that as well.
460
+
461
+ Parameters
462
+ ----------
463
+ pad_skeleton : ndarray of uint8
464
+ The skeletonized image with potential small loops.
465
+ pad_distances : ndarray of float64, optional
466
+ The distance map corresponding to the skeleton image. Default is `None`.
467
+
468
+ Returns
469
+ -------
470
+ out : ndarray of uint8 or tuple(ndarray of uint8, ndarray of float64)
471
+ If `pad_distances` is None, returns the modified skeleton. Otherwise,
472
+ returns a tuple of the modified skeleton and updated distances.
473
+ """
474
+ cnv4, cnv8 = get_neighbor_comparisons(pad_skeleton)
475
+ # potential_tips = get_terminations_and_their_connected_nodes(pad_skeleton, cnv4, cnv8)
476
+
477
+ cnv_diag_0 = CompareNeighborsWithValue(pad_skeleton, 0)
478
+ cnv_diag_0.is_equal(0, and_itself=True)
479
+
480
+ cnv4_false = CompareNeighborsWithValue(pad_skeleton, 4)
481
+ cnv4_false.is_equal(1, and_itself=False)
482
+
483
+ loop_centers = np.logical_and((cnv4_false.equal_neighbor_nb == 4), cnv_diag_0.equal_neighbor_nb > 2).astype(np.uint8)
484
+
485
+ surrounding = cv2.dilate(loop_centers, kernel=square_33)
486
+ surrounding -= loop_centers
487
+ surrounding = surrounding * cnv8.equal_neighbor_nb
488
+
489
+ # Every 2 can be replaced by 0 if the loop center becomes 1
490
+ filled_loops = pad_skeleton.copy()
491
+ filled_loops[surrounding == 2] = 0
492
+ filled_loops += loop_centers
493
+
494
+ new_pad_skeleton = morphology.skeletonize(filled_loops, method='lee')
495
+
496
+ # Put the new pixels in pad_distances
497
+ new_pixels = new_pad_skeleton * (1 - pad_skeleton)
498
+ pad_skeleton = new_pad_skeleton.astype(np.uint8)
499
+ if pad_distances is None:
500
+ return pad_skeleton
501
+ else:
502
+ pad_distances[np.nonzero(new_pixels)] = np.nan # 2. # Put nearest value instead?
503
+ pad_distances *= pad_skeleton
504
+ # for yi, xi in zip(npY, npX): # yi, xi = npY[0], npX[0]
505
+ # distances[yi, xi] = 2.
506
+ return pad_skeleton, pad_distances
507
+
508
+
509
+ def get_neighbor_comparisons(pad_skeleton: NDArray[np.uint8]) -> Tuple[object, object]:
510
+ """
511
+ Get neighbor comparisons for a padded skeleton.
512
+
513
+ This function creates two `CompareNeighborsWithValue` objects with different
514
+ neighborhood sizes (4 and 8) and checks if the neighbors are equal to 1. It
515
+ returns both comparison objects.
516
+
517
+ Parameters
518
+ ----------
519
+ pad_skeleton : ndarray of uint8
520
+ The input padded skeleton array.
521
+
522
+ Returns
523
+ -------
524
+ out : tuple of CompareNeighborsWithValue, CompareNeighborsWithValue
525
+ Two comparison objects for 4 and 8 neighbors.
526
+
527
+ Examples
528
+ --------
529
+ >>> cnv4, cnv8 = get_neighbor_comparisons(pad_skeleton)
530
+ """
531
+ cnv4 = CompareNeighborsWithValue(pad_skeleton, 4)
532
+ cnv4.is_equal(1, and_itself=True)
533
+ cnv8 = CompareNeighborsWithValue(pad_skeleton, 8)
534
+ cnv8.is_equal(1, and_itself=True)
535
+ return cnv4, cnv8
536
+
537
+
538
+ def get_vertices_and_tips_from_skeleton(pad_skeleton: NDArray[np.uint8]) -> Tuple[NDArray[np.uint8], NDArray[np.uint8]]:
539
+ """
540
+ Get vertices and tips from a padded skeleton.
541
+
542
+ This function identifies the vertices and tips of a skeletonized image.
543
+ Tips are endpoints of the skeleton while vertices include tips and points where three or more edges meet.
544
+
545
+ Parameters
546
+ ----------
547
+ pad_skeleton : ndarray of uint8
548
+ Input skeleton image that has been padded.
549
+
550
+ Returns
551
+ -------
552
+ out : tuple (ndarray of uint8, ndarray of uint8)
553
+ Tuple containing arrays of vertex points and tip points.
554
+ """
555
+ cnv4, cnv8 = get_neighbor_comparisons(pad_skeleton)
556
+ potential_tips = get_terminations_and_their_connected_nodes(pad_skeleton, cnv4, cnv8)
557
+ pad_vertices, pad_tips = get_inner_vertices(pad_skeleton, potential_tips, cnv4, cnv8)
558
+ return pad_vertices, pad_tips
559
+
560
+
561
+ def get_terminations_and_their_connected_nodes(pad_skeleton: NDArray[np.uint8], cnv4: object, cnv8: object) -> NDArray[np.uint8]:
562
+ """
563
+ Get terminations in a skeleton and their connected nodes.
564
+
565
+ This function identifies termination points in a padded skeleton array
566
+ based on pixel connectivity, marking them and their connected nodes.
567
+
568
+ Parameters
569
+ ----------
570
+ pad_skeleton : ndarray of uint8
571
+ The padded skeleton array where terminations are to be identified.
572
+ cnv4 : object
573
+ Convolution object with 4-connectivity for neighbor comparison.
574
+ cnv8 : object
575
+ Convolution object with 8-connectivity for neighbor comparison.
576
+
577
+ Returns
578
+ -------
579
+ out : ndarray of uint8
580
+ Array containing marked terminations and their connected nodes.
581
+
582
+ Examples
583
+ --------
584
+ >>> result = get_terminations_and_their_connected_nodes(pad_skeleton, cnv4, cnv8)
585
+ >>> print(result)
586
+ """
587
+ # All pixels having only one neighbor, and containing the value 1, are terminations for sure
588
+ potential_tips = np.zeros(pad_skeleton.shape, dtype=np.uint8)
589
+ potential_tips[cnv8.equal_neighbor_nb == 1] = 1
590
+ # Add more terminations using 4-connectivity
591
+ # If a pixel has one 4-neighbor and all its 8-neighbors are 4-connected to each other, it is a termination
592
+
593
+ coord1_4 = cnv4.equal_neighbor_nb == 1
594
+ if np.any(coord1_4):
595
+ coord1_4 = np.nonzero(coord1_4)
596
+ for y1, x1 in zip(coord1_4[0], coord1_4[1]): # y1, x1 = 3,5
597
+ # If, in the neighborhood of the 1 (in 4), all (in 8) its neighbors are 4-connected together, and none of them are terminations, the 1 is a termination
598
+ is_4neigh = cnv4.equal_neighbor_nb[(y1 - 1):(y1 + 2), (x1 - 1):(x1 + 2)] != 0
599
+ all_4_connected = pad_skeleton[(y1 - 1):(y1 + 2), (x1 - 1):(x1 + 2)] == is_4neigh
600
+ is_not_term = 1 - potential_tips[y1, x1]
601
+ if np.all(all_4_connected * is_not_term):
602
+ is_4neigh[1, 1] = 0
603
+ is_4neigh = np.pad(is_4neigh, [(1,), (1,)], mode='constant')
604
+ cnv_4con = CompareNeighborsWithValue(is_4neigh, 4)
605
+ cnv_4con.is_equal(1, and_itself=True)
606
+ all_connected = (is_4neigh.sum() - (cnv_4con.equal_neighbor_nb > 0).sum()) == 0
607
+ # If they are connected, it can be a termination
608
+ if all_connected:
609
+ # If its closest neighbor is above 3 (in 8), this one is also a node
610
+ is_closest_above_3 = cnv8.equal_neighbor_nb[(y1 - 1):(y1 + 2), (x1 - 1):(x1 + 2)] * cross_33 > 3
611
+ if np.any(is_closest_above_3):
612
+ Y, X = np.nonzero(is_closest_above_3)
613
+ Y += y1 - 1
614
+ X += x1 - 1
615
+ potential_tips[Y, X] = 1
616
+ potential_tips[y1, x1] = 1
617
+ return potential_tips
618
+
619
+
620
+ def get_inner_vertices(pad_skeleton: NDArray[np.uint8], potential_tips: NDArray[np.uint8], cnv4: object, cnv8: object) -> Tuple[NDArray[np.uint8], NDArray[np.uint8]]: # potential_tips=pad_tips
621
+ """
622
+ Get inner vertices from skeleton image.
623
+
624
+ This function identifies and returns the inner vertices of a skeletonized image.
625
+ It processes potential tips to determine which pixels should be considered as
626
+ vertices based on their neighbor count and connectivity.
627
+
628
+ Parameters
629
+ ----------
630
+ pad_skeleton : ndarray of uint8
631
+ The padded skeleton image.
632
+ potential_tips : ndarray of uint8
633
+ Potential tip points in the skeleton.
634
+ cnv4 : object
635
+ Object for handling 4-connections.
636
+ cnv8 : object
637
+ Object for handling 8-connections.
638
+
639
+ Returns
640
+ -------
641
+ out : tuple of ndarray of uint8, ndarray of uint8
642
+ A tuple containing the final vertices matrix and the updated potential tips.
643
+
644
+ Examples
645
+ --------
646
+ >>> pad_vertices, potential_tips = get_inner_vertices(pad_skeleton, potential_tips)
647
+ >>> print(pad_vertices)
648
+ """
649
+
650
+ # Initiate the vertices final matrix as a copy of the potential_tips
651
+ pad_vertices = deepcopy(potential_tips)
652
+ for neighbor_nb in [8, 7, 6, 5, 4]:
653
+ # All pixels having neighbor_nb neighbor are potential vertices
654
+ potential_vertices = np.zeros(potential_tips.shape, dtype=np.uint8)
655
+
656
+ potential_vertices[cnv8.equal_neighbor_nb == neighbor_nb] = 1
657
+ # remove the false intersections that are a neighbor of a previously detected intersection
658
+ # Dilate vertices to make sure that no neighbors of the current potential vertices are already vertices.
659
+ dilated_previous_intersections = cv2.dilate(pad_vertices, cross_33, iterations=1)
660
+ potential_vertices *= (1 - dilated_previous_intersections)
661
+ pad_vertices[np.nonzero(potential_vertices)] = 1
662
+
663
+ # Having 3 neighbors is ambiguous
664
+ with_3_neighbors = cnv8.equal_neighbor_nb == 3
665
+ if np.any(with_3_neighbors):
666
+ # We compare 8-connections with 4-connections
667
+ # We loop over all 3 connected
668
+ coord_3 = np.nonzero(with_3_neighbors)
669
+ for y3, x3 in zip(coord_3[0], coord_3[1]): # y3, x3 = 3,7
670
+ # If, in the neighborhood of the 3, there is at least a 2 (in 8) that is 0 (in 4), and not a termination: the 3 is a node
671
+ has_2_8neigh = cnv8.equal_neighbor_nb[(y3 - 1):(y3 + 2), (x3 - 1):(x3 + 2)] > 0 # 1
672
+ has_2_8neigh_without_focal = has_2_8neigh.copy()
673
+ has_2_8neigh_without_focal[1, 1] = 0
674
+ node_but_not_term = pad_vertices[(y3 - 1):(y3 + 2), (x3 - 1):(x3 + 2)] * (1 - potential_tips[(y3 - 1):(y3 + 2), (x3 - 1):(x3 + 2)])
675
+ all_are_node_but_not_term = np.array_equal(has_2_8neigh_without_focal, node_but_not_term)
676
+ if np.any(has_2_8neigh * (1 - all_are_node_but_not_term)):
677
+ # At least 3 of the 8neigh are not connected:
678
+ has_2_8neigh_without_focal = np.pad(has_2_8neigh_without_focal, [(1,), (1,)], mode='constant')
679
+ cnv_8con = CompareNeighborsWithValue(has_2_8neigh_without_focal, 4)
680
+ cnv_8con.is_equal(1, and_itself=True)
681
+ disconnected_nb = has_2_8neigh_without_focal.sum() - (cnv_8con.equal_neighbor_nb > 0).sum()
682
+ if disconnected_nb > 2:
683
+ pad_vertices[y3, x3] = 1
684
+ # Now there may be too many vertices:
685
+ # - Those that are 4-connected:
686
+ nb, sh, st, ce = cv2.connectedComponentsWithStats(pad_vertices, connectivity=4)
687
+ problematic_vertices = np.nonzero(st[:, 4] > 1)[0][1:]
688
+ for prob_v in problematic_vertices:
689
+ vertices_group = sh == prob_v
690
+ # If there is a tip in the group, do
691
+ if np.any(potential_tips[vertices_group]):
692
+ # Change the most connected one from tip to vertex
693
+ curr_neighbor_nb = cnv8.equal_neighbor_nb * vertices_group
694
+ wrong_tip = np.nonzero(curr_neighbor_nb == curr_neighbor_nb.max())
695
+ potential_tips[wrong_tip] = 0
696
+ else:
697
+ # otherwise do:
698
+ # Find the most 4-connected one, and check whether
699
+ # its 4 connected neighbors have 1 or more other connexions
700
+ # 1. # Find the most 4-connected one:
701
+ vertices_group_4 = cnv4.equal_neighbor_nb * vertices_group
702
+ max_con = vertices_group_4.max()
703
+ most_con = np.nonzero(vertices_group_4 == max_con)
704
+ # 2. Check its 4-connected neighbors and remove those having only 1 other 8-connexion
705
+ skel_copy = pad_skeleton.copy()
706
+ skel_copy[most_con] = 0
707
+ skel_copy[most_con[0] - 1, most_con[1]] = 0
708
+ skel_copy[most_con[0] + 1, most_con[1]] = 0
709
+ skel_copy[most_con[0], most_con[1] - 1] = 0
710
+ skel_copy[most_con[0], most_con[1] + 1] = 0
711
+ sub_cnv8 = CompareNeighborsWithValue(skel_copy, 8)
712
+ sub_cnv8.is_equal(1, and_itself=False)
713
+ # Remove those having only one other 8-connection
714
+ v_to_remove = ((vertices_group_4 > 0) * sub_cnv8.equal_neighbor_nb) == 1
715
+ pad_vertices[v_to_remove] = 0
716
+
717
+ # Other vertices to remove:
718
+ # - Those that are forming a cross with 0 at the center while the skeleton contains 1
719
+ cnv4_false = CompareNeighborsWithValue(pad_vertices, 4)
720
+ cnv4_false.is_equal(1, and_itself=False)
721
+ cross_vertices = cnv4_false.equal_neighbor_nb == 4
722
+ wrong_cross_vertices = cross_vertices * pad_skeleton
723
+ if wrong_cross_vertices.any():
724
+ pad_vertices[np.nonzero(wrong_cross_vertices)] = 1
725
+ cross_fix = cv2.dilate(wrong_cross_vertices, kernel=cross_33, iterations=1)
726
+ # Remove the 4-connected vertices that have no more than 4 8-connected neighbors
727
+ # i.e. the three on the side of the surrounded 0 and only one on edge on the other side
728
+ cross_fix = ((cnv8.equal_neighbor_nb * cross_fix) == 4) * (1 - wrong_cross_vertices)
729
+ pad_vertices *= (1 - cross_fix)
730
+ return pad_vertices, potential_tips
731
+
732
+
733
+ def get_branches_and_tips_coord(pad_vertices: NDArray[np.uint8], pad_tips: NDArray[np.uint8]) -> Tuple[NDArray, NDArray]:
734
+ """
735
+ Extracts the coordinates of branches and tips from vertices and tips binary images.
736
+
737
+ This function calculates branch coordinates by subtracting
738
+ tips from vertices. Then it finds and outputs the non-zero indices of branches and tips separately.
739
+
740
+ Parameters
741
+ ----------
742
+ pad_vertices : ndarray
743
+ Binary image of the padded vertices.
744
+ pad_tips : ndarray
745
+ Binary image of the padded tips.
746
+
747
+ Returns
748
+ -------
749
+ branch_v_coord : ndarray
750
+ Coordinates of branches derived from subtracting tips from vertices.
751
+ tips_coord : ndarray
752
+ Coordinates of the tips.
753
+
754
+ Examples
755
+ --------
756
+ >>> branch_v, tip_c = get_branches_and_tips_coord(pad_vertices, pad_tips)
757
+ >>> branch_v
758
+ >>> tip_c
759
+ """
760
+ pad_branches = pad_vertices - pad_tips
761
+ branch_v_coord = np.transpose(np.array(np.nonzero(pad_branches)))
762
+ tips_coord = np.transpose(np.array(np.nonzero(pad_tips)))
763
+ return branch_v_coord, tips_coord
764
+
765
+
766
+ class EdgeIdentification:
767
+ """Initialize the class with skeleton and distance arrays.
768
+
769
+ This class is used to identify edges within a skeleton structure based on
770
+ provided skeleton and distance arrays. It performs various operations to
771
+ refine and label edges, ultimately producing a fully identified network.
772
+ """
773
+ def __init__(self, pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64]):
774
+ """
775
+ Initialize the class with skeleton and distance arrays.
776
+
777
+ Parameters
778
+ ----------
779
+ pad_skeleton : ndarray of uint8
780
+ Padded array representing the skeleton.
781
+ pad_distances : ndarray of float64
782
+ Array representing distances corresponding to the skeleton.
783
+
784
+ Attributes
785
+ ----------
786
+ remaining_vertices : None
787
+ Remaining vertices. Initialized as `None`.
788
+ vertices : None
789
+ Vertices. Initialized as `None`.
790
+ growing_vertices : None
791
+ Growing vertices. Initialized as `None`.
792
+ im_shape : tuple of ints
793
+ Shape of the skeleton array.
794
+ """
795
+ self.pad_skeleton = pad_skeleton
796
+ self.pad_distances = pad_distances
797
+ self.remaining_vertices = None
798
+ self.vertices = None
799
+ self.growing_vertices = None
800
+ self.im_shape = pad_skeleton.shape
801
+
802
+ def run_edge_identification(self):
803
+ """
804
+ Run the edge identification process.
805
+
806
+ This method orchestrates a series of steps to identify and label edges
807
+ within the graph structure. Each step handles a specific aspect of edge
808
+ identification, ultimately leading to a clearer and more refined edge network.
809
+
810
+ Steps involved:
811
+ 1. Get vertices and tips coordinates.
812
+ 2. Identify tipped edges.
813
+ 3. Remove tipped edges smaller than branch width.
814
+ 4. Label tipped edges and their vertices.
815
+ 5. Label edges connected with vertex clusters.
816
+ 6. Label edges connecting vertex clusters.
817
+ 7. Label edges from known vertices iteratively.
818
+ 8. Label edges looping on 1 vertex.
819
+ 9. Clear areas with 1 or 2 unidentified pixels.
820
+ 10. Clear edge duplicates.
821
+ 11. Clear vertices connecting 2 edges.
822
+ """
823
+ self.get_vertices_and_tips_coord()
824
+ self.get_tipped_edges()
825
+ self.remove_tipped_edge_smaller_than_branch_width()
826
+ self.label_tipped_edges_and_their_vertices()
827
+ self.label_edges_connected_with_vertex_clusters()
828
+ self.label_edges_connecting_vertex_clusters()
829
+ self.label_edges_from_known_vertices_iteratively()
830
+ self.label_edges_looping_on_1_vertex()
831
+ self.clear_areas_of_1_or_2_unidentified_pixels()
832
+ self.clear_edge_duplicates()
833
+ self.clear_vertices_connecting_2_edges()
834
+
835
+ def get_vertices_and_tips_coord(self):
836
+ """Process skeleton data to extract non-tip vertices and tip coordinates.
837
+
838
+ This method processes the skeleton stored in `self.pad_skeleton` by first
839
+ extracting all vertices and tips. It then separates these into branch points
840
+ (non-tip vertices) and specific tip coordinates using internal processing.
841
+
842
+ Attributes
843
+ ----------
844
+ self.non_tip_vertices : array-like
845
+ Coordinates of non-tip (branch) vertices.
846
+ self.tips_coord : array-like
847
+ Coordinates of identified tips in the skeleton.
848
+ """
849
+ pad_vertices, pad_tips = get_vertices_and_tips_from_skeleton(self.pad_skeleton)
850
+ self.non_tip_vertices, self.tips_coord = get_branches_and_tips_coord(pad_vertices, pad_tips)
851
+
852
+ def get_tipped_edges(self):
853
+ """
854
+ Extract skeleton edges connecting branching points and tips.
855
+
856
+ Makes sure that there is only one connected component constituting the skeleton of the network and
857
+ identifies all edges that are connected to a tip.
858
+
859
+ Attributes
860
+ ----------
861
+ pad_skeleton : ndarray of bool, modified
862
+ Boolean mask representing the pruned skeleton after isolating the largest connected component.
863
+ vertices_branching_tips : ndarray of int, shape (N, 2)
864
+ Coordinates of branching points that connect to tips in the skeleton structure.
865
+ edge_lengths : ndarray of float, shape (M,)
866
+ Lengths of edges connecting non-tip vertices to identified tip locations.
867
+ edge_pix_coord : list of array of int
868
+ Pixel coordinates for each edge path between connected skeleton elements.
869
+
870
+ """
871
+ self.pad_skeleton = keep_one_connected_component(self.pad_skeleton)
872
+ self.vertices_branching_tips, self.edge_lengths, self.edge_pix_coord = _find_closest_vertices(self.pad_skeleton,
873
+ self.non_tip_vertices,
874
+ self.tips_coord[:, :2])
875
+
876
+ def remove_tipped_edge_smaller_than_branch_width(self):
877
+ """Remove very short edges from the skeleton.
878
+
879
+ This method focuses on edges connecting tips. When too short, they are considered noise and
880
+ removed from the skeleton and distances matrices. These edges are considered too short when their length is
881
+ smaller than the width of the nearest network branch (information included in pad_distances).
882
+ This method also updates internal data structures (skeleton, edge coordinates, vertex/tip positions)
883
+ accordingly through pixel-wise analysis and connectivity checks.
884
+
885
+ Parameters
886
+ ----------
887
+ pad_distances : ndarray of float64
888
+ 2D array containing the network width (in pixels) at each position occupied by the skeleton
889
+ """
890
+ # Identify edges that are smaller than the width of the branch it is attached to
891
+ tipped_edges_to_remove = np.zeros(self.edge_lengths.shape[0], dtype=bool)
892
+ # connecting_vertices_to_remove = np.zeros(self.vertices_branching_tips.shape[0], dtype=bool)
893
+ branches_to_remove = np.zeros(self.non_tip_vertices.shape[0], dtype=bool)
894
+ new_edge_pix_coord = []
895
+ remaining_tipped_edges_nb = 0
896
+ if self.edge_pix_coord.shape[0] == 0:
897
+ for i in range(len(self.edge_lengths)): # i = 3142 #1096 # 974 # 222
898
+ Y, X = self.vertices_branching_tips[i, 0], self.vertices_branching_tips[i, 1]
899
+ if np.nanmax(self.pad_distances[(Y - 1): (Y + 2), (X - 1): (X + 2)]) >= self.edge_lengths[i]:
900
+ tipped_edges_to_remove[i] = True
901
+ # Remove the tip
902
+ self.pad_skeleton[self.tips_coord[i, 0], self.tips_coord[i, 1]] = 0
903
+ # check whether the connecting vertex remains a vertex or not
904
+ pad_sub_skeleton = np.pad(self.pad_skeleton[(Y - 2): (Y + 3), (X - 2): (X + 3)], [(1,), (1,)],
905
+ mode='constant')
906
+ sub_vertices, sub_tips = get_vertices_and_tips_from_skeleton(pad_sub_skeleton)
907
+ # If the vertex does not connect at least 3 edges anymore, remove its vertex label
908
+ if sub_vertices[3, 3] == 0:
909
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
910
+ branches_to_remove[vertex_to_remove] = True
911
+ # If that pixel became a tip connected to another vertex remove it from the skeleton
912
+ if sub_tips[3, 3]:
913
+ if sub_vertices[2:5, 2:5].sum() > 1:
914
+ self.pad_skeleton[Y, X] = 0
915
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
916
+ branches_to_remove[vertex_to_remove] = True
917
+ else:
918
+ remaining_tipped_edges_nb += 1
919
+ else:
920
+ for i in range(len(self.edge_lengths)): # i = 3142 #1096 # 974 # 222
921
+ Y, X = self.vertices_branching_tips[i, 0], self.vertices_branching_tips[i, 1]
922
+ edge_bool = self.edge_pix_coord[:, 2] == i + 1
923
+ eY, eX = self.edge_pix_coord[edge_bool, 0], self.edge_pix_coord[edge_bool, 1]
924
+ if np.nanmax(self.pad_distances[(Y - 1): (Y + 2), (X - 1): (X + 2)]) >= self.edge_lengths[i]:
925
+ tipped_edges_to_remove[i] = True
926
+ # Remove the edge
927
+ self.pad_skeleton[eY, eX] = 0
928
+ # Remove the tip
929
+ self.pad_skeleton[self.tips_coord[i, 0], self.tips_coord[i, 1]] = 0
930
+
931
+ # Remove the coordinates corresponding to that edge
932
+ self.edge_pix_coord = np.delete(self.edge_pix_coord, edge_bool, 0)
933
+
934
+ # check whether the connecting vertex remains a vertex or not
935
+ pad_sub_skeleton = np.pad(self.pad_skeleton[(Y - 2): (Y + 3), (X - 2): (X + 3)], [(1,), (1,)],
936
+ mode='constant')
937
+ sub_vertices, sub_tips = get_vertices_and_tips_from_skeleton(pad_sub_skeleton)
938
+ # If the vertex does not connect at least 3 edges anymore, remove its vertex label
939
+ if sub_vertices[3, 3] == 0:
940
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
941
+ branches_to_remove[vertex_to_remove] = True
942
+ # If that pixel became a tip connected to another vertex remove it from the skeleton
943
+ if sub_tips[3, 3]:
944
+ if sub_vertices[2:5, 2:5].sum() > 1:
945
+ self.pad_skeleton[Y, X] = 0
946
+ self.edge_pix_coord = np.delete(self.edge_pix_coord, np.all(self.edge_pix_coord[:, :2] == [Y, X], axis=1), 0)
947
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
948
+ branches_to_remove[vertex_to_remove] = True
949
+ else:
950
+ remaining_tipped_edges_nb += 1
951
+ new_edge_pix_coord.append(np.stack((eY, eX, np.repeat(remaining_tipped_edges_nb, len(eY))), axis=1))
952
+
953
+ # Check whether the excedent connected components are 1 pixel in size; if so:
954
+ # It means that they were neighbors to removed tips and not necessary for the skeleton
955
+ nb, sh = cv2.connectedComponents(self.pad_skeleton)
956
+ if nb > 2:
957
+ for i in range(2, nb):
958
+ excedent = sh == i
959
+ if (excedent).sum() == 1:
960
+ self.pad_skeleton[excedent] = 0
961
+ # else:
962
+ # print("More than one pixel area excedent components exists")
963
+
964
+ # Remove in distances the pixels removed in skeleton:
965
+ self.pad_distances *= self.pad_skeleton
966
+
967
+ # update edge_pix_coord
968
+ if len(new_edge_pix_coord) > 0:
969
+ self.edge_pix_coord = np.vstack(new_edge_pix_coord)
970
+
971
+ # Remove tips connected to very small edges
972
+ self.tips_coord = np.delete(self.tips_coord, tipped_edges_to_remove, 0)
973
+ # Add corresponding edge names
974
+ self.tips_coord = np.hstack((self.tips_coord, np.arange(1, len(self.tips_coord) + 1)[:, None]))
975
+
976
+ # Within all branching (non-tip) vertices, keep those that did not lose their vertex status because of the edge removal
977
+ self.non_tip_vertices = np.delete(self.non_tip_vertices, branches_to_remove, 0)
978
+
979
+ # Get the branching vertices that kept their tipped edge
980
+ self.vertices_branching_tips = np.delete(self.vertices_branching_tips, tipped_edges_to_remove, 0)
981
+
982
+ # Within all branching (non-tip) vertices, keep those that do not connect a tipped edge.
983
+ v_branching_tips_in_branching_v = find_common_coord(self.non_tip_vertices, self.vertices_branching_tips[:, :2])
984
+ self.remaining_vertices = np.delete(self.non_tip_vertices, v_branching_tips_in_branching_v, 0)
985
+ ordered_v_coord = np.vstack((self.tips_coord[:, :2], self.vertices_branching_tips[:, :2], self.remaining_vertices))
986
+
987
+ # tips = self.tips_coord
988
+ # branching_any_edge = self.non_tip_vertices
989
+ # branching_typped_edges = self.vertices_branching_tips
990
+ # branching_no_typped_edges = self.remaining_vertices
991
+
992
+ self.get_vertices_and_tips_coord()
993
+ self.get_tipped_edges()
994
+
995
+ def label_tipped_edges_and_their_vertices(self):
996
+ """Label edges connecting tip vertices to branching vertices and assign unique labels to all relevant vertices.
997
+
998
+ Processes vertex coordinates by stacking tips, vertices branching from tips, and remaining non-tip vertices.
999
+ Assigns unique sequential identifiers to these vertices in a new array. Constructs an array of edge-label information,
1000
+ where each row contains the edge label (starting at 1), corresponding tip label, and connected vertex label.
1001
+
1002
+ Attributes
1003
+ ----------
1004
+ tip_number : int
1005
+ The number of tip coordinates available in `tips_coord`.
1006
+
1007
+ ordered_v_coord : ndarray of float
1008
+ Stack of unique vertex coordinates ordered by: tips first, vertices branching tips second, non-tip vertices third.
1009
+
1010
+ numbered_vertices : ndarray of uint32
1011
+ 2D array where each coordinate position is labeled with a sequential integer (starting at 1) based on the order in `ordered_v_coord`.
1012
+
1013
+ edges_labels : ndarray of uint32
1014
+ Array of shape (n_edges, 3). Each row contains:
1015
+ - Edge label (sequential from 1 to n_edges)
1016
+ - Label of the tip vertex for that edge.
1017
+ - Label of the vertex branching the tip.
1018
+
1019
+ vertices_branching_tips : ndarray of float
1020
+ Unique coordinates of vertices directly connected to tips after removing duplicates.
1021
+ """
1022
+ self.tip_number = self.tips_coord.shape[0]
1023
+
1024
+ # Stack vertex coordinates in that order: 1. Tips, 2. Vertices branching tips, 3. All remaining vertices
1025
+ ordered_v_coord = np.vstack((self.tips_coord[:, :2], self.vertices_branching_tips[:, :2], self.non_tip_vertices))
1026
+ ordered_v_coord = np.unique(ordered_v_coord, axis=0)
1027
+
1028
+ # Create arrays to store edges and vertices labels
1029
+ self.numbered_vertices = np.zeros(self.im_shape, dtype=np.uint32)
1030
+ self.numbered_vertices[ordered_v_coord[:, 0], ordered_v_coord[:, 1]] = np.arange(1, ordered_v_coord.shape[0] + 1)
1031
+ self.vertices = None
1032
+
1033
+ # Name edges from 1 to the number of edges connecting tips and set the vertices labels from all tips to their connected vertices:
1034
+ self.edges_labels = np.zeros((self.tip_number, 3), dtype=np.uint32)
1035
+ # edge label:
1036
+ self.edges_labels[:, 0] = np.arange(self.tip_number) + 1
1037
+ # tip label:
1038
+ self.edges_labels[:, 1] = self.numbered_vertices[self.tips_coord[:, 0], self.tips_coord[:, 1]]
1039
+ # vertex branching tip label:
1040
+ self.edges_labels[:, 2] = self.numbered_vertices[self.vertices_branching_tips[:, 0], self.vertices_branching_tips[:, 1]]
1041
+
1042
+ # Remove duplicates in vertices_branching_tips
1043
+ self.vertices_branching_tips = np.unique(self.vertices_branching_tips[:, :2], axis=0)
1044
+
1045
+ def label_edges_connected_with_vertex_clusters(self):
1046
+ """
1047
+ Identify edges connected to touching vertices by processing vertex clusters.
1048
+
1049
+ This function processes the skeleton to identify edges connecting vertices
1050
+ that are part of touching clusters. It creates a cropped version of the skeleton
1051
+ by removing already detected edges and their tips, then iterates through vertex
1052
+ clusters to explore and identify nearby edges.
1053
+ """
1054
+ # I.1. Identify edges connected to touching vertices:
1055
+ # First, create another version of these arrays, where we remove every already detected edge and their tips
1056
+ cropped_skeleton = self.pad_skeleton.copy()
1057
+ cropped_skeleton[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 0
1058
+ cropped_skeleton[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 0
1059
+
1060
+ # non_tip_vertices does not need to be updated yet, because it only contains verified branching vertices
1061
+ cropped_non_tip_vertices = self.non_tip_vertices.copy()
1062
+
1063
+ self.new_level_vertices = None
1064
+ # The problem with vertex_to_vertex_connexion is that since they are not separated by zeros,
1065
+ # they always attract each other instead of exploring other paths.
1066
+ # To fix this, we loop over each vertex group to
1067
+ # 1. Add one edge per inter-vertex connexion inside the group
1068
+ # 2. Remove all except one, and loop as many times as necessary.
1069
+ # Inside that second loop, we explore and identify every edge nearby.
1070
+ # Find every vertex_to_vertex_connexion
1071
+ v_cluster_nb, self.v_cluster_lab, self.v_cluster_stats, vgc = cv2.connectedComponentsWithStats(
1072
+ (self.numbered_vertices > 0).astype(np.uint8), connectivity=8)
1073
+ max_v_nb = np.max(self.v_cluster_stats[1:, 4])
1074
+ cropped_skeleton_list = []
1075
+ starting_vertices_list = []
1076
+ for v_nb in range(2, max_v_nb + 1):
1077
+ labels = np.nonzero(self.v_cluster_stats[:, 4] == v_nb)[0]
1078
+ coord_list = []
1079
+ for lab in labels: # lab=labels[0]
1080
+ coord_list.append(np.nonzero(self.v_cluster_lab == lab))
1081
+ for iter in range(v_nb):
1082
+ for lab_ in range(labels.shape[0]): # lab=labels[0]
1083
+ cs = cropped_skeleton.copy()
1084
+ sv = []
1085
+ v_c = coord_list[lab_]
1086
+ # Save the current coordinate in the starting vertices array of this iteration
1087
+ sv.append([v_c[0][iter], v_c[1][iter]])
1088
+ # Remove all vertices of the group from cs except the current one
1089
+ v_y, v_x = np.delete(v_c[0], iter), np.delete(v_c[1], iter)
1090
+ cs[v_y, v_x] = 0
1091
+ cropped_skeleton_list.append(cs)
1092
+ starting_vertices_list.append(np.array(sv))
1093
+ for cropped_skeleton, starting_vertices in zip(cropped_skeleton_list, starting_vertices_list):
1094
+ _, _ = self._identify_edges_connecting_a_vertex_list(cropped_skeleton, cropped_non_tip_vertices, starting_vertices)
1095
+
1096
+ def label_edges_connecting_vertex_clusters(self):
1097
+ """
1098
+ Label edges connecting vertex clusters.
1099
+
1100
+ This method identifies the connections between connected vertices within
1101
+ vertex clusters and labels these edges. It uses the previously found connected
1102
+ vertices, creates an image of the connections, and then identifies
1103
+ and labels the edges between these touching vertices.
1104
+ """
1105
+ # I.2. Identify the connexions between connected vertices:
1106
+ all_connected_vertices = np.nonzero(self.v_cluster_stats[:, 4] > 1)[0][1:]
1107
+ all_con_v_im = np.zeros_like(self.pad_skeleton)
1108
+ for v_group in all_connected_vertices:
1109
+ all_con_v_im[self.v_cluster_lab == v_group] = 1
1110
+ cropped_skeleton = all_con_v_im
1111
+ self.vertex_clusters_coord = np.transpose(np.array(np.nonzero(cropped_skeleton)))
1112
+ _, _ = self._identify_edges_connecting_a_vertex_list(cropped_skeleton, self.vertex_clusters_coord, self.vertex_clusters_coord)
1113
+ # self.edges_labels
1114
+ del self.v_cluster_stats
1115
+ del self.v_cluster_lab
1116
+
1117
+ def label_edges_from_known_vertices_iteratively(self):
1118
+ """
1119
+ Label edges iteratively from known vertices.
1120
+
1121
+ This method labels edges in an iterative process starting from
1122
+ known vertices. It handles the removal of detected edges and
1123
+ updates the skeleton accordingly, to avoid detecting edges twice.
1124
+ """
1125
+ # II/ Identify all remaining edges
1126
+ if self.new_level_vertices is not None:
1127
+ starting_vertices_coord = np.vstack((self.new_level_vertices[:, :2], self.vertices_branching_tips))
1128
+ starting_vertices_coord = np.unique(starting_vertices_coord, axis=0)
1129
+ else:
1130
+ # We start from the vertices connecting tips
1131
+ starting_vertices_coord = self.vertices_branching_tips.copy()
1132
+ # Remove the detected edges from cropped_skeleton:
1133
+ cropped_skeleton = self.pad_skeleton.copy()
1134
+ cropped_skeleton[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 0
1135
+ cropped_skeleton[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 0
1136
+ cropped_skeleton[self.vertex_clusters_coord[:, 0], self.vertex_clusters_coord[:, 1]] = 0
1137
+
1138
+ # Reinitialize cropped_non_tip_vertices to browse all vertices except tips and groups
1139
+ cropped_non_tip_vertices = self.non_tip_vertices.copy()
1140
+ cropped_non_tip_vertices = remove_coordinates(cropped_non_tip_vertices, self.vertex_clusters_coord)
1141
+ del self.vertex_clusters_coord
1142
+ remaining_v = cropped_non_tip_vertices.shape[0] + 1
1143
+ while remaining_v > cropped_non_tip_vertices.shape[0]:
1144
+ remaining_v = cropped_non_tip_vertices.shape[0]
1145
+ cropped_skeleton, cropped_non_tip_vertices = self._identify_edges_connecting_a_vertex_list(cropped_skeleton, cropped_non_tip_vertices, starting_vertices_coord)
1146
+ if self.new_level_vertices is not None:
1147
+ starting_vertices_coord = np.unique(self.new_level_vertices[:, :2], axis=0)
1148
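+ # A minimal sketch of the stopping rule above, with a hypothetical shrink_once()
+ # standing in for one call to _identify_edges_connecting_a_vertex_list: the loop
+ # keeps running as long as the previous pass removed at least one vertex.
+ #   remaining = vertices.shape[0] + 1
+ #   while remaining > vertices.shape[0]:
+ #       remaining = vertices.shape[0]
+ #       skeleton, vertices = shrink_once(skeleton, vertices)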
+
1149
+ def label_edges_looping_on_1_vertex(self):
1150
+ """
1151
+ Identify and handle edges that form loops around a single vertex.
1152
+ This method processes the skeleton image to find looping edges and updates
1153
+ the edge data structure accordingly.
1154
+ """
1155
+ self.identified = np.zeros_like(self.numbered_vertices)
1156
+ self.identified[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 1
1157
+ self.identified[self.non_tip_vertices[:, 0], self.non_tip_vertices[:, 1]] = 1
1158
+ self.identified[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 1
1159
+ unidentified = (1 - self.identified) * self.pad_skeleton
1160
+
1161
+ # Find out the remaining non-identified pixels
1162
+ nb, self.unidentified_shapes, self.unidentified_stats, ce = cv2.connectedComponentsWithStats(unidentified.astype(np.uint8))
1163
+ # Handle the cases where edges are loops over only one vertex
1164
+ looping_edges = np.nonzero(self.unidentified_stats[:, 4] > 2)[0][1:]
1165
+ for loop_i in looping_edges: # loop_i = looping_edges[0]
1166
+ edge_i = (self.unidentified_shapes == loop_i).astype(np.uint8)
1167
+ dil_edge_i = cv2.dilate(edge_i, square_33)
1168
+ unique_vertices_im = self.numbered_vertices.copy()
1169
+ unique_vertices_im[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 0
1170
+ unique_vertices_im = dil_edge_i * unique_vertices_im
1171
+ unique_vertices = np.unique(unique_vertices_im)
1172
+ unique_vertices = unique_vertices[unique_vertices > 0]
1173
+ if len(unique_vertices) == 1:
1174
+ start, end = unique_vertices[0], unique_vertices[0]
1175
+ new_edge_lengths = edge_i.sum()
1176
+ new_edge_pix_coord = np.transpose(np.vstack((np.nonzero(edge_i))))
1177
+ new_edge_pix_coord = np.hstack((new_edge_pix_coord, np.repeat(1, new_edge_pix_coord.shape[0])[:, None])) # np.arange(1, new_edge_pix_coord.shape[0] + 1)[:, None]))
1178
+ self._update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
1179
+ else:
1180
+ logging.error(f"Other long edges cannot be identified: i={loop_i} of len={edge_i.sum()}")
1181
+ self.identified[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 1
1182
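+ # Minimal toy sketch of the self-loop test above: dilate an unidentified edge
+ # component with a 3x3 square and collect the vertex labels it touches; a single
+ # label means the edge loops back onto that vertex (values here are made up).
+ #   import cv2
+ #   import numpy as np
+ #   numbered_vertices = np.zeros((5, 6), np.int32)
+ #   numbered_vertices[2, 1] = 7                    # one vertex, labeled 7
+ #   edge_i = np.zeros((5, 6), np.uint8)
+ #   edge_i[1, 2:4] = 1; edge_i[3, 2:4] = 1; edge_i[2, 4] = 1   # a small ring next to it
+ #   touched = cv2.dilate(edge_i, np.ones((3, 3), np.uint8)) * numbered_vertices
+ #   labels = np.unique(touched); labels = labels[labels > 0]   # -> array([7]): a loop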
+
1183
+ def clear_areas_of_1_or_2_unidentified_pixels(self):
1184
+ """Removes 1 or 2 pixel size non-identified areas from the skeleton.
1185
+
1186
+ This function checks whether small non-identified areas (1 or 2 pixels)
1187
+ can be removed without breaking the skeleton structure. It performs
1188
+ a series of operations to ensure only safe removals are made, logging
1189
+ errors if the final skeleton is not fully connected or if some unidentified pixels remain.
1190
+ """
1191
+ # Check whether the 1- or 2-pixel unidentified areas can be removed without breaking the skeleton
1192
+ one_pix = np.nonzero(self.unidentified_stats[:, 4] <= 2)[0]
1193
+ cutting_removal = []
1194
+ for pix_i in one_pix: #pix_i=one_pix[0]
1195
+ skel_copy = self.pad_skeleton.copy()
1196
+ y1, y2, x1, x2 = self.unidentified_stats[pix_i, 1], self.unidentified_stats[pix_i, 1] + self.unidentified_stats[pix_i, 3], self.unidentified_stats[pix_i, 0], self.unidentified_stats[pix_i, 0] + self.unidentified_stats[pix_i, 2]
1197
+ skel_copy[y1:y2, x1:x2][self.unidentified_shapes[y1:y2, x1:x2] == pix_i] = 0
1198
+ nb1, sh1 = cv2.connectedComponents(skel_copy.astype(np.uint8), connectivity=8)
1199
+ if nb1 > 2:
1200
+ cutting_removal.append(pix_i)
1201
+ else:
1202
+ self.pad_skeleton[y1:y2, x1:x2][self.unidentified_shapes[y1:y2, x1:x2] == pix_i] = 0
1203
+ if len(cutting_removal) > 0:
1204
+ logging.error(f"These pixels break the skeleton when removed: {cutting_removal}")
1205
+ if (self.identified > 0).sum() != self.pad_skeleton.sum():
1206
+ logging.error(f"Proportion of identified pixels in the skeleton: {(self.identified > 0).sum() / self.pad_skeleton.sum()}")
1207
+ self.pad_distances *= self.pad_skeleton
1208
+ del self.identified
1209
+ del self.unidentified_stats
1210
+ del self.unidentified_shapes
1211
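+ # Minimal sketch of the safety test above on a toy skeleton: a pixel may only be
+ # cleared if the number of 8-connected components does not grow afterwards.
+ #   import cv2
+ #   import numpy as np
+ #   skel = np.array([[1, 1, 1],
+ #                    [0, 1, 0],
+ #                    [1, 1, 1]], np.uint8)
+ #   nb_before, _ = cv2.connectedComponents(skel, connectivity=8)   # 2 labels: background + 1 part
+ #   trial = skel.copy(); trial[1, 1] = 0
+ #   nb_after, _ = cv2.connectedComponents(trial, connectivity=8)   # 3 labels
+ #   safe_to_remove = nb_after <= nb_before                         # False: removal splits the skeleton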
+
1212
+
1213
+ def _identify_edges_connecting_a_vertex_list(self, cropped_skeleton: NDArray[np.uint8], cropped_non_tip_vertices: NDArray, starting_vertices_coord: NDArray) -> Tuple[NDArray[np.uint8], NDArray]:
1214
+ """Identify edges connecting a list of vertices within a cropped skeleton.
1215
+
1216
+ This function iteratively connects the closest vertices from starting_vertices_coord to their nearest neighbors,
1217
+ updating the skeleton and removing already connected vertices until no new connections can be made or
1218
+ a maximum number of connections is reached.
1219
+
1220
+ Parameters
1221
+ ----------
1222
+ cropped_skeleton : ndarray of uint8
1223
+ A binary skeleton image where skeletal pixels are marked as 1.
1224
+ cropped_non_tip_vertices : ndarray of int
1225
+ Coordinates of non-tip vertices in the cropped skeleton.
1226
+ starting_vertices_coord : ndarray of int
1227
+ Coordinates of vertices from which to find connections.
1228
+
1229
+ Returns
1230
+ -------
1231
+ cropped_skeleton : ndarray of uint8
1232
+ Updated skeleton with edges marked as 0.
1233
+ cropped_non_tip_vertices : ndarray of int
1234
+ Updated list of non-tip vertices after removing those that have been connected.
1235
+ """
1236
+ explored_connexions_per_vertex = 0 # counts the edges explored per vertex; capped at 5, the maximal number of edges that can connect a vertex
1237
+ new_connexions = True
1238
+ while new_connexions and explored_connexions_per_vertex < 5 and np.any(cropped_non_tip_vertices) and np.any(starting_vertices_coord):
1239
+ # print(new_connexions)
1240
+ explored_connexions_per_vertex += 1
1241
+ # 1. Find the ith closest vertex to each focal vertex
1242
+ ending_vertices_coord, new_edge_lengths, new_edge_pix_coord = _find_closest_vertices(
1243
+ cropped_skeleton, cropped_non_tip_vertices, starting_vertices_coord)
1244
+ if np.isnan(new_edge_lengths).sum() + (new_edge_lengths == 0).sum() == new_edge_lengths.shape[0]:
1245
+ new_connexions = False
1246
+ else:
1247
+ # In new_edge_lengths, zeros are duplicates and nan are lone vertices (from starting_vertices_coord)
1248
+ # Find out which starting_vertices_coord should be kept and which ones should be used to save edges
1249
+ no_new_connexion = np.isnan(new_edge_lengths)
1250
+ no_found_connexion = np.logical_or(no_new_connexion, new_edge_lengths == 0)
1251
+ found_connexion = np.logical_not(no_found_connexion)
1252
+
1253
+ # Vertex-to-vertex connexions must be analyzed only once: remove them along with the non-connectable vertices
1254
+ vertex_to_vertex_connexions = new_edge_lengths == 1
1255
+
1256
+ # Save edge data
1257
+ start = self.numbered_vertices[
1258
+ starting_vertices_coord[found_connexion, 0], starting_vertices_coord[found_connexion, 1]]
1259
+ end = self.numbered_vertices[
1260
+ ending_vertices_coord[found_connexion, 0], ending_vertices_coord[found_connexion, 1]]
1261
+ new_edge_lengths = new_edge_lengths[found_connexion]
1262
+ self._update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
1263
+
1264
+ no_new_connexion = np.logical_or(no_new_connexion, vertex_to_vertex_connexions)
1265
+ vertices_to_crop = starting_vertices_coord[no_new_connexion, :]
1266
+
1267
+ # Remove non-connectable and already-processed vertices from both vertex lists:
1268
+ cropped_non_tip_vertices = remove_coordinates(cropped_non_tip_vertices, vertices_to_crop)
1269
+ starting_vertices_coord = remove_coordinates(starting_vertices_coord, vertices_to_crop)
1270
+
1271
+ if new_edge_pix_coord.shape[0] > 0:
1272
+ # Update cropped_skeleton to not identify each edge more than once
1273
+ cropped_skeleton[new_edge_pix_coord[:, 0], new_edge_pix_coord[:, 1]] = 0
1274
+ # And the starting vertices that cannot connect anymore
1275
+ cropped_skeleton[vertices_to_crop[:, 0], vertices_to_crop[:, 1]] = 0
1276
+
1277
+ if self.new_level_vertices is None:
1278
+ self.new_level_vertices = ending_vertices_coord[found_connexion, :].copy()
1279
+ else:
1280
+ self.new_level_vertices = np.vstack((self.new_level_vertices, ending_vertices_coord[found_connexion, :]))
1281
+ return cropped_skeleton, cropped_non_tip_vertices
1282
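+ # For reference, a numpy-only sketch of what remove_coordinates is expected to do
+ # here (drop from coords every row that also appears in to_drop); the actual helper
+ # lives in the package utilities and may differ in implementation.
+ #   import numpy as np
+ #   coords = np.array([[1, 1], [2, 5], [4, 4]])
+ #   to_drop = np.array([[2, 5]])
+ #   keep = ~(coords[:, None, :] == to_drop[None, :, :]).all(-1).any(-1)
+ #   coords = coords[keep]                          # -> [[1, 1], [4, 4]]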
+
1283
+ def _update_edge_data(self, start, end, new_edge_lengths: NDArray, new_edge_pix_coord: NDArray):
1284
+ """
1285
+ Update edge data by expanding existing arrays with new edges.
1286
+
1287
+ This method updates the internal edge data structures (lengths,
1288
+ labels, and pixel coordinates) by appending new edges.
1289
+
1290
+ Parameters
1291
+ ----------
1292
+ start : int or ndarray of int
1293
+ The starting vertex label(s) for the new edges.
1294
+ end : int or ndarray of int
1295
+ The ending vertex label(s) for the new edges.
1296
+ new_edge_lengths : ndarray of float
1297
+ The lengths of the new edges to be added.
1298
+ new_edge_pix_coord : ndarray of float
1299
+ The pixel coordinates of the new edges.
1300
+
1301
+ Attributes
1302
+ ----------
1303
+ edge_lengths : ndarray of float
1304
+ The lengths of all edges.
1305
+ edges_labels : ndarray of int
1306
+ The labels for each edge (start and end vertices).
1307
+ edge_pix_coord : ndarray of float
1308
+ The pixel coordinates for all edges.
1309
+ """
1310
+ if isinstance(start, np.ndarray):
1311
+ end_idx = len(start)
1312
+ self.edge_lengths = np.concatenate((self.edge_lengths, new_edge_lengths))
1313
+ else:
1314
+ end_idx = 1
1315
+ self.edge_lengths = np.append(self.edge_lengths, new_edge_lengths)
1316
+ start_idx = self.edges_labels.shape[0]
1317
+ new_edges = np.zeros((end_idx, 3), dtype=np.uint32)
1318
+ new_edges[:, 0] = np.arange(start_idx, start_idx + end_idx) + 1 # edge label
1319
+ new_edges[:, 1] = start # starting vertex label
1320
+ new_edges[:, 2] = end # ending vertex label
1321
+ self.edges_labels = np.vstack((self.edges_labels, new_edges))
1322
+ # Add the new edge coord
1323
+ if new_edge_pix_coord.shape[0] > 0:
1324
+ # Add the new edge pixel coord
1325
+ new_edge_pix_coord[:, 2] += start_idx
1326
+ self.edge_pix_coord = np.vstack((self.edge_pix_coord, new_edge_pix_coord))
1327
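+ # Quick illustration of the bookkeeping above (values are made up): each row of
+ # edges_labels is [edge_label, start_vertex, end_vertex], each row of edge_pix_coord
+ # is [y, x, edge_label], and locally numbered pixel rows are shifted by the number
+ # of pre-existing edges (start_idx) before being appended.
+ #   import numpy as np
+ #   edges_labels = np.array([[1, 4, 7]], np.uint32)      # one existing edge: label 1, vertices 4 -> 7
+ #   edge_pix_coord = np.array([[10, 11, 1]], np.uint32)  # its interior pixel, tagged with label 1
+ #   start_idx = edges_labels.shape[0]                    # = 1
+ #   new_edges = np.array([[start_idx + 1, 7, 9]], np.uint32)   # the next edge gets label 2
+ #   new_pix = np.array([[12, 11, 1]], np.uint32)         # local edge id 1 from the search
+ #   new_pix[:, 2] += start_idx                           # now tagged with global label 2
+ #   edges_labels = np.vstack((edges_labels, new_edges))
+ #   edge_pix_coord = np.vstack((edge_pix_coord, new_pix))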
+
1328
+ def clear_edge_duplicates(self):
1329
+ """
1330
+ Remove duplicate edges by checking vertices and coordinates.
1331
+
1332
+ This method identifies and removes duplicate edges based on their vertex labels
1333
+ and pixel coordinates. It scans through the edge attributes, compares them,
1334
+ and removes duplicates if they are found.
1335
+ """
1336
+ edges_to_remove = []
1337
+ duplicates = find_duplicates_coord(np.vstack((self.edges_labels[:, 1:], self.edges_labels[:, :0:-1])))
1338
+ duplicates = np.logical_or(duplicates[:len(duplicates)//2], duplicates[len(duplicates)//2:])
1339
+ for v in self.edges_labels[duplicates, 1:]: #v = self.edges_labels[duplicates, 1:][4]
1340
+ edge_lab1 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v, axis=1), 0]
1341
+ edge_lab2 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v[::-1], axis=1), 0]
1342
+ edge_labs = np.unique(np.concatenate((edge_lab1, edge_lab2)))
1343
+ for edge_i in range(0, len(edge_labs) - 1): # edge_i = 0
1344
+ edge_i_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_labs[edge_i], :2]
1345
+ for edge_j in range(edge_i + 1, len(edge_labs)): # edge_j = 1
1346
+ edge_j_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_labs[edge_j], :2]
1347
+ if np.array_equal(edge_i_coord, edge_j_coord):
1348
+ edges_to_remove.append(edge_labs[edge_j])
1349
+
1350
+ for edge in edges_to_remove:
1351
+ edge_bool = self.edges_labels[:, 0] != edge
1352
+ self.edges_labels = self.edges_labels[edge_bool, :]
1353
+ self.edge_lengths = self.edge_lengths[edge_bool]
1354
+ self.edge_pix_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] != edge, :]
1355
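+ # An equivalent-in-spirit numpy sketch of the duplicate search (the actual
+ # find_duplicates_coord helper lives in the package utilities): treat each edge's
+ # vertex pair as unordered and flag pairs that occur more than once.
+ #   import numpy as np
+ #   pairs = np.array([[4, 7], [7, 4], [4, 9]])           # columns 1: of edges_labels
+ #   unordered = np.sort(pairs, axis=1)
+ #   _, inv, counts = np.unique(unordered, axis=0, return_inverse=True, return_counts=True)
+ #   duplicated = counts[inv] > 1                          # -> [True, True, False]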
+
1356
+
1357
+ def clear_vertices_connecting_2_edges(self):
1358
+ """
1359
+ Remove vertices connecting exactly two edges and update edge-related attributes.
1360
+
1361
+ This method identifies vertices that are connected to exactly 2 edges,
1362
+ renames edges, updates edge lengths and vertex coordinates accordingly.
1363
+ It also removes the corresponding vertices from the non-tip vertices list.
1364
+ """
1365
+ v_labels, v_counts = np.unique(self.edges_labels[:, 1:], return_counts=True)
1366
+ vertices2 = v_labels[v_counts == 2]
1367
+ for vertex2 in vertices2: # vertex2 = vertices2[0]
1368
+ edge_indices = np.nonzero(self.edges_labels[:, 1:] == vertex2)[0]
1369
+ edge_names = [self.edges_labels[edge_indices[0], 0], self.edges_labels[edge_indices[1], 0]]
1370
+ v_names = np.concatenate((self.edges_labels[edge_indices[0], 1:], self.edges_labels[edge_indices[1], 1:]))
1371
+ v_names = v_names[v_names != vertex2]
1372
+ kept_edge = int(self.edge_lengths[edge_indices[1]] >= self.edge_lengths[edge_indices[0]])
1373
+
1374
+ # Rename the removed edge by the kept edge name in pix_coord:
1375
+ self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_names[1 - kept_edge], 2] = edge_names[kept_edge]
1376
+ # Add the removed edge length to the kept edge length
1377
+ self.edge_lengths[self.edges_labels[:, 0] == edge_names[kept_edge]] += self.edge_lengths[self.edges_labels[:, 0] == edge_names[1 - kept_edge]]
1378
+ # Remove the corresponding edge length from the list
1379
+ self.edge_lengths = self.edge_lengths[self.edges_labels[:, 0] != edge_names[1 - kept_edge]]
1380
+ # Update the end vertices of the kept edge in edges_labels to bypass the removed vertex
1381
+ self.edges_labels[self.edges_labels[:, 0] == edge_names[kept_edge], 1:] = v_names[1 - kept_edge], v_names[kept_edge]
1382
+ # Remove the removed edge from the edges_labels array
1383
+ self.edges_labels = self.edges_labels[self.edges_labels[:, 0] != edge_names[1 - kept_edge], :]
1384
+
1385
+ vY, vX = np.nonzero(self.numbered_vertices == vertex2)
1386
+ v_idx = np.nonzero(np.all(self.non_tip_vertices == [vY[0], vX[0]], axis=1))
1387
+ self.non_tip_vertices = np.delete(self.non_tip_vertices, v_idx, axis=0)
1388
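+ # Toy illustration of how degree-2 vertices are found above: count how many edge
+ # endpoints reference each vertex label; a label appearing exactly twice only
+ # chains two edges together and can be contracted (values are made up).
+ #   import numpy as np
+ #   edges_labels = np.array([[1, 4, 7], [2, 7, 9], [3, 9, 4], [4, 9, 12]])
+ #   v_labels, v_counts = np.unique(edges_labels[:, 1:], return_counts=True)
+ #   degree_two = v_labels[v_counts == 2]                  # -> array([4, 7])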
+
1389
+ def _remove_padding(self):
1390
+ """
1391
+ Removes padding from various coordinate arrays.
1392
+
1393
+ This method adjusts the coordinates of edge pixels, tips,
1394
+ and non-tip vertices by subtracting 1 from their x and y values.
1395
+ It also removes padding from the skeleton, distances, and vertices
1396
+ using the `remove_padding` function.
1397
+ """
1398
+ self.edge_pix_coord[:, :2] -= 1
1399
+ self.tips_coord[:, :2] -= 1
1400
+ self.non_tip_vertices[:, :2] -= 1
1401
+ self.skeleton, self.distances, self.vertices = remove_padding(
1402
+ [self.pad_skeleton, self.pad_distances, self.numbered_vertices])
1403
+
1404
+
1405
+ def make_vertex_table(self, origin_contours: NDArray[np.uint8]=None, growing_areas: NDArray[bool]=None):
1406
+ """
1407
+ Generate a vertex table for the vertices.
1408
+
1409
+ This method constructs a 2D NumPy array holding information
1410
+ about all vertices. Each row corresponds to one vertex identified either
1411
+ by its coordinates in `self.tips_coord` or `self.non_tip_vertices`. The
1412
+ array includes additional information about each vertex, such as whether
1413
+ it is a food vertex, lies in a growing area, or belongs to a vertex cluster.
1414
+
1415
+ Parameters
1416
+ ----------
1417
+ origin_contours : ndarray of uint8, optional
1418
+ Binary map to identify food vertices. Default is `None`.
1419
+ growing_areas : ndarray of bool, optional
1420
+ Binary map to identify growing regions. Default is `None`.
1421
+
1422
+ Notes
1423
+ -----
1424
+ The method updates the instance attribute `self.vertex_table` with
1425
+ the generated vertex information.
1426
+ """
1427
+ if self.vertices is None:
1428
+ self._remove_padding()
1429
+ self.vertex_table = np.zeros((self.tips_coord.shape[0] + self.non_tip_vertices.shape[0], 6), dtype=self.vertices.dtype)
1430
+ self.vertex_table[:self.tips_coord.shape[0], :2] = self.tips_coord
1431
+ self.vertex_table[self.tips_coord.shape[0]:, :2] = self.non_tip_vertices
1432
+ self.vertex_table[:self.tips_coord.shape[0], 2] = self.vertices[self.tips_coord[:, 0], self.tips_coord[:, 1]]
1433
+ self.vertex_table[self.tips_coord.shape[0]:, 2] = self.vertices[self.non_tip_vertices[:, 0], self.non_tip_vertices[:, 1]]
1434
+ self.vertex_table[:self.tips_coord.shape[0], 3] = 1
1435
+ if origin_contours is not None:
1436
+ food_vertices = self.vertices[origin_contours > 0]
1437
+ food_vertices = food_vertices[food_vertices > 0]
1438
+ self.vertex_table[np.isin(self.vertex_table[:, 2], food_vertices), 4] = 1
1439
+
1440
+ if growing_areas is not None:
1441
+ growing = np.isin(self.vertex_table[:, 2], np.unique(self.vertices * growing_areas)[1:])
1442
+ self.vertex_table[growing, 4] = 2
1443
+
1444
+ nb, sh, stats, cent = cv2.connectedComponentsWithStats((self.vertices > 0).astype(np.uint8))
1445
+ for i, v_i in enumerate(np.nonzero(stats[:, 4] > 1)[0][1:]):
1446
+ v_labs = self.vertices[sh == v_i]
1447
+ for v_lab in v_labs: # v_lab = v_labs[0]
1448
+ self.vertex_table[self.vertex_table[:, 2] == v_lab, 5] = 1
1449
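+ # Column layout of self.vertex_table as filled above (one row per vertex):
+ #   0: y coordinate    1: x coordinate    2: vertex label in self.vertices
+ #   3: 1 if the vertex is a tip, 0 otherwise
+ #   4: 1 if it lies on an origin contour, 2 if it lies in a growing area, 0 otherwise
+ #   5: 1 if it belongs to a multi-pixel vertex cluster, 0 otherwise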
+
1450
+
1451
+ def make_edge_table(self, greyscale: NDArray[np.uint8]):
1452
+ """
1453
+ Generate edge table with length and average intensity information.
1454
+
1455
+ This method processes the vertex coordinates, calculates lengths
1456
+ between vertices for each edge, and computes average width and intensity
1457
+ along the edges. Additionally, it computes edge betweenness centrality
1458
+ for each vertex pair.
1459
+
1460
+ Parameters
1461
+ ----------
1462
+ greyscale : ndarray of uint8
1463
+ Grayscale image.
1464
+ """
1465
+ self.edge_table = np.zeros((self.edges_labels.shape[0], 7), float) # edge_id, vertex1, vertex2, length, average_width, int, BC
1466
+ self.edge_table[:, :3] = self.edges_labels[:, :]
1467
+ self.edge_table[:, 3] = self.edge_lengths
1468
+ for idx, edge_lab in enumerate(self.edges_labels[:, 0]):
1469
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :]
1470
+ v_id = self.edges_labels[self.edges_labels[:, 0] == edge_lab, 1:][0]
1471
+ v1_width, v2_width = self.distances[self.vertices == v_id[0]], self.distances[self.vertices == v_id[1]]
1472
+ pix_widths = np.concatenate((v1_width, v2_width))
1473
+ v1_int, v2_int = greyscale[self.vertices == v_id[0]], greyscale[self.vertices == v_id[1]]
1474
+ pix_ints = np.concatenate((v1_int, v2_int))
1475
+ if len(edge_coord) > 0:
1476
+ pix_widths = np.append(pix_widths, self.distances[edge_coord[:, 0], edge_coord[:, 1]])
1477
+ pix_ints = np.append(pix_ints, greyscale[edge_coord[:, 0], edge_coord[:, 1]])
1478
+ self.edge_table[idx, 4] = pix_widths.mean()
1479
+ self.edge_table[idx, 5] = pix_ints.mean()
1480
+
1481
+ G = nx.from_edgelist(self.edges_labels[:, 1:])
1482
+ e_BC = nx.edge_betweenness_centrality(G, seed=0)
1483
+ self.BC_net = np.zeros_like(self.distances)
1484
+ for v, k in e_BC.items(): # v=(81, 80)
1485
+ v1_coord = self.vertex_table[self.vertex_table[:, 2] == v[0], :2]
1486
+ v2_coord = self.vertex_table[self.vertex_table[:, 2] == v[1], :2]
1487
+ full_coord = np.concatenate((v1_coord, v2_coord))
1488
+ edge_lab1 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v[::-1], axis=1), 0]
1489
+ edge_lab2 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v, axis=1), 0]
1490
+ edge_lab = np.unique(np.concatenate((edge_lab1, edge_lab2)))
1491
+ if len(edge_lab) == 1:
1492
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :2]
1493
+ full_coord = np.concatenate((full_coord, edge_coord))
1494
+ self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1495
+ self.edge_table[self.edge_table[:, 0] == edge_lab, 6] = k
1496
+ elif len(edge_lab) > 1:
1497
+ edge_coord0 = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[0], :2]
1498
+ for edge_i in range(len(edge_lab)): # edge_i=1
1499
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[edge_i], :2]
1500
+ self.edge_table[self.edge_table[:, 0] == edge_lab[edge_i], 6] = k
1501
+ full_coord = np.concatenate((full_coord, edge_coord))
1502
+ self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1503
+ if edge_i > 0 and np.array_equal(edge_coord0, edge_coord):
1504
+ print(f"There still is two identical edges: {edge_lab} of len: {len(edge_coord)} linking v={v}")
1505
+ break
1506
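+ # Minimal, self-contained sketch of the betweenness step above on a toy graph
+ # (labels are arbitrary): nx.edge_betweenness_centrality returns a dict keyed by
+ # vertex pairs, which the loop above maps back onto edge labels and pixels.
+ #   import networkx as nx
+ #   G = nx.from_edgelist([(4, 7), (7, 9), (9, 4), (9, 12)])
+ #   e_BC = nx.edge_betweenness_centrality(G)
+ #   # the pendant edge 9-12 gets the largest value: every shortest path to vertex 12 crosses it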
+
1507
+
1508
+ def _find_closest_vertices(skeleton: NDArray[np.uint8], all_vertices_coord: NDArray, starting_vertices_coord: NDArray) -> Tuple[NDArray, NDArray[np.float64], NDArray[np.uint32]]:
1509
+ """
1510
+ Find the closest vertices in a skeleton graph.
1511
+
1512
+ This function performs a breadth-first search (BFS) from each starting vertex to find the nearest branching
1513
+ vertex in a skeleton graph. It returns the coordinates of the ending vertices, edge lengths,
1514
+ and the coordinates of all pixels along each edge.
1515
+
1516
+ Parameters
1517
+ ----------
1518
+ skeleton : ndarray of uint8
1519
+ The skeleton graph represented as a binary image.
1520
+ all_vertices_coord : ndarray
1521
+ Coordinates of all branching vertices in the skeleton.
1522
+ starting_vertices_coord : ndarray
1523
+ Coordinates of the starting vertices from which to search.
1524
+
1525
+ Returns
1526
+ -------
1527
+ ending_vertices_coord : ndarray of uint32
1528
+ Coordinates of the ending vertices found for each starting vertex.
1529
+ edge_lengths : ndarray of float64
1530
+ Lengths of the edges from each starting vertex to its corresponding ending vertex.
1531
+ edges_coords : ndarray of uint32
1532
+ Coordinates of all pixels along each edge.
1533
+
1534
+ Examples
1535
+ --------
1536
1537
+ >>> skeleton = np.array([[0, 0, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0]])
1538
+ >>> all_vertices_coord = np.array([[1, 1], [3, 1]])
1539
+ >>> starting_vertices_coord = np.array([[1, 1]])
1540
+ >>> ending_vertices_coord, edge_lengths, edges_coords = _find_closest_vertices(skeleton, all_vertices_coord, starting_vertices_coord)
1541
+ >>> print(ending_vertices_coord)
1542
+ [[3 1 1]]
1543
+ >>> print(edge_lengths)
1544
+ [2.]
1545
+ >>> print(edges_coords)
1546
+ [[2 1 1]]
1547
+ """
1548
+
1549
+ # Convert branching vertices to set for quick lookup
1550
+ branch_set = set(zip(all_vertices_coord[:, 0], all_vertices_coord[:, 1]))
1551
+ n = starting_vertices_coord.shape[0]
1552
+
1553
+ ending_vertices_coord = np.zeros((n, 3), np.int32) # next_vertex_y, next_vertex_x, edge_id
1554
+ edge_lengths = np.zeros(n, np.float64)
1555
+ all_path_pixels = [] # Will hold rows of (y, x, edge_id)
1556
+ i = 0
1557
+ edge_i = 0
1558
+ for tip_y, tip_x in zip(starting_vertices_coord[:, 0], starting_vertices_coord[:, 1]):
1559
+ visited = np.zeros_like(skeleton, dtype=bool)
1560
+ parent = {}
1561
+ q = deque()
1562
+
1563
+ q.append((tip_y, tip_x))
1564
+ visited[tip_y, tip_x] = True
1565
+ parent[(tip_y, tip_x)] = None
1566
+ found_vertex = None
1567
+
1568
+ while q:
1569
+ r, c = q.popleft()
1570
+
1571
+ # Check for branching vertex (ignore the starting tip itself)
1572
+ if (r, c) in branch_set and (r, c) != (tip_y, tip_x):
1573
+ # if (r, c) in branch_set and (r, c) not in v_set:
1574
+ found_vertex = (r, c)
1575
+ break # stop at first encountered (shortest due to BFS)
1576
+
1577
+ for dr, dc in neighbors_8:
1578
+ nr, nc = r + dr, c + dc
1579
+ if (0 <= nr < skeleton.shape[0] and 0 <= nc < skeleton.shape[1] and
1580
+ not visited[nr, nc] and skeleton[nr, nc] > 0): # This does not work: and (nr, nc) not in v_set
1581
+ visited[nr, nc] = True
1582
+ parent[(nr, nc)] = (r, c)
1583
+ q.append((nr, nc))
1584
+ if found_vertex:
1585
+ fy, fx = found_vertex
1586
+ # Do not add the connection if it has already been detected from the other direction:
1587
+ from_start = np.all(starting_vertices_coord[:i, :] == [fy, fx], axis=1).any()
1588
+ to_end = np.all(ending_vertices_coord[:i, :2] == [tip_y, tip_x], axis=1).any()
1589
+ if not from_start or not to_end:
1590
+ edge_i += 1
1591
+ ending_vertices_coord[i, :] = [fy, fx, i + 1]
1592
+ # Reconstruct path from found_vertex back to tip
1593
+ path = []
1594
+ current = (fy, fx)
1595
+ while current is not None:
1596
+ path.append((i, *current))
1597
+ current = parent[current]
1598
+
1599
+ # path.reverse() # So path goes from starting tip to found vertex
1600
+
1601
+ for _, y, x in path[1:-1]: # Exclude the two end vertices from the edge pixel path
1602
+ all_path_pixels.append((y, x, edge_i))
1603
+
1604
+ edge_lengths[i] = len(path) - 1 # exclude one node for length computation
1605
+
1606
+ else:
1607
+ edge_lengths[i] = np.nan
1608
+ i += 1
1609
+
1610
+ edges_coords = np.array(all_path_pixels, dtype=np.uint32)
1611
+ return ending_vertices_coord, edge_lengths, edges_coords
1612
+
1613
+
1614
+ def add_padding(array_list: list) -> list:
1615
+ """
1616
+ Add padding to each 2D array in a list.
1617
+
1618
+ Parameters
1619
+ ----------
1620
+ array_list : list of ndarrays
1621
+ List of 2D NumPy arrays to be processed.
1622
+
1623
+ Returns
1624
+ -------
1625
+ out : list of ndarrays
1626
+ List of 2D NumPy arrays with one pixel of zero padding added on each side.
1627
+
1628
+ Examples
1629
+ --------
1630
+ >>> array_list = [np.array([[1, 2], [3, 4]])]
1631
+ >>> padded_list = add_padding(array_list)
1632
+ >>> print(padded_list[0])
1633
+ [[0 0 0 0]
1634
+ [0 1 2 0]
1635
+ [0 3 4 0]
1636
+ [0 0 0 0]]
1637
+ """
1638
+ new_array_list = []
1639
+ for arr in array_list:
1640
+ new_array_list.append(np.pad(arr, [(1, ), (1, )], mode='constant'))
1641
+ return new_array_list
1642
+
1643
+
1644
+ def remove_padding(array_list: list) -> list:
1645
+ """
1646
+ Remove padding from a list of 2D arrays.
1647
+
1648
+ Parameters
1649
+ ----------
1650
+ array_list : list of ndarrays
1651
+ List of 2D NumPy arrays to be processed.
1652
+
1653
+ Returns
1654
+ -------
1655
+ out : list of ndarrays
1656
+ List of 2D NumPy arrays with the padding removed.
1657
+
1658
+ Examples
1659
+ --------
1660
+ >>> arr1 = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
1661
+ >>> arr2 = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
1662
+ >>> remove_padding([arr1, arr2])
1663
+ [array([[1]]), array([[0]])]
1664
+ """
1665
+ new_array_list = []
1666
+ for arr in array_list:
1667
+ new_array_list.append(arr[1:-1, 1:-1])
1668
+ return new_array_list
1669
+
1670
+
1671
+ def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64], pad_origin: NDArray[np.uint8], pad_network: NDArray[np.uint8], pad_origin_centroid: NDArray) -> Tuple[NDArray[np.uint8], NDArray[np.float64], NDArray[np.uint8]]:
1672
+ """
1673
+ Add a central contour to the skeleton while preserving distances.
1674
+
1675
+ This function modifies the input skeleton and distance arrays by adding a
1676
+ central contour around an initial shape, updating the skeleton to include this new contour, and
1677
+ preserving the distance information.
1678
+
1679
+ Parameters
1680
+ ----------
1681
+ pad_skeleton : ndarray of uint8
1682
+ The initial skeleton.
1683
+ pad_distances : ndarray of float64
1684
+ The distance array corresponding to the input skeleton.
1685
+ pad_origin : ndarray of uint8
1686
+ Array representing origin points in the image.
1687
+ pad_network : ndarray of uint8
1688
+ Network structure array used to find contours in the skeleton.
1689
+ pad_origin_centroid : ndarray
1690
+ Centroids of origin points.
1691
+
1692
+ Returns
1693
+ -------
1694
+ out : tuple(ndarray of uint8, ndarray of float64, ndarray of uint8)
1695
+ Tuple containing the new skeleton, updated distance array,
1696
+ and origin contours.
1697
+ """
1698
+ pad_net_contour = get_contours(pad_network)
1699
+
1700
+ # Make a hole at the skeleton center and find the vertices connecting to it
1701
+ holed_skeleton = pad_skeleton * (1 - pad_origin)
1702
+ pad_vertices, pad_tips = get_vertices_and_tips_from_skeleton(pad_skeleton)
1703
+ dil_origin = cv2.dilate(pad_origin, rhombus_55, iterations=20)
1704
+ pad_vertices *= dil_origin
1705
+ connecting_pixels = np.transpose(np.array(np.nonzero(pad_vertices)))
1706
+
1707
+ skeleton_without_vertices = pad_skeleton.copy()
1708
+ skeleton_without_vertices[pad_vertices > 0] = 0
1709
+
1710
+ # Previously, the skeleton was connected to the center of the shape.
1711
+ line_coordinates = get_all_line_coordinates(pad_origin_centroid, connecting_pixels)
1712
+ with_central_contour = holed_skeleton.copy()
1713
+ for vertex, new_edge in zip(connecting_pixels, line_coordinates): # nei = 65; new_edge=line_coordinates[nei]
1714
+ new_edge_im = np.zeros_like(pad_origin)
1715
+ new_edge_im[new_edge[:, 0], new_edge[:, 1]] = 1
1716
+ if not np.any(new_edge_im * pad_net_contour) and not np.any(new_edge_im * skeleton_without_vertices):# and not np.any(new_edge_im * holed_skeleton):
1717
+ with_central_contour[new_edge[:, 0], new_edge[:, 1]] = 1
1718
+
1719
+ # Add the origin contour
1720
+ pad_origin_contours = get_contours(pad_origin)
1721
+ with_central_contour *= (1 - pad_origin)
1722
+ with_central_contour += pad_origin_contours
1723
+ if np.any(with_central_contour == 2):
1724
+ with_central_contour[with_central_contour > 0] = 1
1725
+
1726
+ # show(dil_origin * with_central_contour)
1727
+ # Capture only the new contour and its neighborhood, get its skeleton and update the final skeleton
1728
+ new_contour = cv2.morphologyEx(dil_origin * with_central_contour, cv2.MORPH_CLOSE, square_33)
1729
+ new_contour = morphology.medial_axis(new_contour, rng=0).astype(np.uint8)
1730
+ new_skeleton = with_central_contour * (1 - dil_origin)
1731
+ new_skeleton += new_contour
1732
+ new_pixels = np.logical_and(pad_distances == 0, new_skeleton == 1)
1733
+ new_pix_coord = np.transpose(np.array(np.nonzero(new_pixels)))
1734
+ dist_coord = np.transpose(np.array(np.nonzero(pad_distances)))
1735
+
1736
+ dist_from_dist = cdist(new_pix_coord[:, :], dist_coord)
1737
+ for np_i, dist_i in enumerate(dist_from_dist): # dist_i=dist_from_dist[0]
1738
+ close_i = dist_i.argmin()
1739
+ pad_distances[new_pix_coord[np_i, 0], new_pix_coord[np_i, 1]] = pad_distances[dist_coord[close_i, 0], dist_coord[close_i, 1]]
1740
+
1741
+ # Update distances
1742
+ pad_distances *= new_skeleton
1743
+
1744
+ dil_pad_origin_contours = cv2.dilate(pad_origin_contours, cross_33, iterations=1)
1745
+ new_pad_origin_contours = dil_pad_origin_contours * new_skeleton
1746
+ nb, sh = cv2.connectedComponents(new_pad_origin_contours)
1747
+ while nb > 2:
1748
+ dil_pad_origin_contours = cv2.dilate(dil_pad_origin_contours, cross_33, iterations=1)
1749
+ new_pad_origin_contours = dil_pad_origin_contours * new_skeleton
1750
+ nb, sh = cv2.connectedComponents(new_pad_origin_contours)
1751
+ pad_origin_contours = new_pad_origin_contours
1752
+ pad_distances[pad_origin_contours > 0] = np.nan # pad_distances.max() + 1 #
1753
+ # test1 = ((pad_distances > 0) * (1 - new_skeleton)).sum() == 0
1754
+ # test2 = ((1 - (pad_distances > 0)) * new_skeleton).sum() == 0
1755
+
1756
+ return new_skeleton, pad_distances, pad_origin_contours
1757
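+ # Sketch of the dilate-until-connected pattern used at the end of this function,
+ # on a toy mask: grow the seed with a cross kernel until its intersection with the
+ # target forms a single component (2 labels = background + 1 component).
+ #   import cv2
+ #   import numpy as np
+ #   cross_33 = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], np.uint8)
+ #   target = np.ones((5, 5), np.uint8)
+ #   seed = np.zeros((5, 5), np.uint8); seed[2, 0] = 1; seed[2, 4] = 1
+ #   grown = seed.copy()
+ #   nb, _ = cv2.connectedComponents(grown * target)
+ #   while nb > 2:
+ #       grown = cv2.dilate(grown, cross_33, iterations=1)
+ #       nb, _ = cv2.connectedComponents(grown * target)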
+