cellects-0.1.3-py3-none-any.whl → cellects-0.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +365 -326
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +4 -3
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +841 -450
  14. cellects/gui/required_output.py +100 -56
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +317 -135
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -105
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.7.dist-info/LICENSE +675 -0
  29. cellects-0.2.7.dist-info/METADATA +829 -0
  30. cellects-0.2.7.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.3.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.3.dist-info/METADATA +0 -176
  35. cellects-0.1.3.dist-info/RECORD +0 -44
  36. {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/top_level.txt +0 -0
@@ -26,9 +26,8 @@ Uses morphological operations for network refinement, including hole closing, co
26
26
  and distance transform analysis. Implements both Otsu thresholding and rolling window segmentation
27
27
  methods for image processing workflows.
28
28
  """
29
-
30
- from cellects.image_analysis.morphological_operations import square_33, cross_33, rhombus_55, Ellipse, CompareNeighborsWithValue, get_contours, get_all_line_coordinates, close_holes, keep_one_connected_component
31
- from cellects.utils.utilitarian import remove_coordinates
29
+ from cellects.image_analysis.morphological_operations import square_33, cross_33, rhombus_55, create_ellipse, image_borders, CompareNeighborsWithValue, get_contours, get_all_line_coordinates, close_holes, keep_one_connected_component, get_min_or_max_euclidean_pair
30
+ from cellects.utils.utilitarian import remove_coordinates, smallest_memory_array
32
31
  from cellects.utils.formulas import *
33
32
  from cellects.utils.load_display_save import *
34
33
  from cellects.image_analysis.image_segmentation import generate_color_space_combination, rolling_window_segmentation, binary_quality_index, find_threshold_given_mask
@@ -39,6 +38,8 @@ from collections import deque
39
38
  from scipy.spatial.distance import cdist
40
39
  from scipy.ndimage import distance_transform_edt
41
40
  import networkx as nx
41
+ import pandas as pd
42
+ from timeit import default_timer as timer
42
43
 
43
44
  # 8-connectivity neighbors
44
45
  neighbors_8 = [(-1, -1), (-1, 0), (-1, 1),
@@ -46,6 +47,146 @@ neighbors_8 = [(-1, -1), (-1, 0), (-1, 1),
46
47
  (1, -1), (1, 0), (1, 1)]
47
48
  neighbors_4 = [(-1, 0), (0, -1), (0, 1), (1, 0)]
48
49
 
50
+ def detect_network_dynamics(converted_video: NDArray, binary: NDArray[np.uint8], arena_label: int=1,
51
+ starting_time: int=0, visu: NDArray=None, origin: NDArray[np.uint8]=None,
52
+ smooth_segmentation_over_time: bool = True, detect_pseudopods: bool = True,
53
+ save_coord_network: bool = True, show_seg: bool = False):
54
+ """
55
+ Detects and tracks dynamic features (e.g., pseudopods) in a biological network over time from video data.
56
+
57
+ Analyzes spatiotemporal dynamics of a network structure using binary masks and grayscale video data. Processes each frame to detect network components, optionally identifies pseudopods, applies temporal smoothing, and generates visualization overlays. Saves coordinate data for detected networks if enabled.
58
+
59
+ Parameters
60
+ ----------
61
+ converted_video : NDArray
62
+ Input video data array with shape (time x y x z) representing grayscale intensities.
63
+ binary : NDArray[np.uint8]
64
+ Binary mask array with shape (time x y x z) indicating filled regions in each frame.
65
+ arena_label : int
66
+ Unique identifier for the current processing arena/session to name saved output files.
67
+ starting_time : int
68
+ Zero-based index of the first frame to begin network detection and analysis from.
69
+ visu : NDArray
70
+ Visualization video array (time x y x z) with RGB channels for overlay rendering.
71
+ origin : NDArray[np.uint8]
72
+ Binary mask defining a central region of interest to exclude from network detection.
73
+ smooth_segmentation_over_time : bool, optional (default=True)
74
+ Flag indicating whether to apply temporal smoothing using adjacent frame data.
75
+ detect_pseudopods : bool, optional (default=True)
76
+ Determines if pseudopod regions should be detected and merged with the network.
77
+ save_coord_network : bool, optional (default=True)
78
+ Controls saving of detected network/pseudopod coordinates as NumPy arrays.
79
+ show_seg : bool, optional (default=False)
80
+ Enables real-time visualization display during processing.
81
+
82
+ Returns
83
+ -------
84
+ NDArray[np.uint8]
85
+ 3D array containing detected network structures with shape (time x y x z). Uses:
86
+ - `0` for background,
87
+ - `1` for regular network components,
88
+ - `2` for pseudopod regions when detect_pseudopods is True.
89
+
90
+ Notes
91
+ -----
92
+ - Memory-intensive operations on large arrays may require system resources.
93
+ - Temporal smoothing effectiveness depends on network dynamics consistency between frames.
94
+ - Pseudopod detection requires sufficient contrast with the background in grayscale images.
95
+ """
96
+ logging.info(f"Arena n°{arena_label}. Starting network detection.")
97
+ # converted_video = self.converted_video; binary=self.binary; arena_label=1; starting_time=0; visu=self.visu; origin=None; smooth_segmentation_over_time=True; detect_pseudopods=True;save_coord_network=True; show_seg=False
98
+ dims = binary.shape
99
+ pseudopod_min_size = 50
100
+ if detect_pseudopods:
101
+ pseudopod_vid = np.zeros_like(binary, dtype=bool)
102
+ potential_network = np.zeros_like(binary, dtype=bool)
103
+ network_dynamics = np.zeros_like(binary, dtype=np.uint8)
104
+ do_convert = True
105
+ if visu is None:
106
+ do_convert = False
107
+ visu = np.stack((converted_video, converted_video, converted_video), axis=3)
108
+ greyscale = converted_video[-1, ...]
109
+ else:
110
+ greyscale = visu[-1, ...].mean(axis=-1)
111
+ NetDet = NetworkDetection(greyscale, possibly_filled_pixels=binary[-1, ...],
112
+ origin_to_add=origin)
113
+ NetDet.get_best_network_detection_method()
114
+ if do_convert:
115
+ NetDet.greyscale_image = converted_video[-1, ...]
116
+ lighter_background = NetDet.greyscale_image[binary[-1, ...] > 0].mean() < NetDet.greyscale_image[
117
+ binary[-1, ...] == 0].mean()
118
+
119
+ for t in np.arange(starting_time, dims[0]): # 20):#
120
+ if do_convert:
121
+ greyscale = visu[t, ...].mean(axis=-1)
122
+ else:
123
+ greyscale = converted_video[t, ...]
124
+ NetDet_fast = NetworkDetection(greyscale, possibly_filled_pixels=binary[t, ...],
125
+ origin_to_add=origin, best_result=NetDet.best_result)
126
+ NetDet_fast.detect_network()
127
+ NetDet_fast.greyscale_image = converted_video[t, ...]
128
+ if detect_pseudopods:
129
+ NetDet_fast.detect_pseudopods(lighter_background, pseudopod_min_size=pseudopod_min_size)
130
+ NetDet_fast.merge_network_with_pseudopods()
131
+ pseudopod_vid[t, ...] = NetDet_fast.pseudopods
132
+ potential_network[t, ...] = NetDet_fast.complete_network
133
+ if dims[0] == 1:
134
+ network_dynamics = potential_network
135
+ else:
136
+ for t in np.arange(starting_time, dims[0]): # 20):#
137
+ if smooth_segmentation_over_time:
138
+ if 2 <= t <= (dims[0] - 2):
139
+ computed_network = potential_network[(t - 2):(t + 3), :, :].sum(axis=0)
140
+ computed_network[computed_network == 1] = 0
141
+ computed_network[computed_network > 1] = 1
142
+ else:
143
+ if t < 2:
144
+ computed_network = potential_network[:2, :, :].sum(axis=0)
145
+ else:
146
+ computed_network = potential_network[-2:, :, :].sum(axis=0)
147
+ computed_network[computed_network > 0] = 1
148
+ else:
149
+ computed_network = computed_network[t, :, :].copy()
150
+
151
+ if origin is not None:
152
+ computed_network = computed_network * (1 - origin)
153
+ origin_contours = get_contours(origin)
154
+ complete_network = np.logical_or(origin_contours, computed_network).astype(np.uint8)
155
+ else:
156
+ complete_network = computed_network
157
+ complete_network = keep_one_connected_component(complete_network)
158
+
159
+ if detect_pseudopods:
160
+ # Make sure that removing pseudopods do not cut the network:
161
+ without_pseudopods = complete_network * (1 - pseudopod_vid[t])
162
+ only_connected_network = keep_one_connected_component(without_pseudopods)
163
+ # # Option A: To add these cutting regions to the pseudopods do:
164
+ pseudopods = (1 - only_connected_network) * complete_network
165
+ pseudopod_vid[t] = pseudopods
166
+ network_dynamics[t] = complete_network
167
+
168
+ imtoshow = visu[t, ...]
169
+ eroded_binary = cv2.erode(network_dynamics[t, ...], cross_33)
170
+ net_coord = np.nonzero(network_dynamics[t, ...] - eroded_binary)
171
+ imtoshow[net_coord[0], net_coord[1], :] = (34, 34, 158)
172
+ if show_seg:
173
+ cv2.imshow("", cv2.resize(imtoshow, (1000, 1000)))
174
+ cv2.waitKey(1)
175
+ else:
176
+ visu[t, ...] = imtoshow
177
+ if show_seg:
178
+ cv2.destroyAllWindows()
179
+
180
+ network_coord = smallest_memory_array(np.nonzero(network_dynamics), "uint")
181
+ pseudopod_coord = None
182
+ if detect_pseudopods:
183
+ network_dynamics[pseudopod_vid > 0] = 2
184
+ pseudopod_coord = smallest_memory_array(np.nonzero(pseudopod_vid), "uint")
185
+ if save_coord_network:
186
+ np.save(f"coord_pseudopods{arena_label}_t{dims[0]}_y{dims[1]}_x{dims[2]}.npy", pseudopod_coord)
187
+ if save_coord_network:
188
+ np.save(f"coord_network{arena_label}_t{dims[0]}_y{dims[1]}_x{dims[2]}.npy", network_coord)
189
+ return network_coord, pseudopod_coord
49
190
 
50
191
 
51
192
  class NetworkDetection:
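For context, the temporal smoothing introduced in detect_network_dynamics keeps a pixel only when it is detected in at least two of the five frames centred on t, with a two-frame fallback at the start and end of the video. A minimal sketch of that rule, assuming a boolean (t, y, x) array named potential_network (names here are illustrative, not the package's API):

import numpy as np

def smooth_frame(potential_network: np.ndarray, t: int) -> np.ndarray:
    # For interior frames (2 <= t <= T-3): keep a pixel only if it is detected
    # in at least two of the five frames centred on t. Boundary frames fall
    # back to a two-frame window in the code above.
    window = potential_network[t - 2:t + 3].sum(axis=0)
    return window >= 2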
@@ -55,7 +196,7 @@ class NetworkDetection:
55
196
  Class for detecting vessels in images using Frangi and Sato filters with various parameter sets.
56
197
  It applies different thresholding methods, calculates quality metrics, and selects the best detection method.
57
198
  """
58
- def __init__(self, greyscale_image: NDArray[np.uint8], possibly_filled_pixels: NDArray[np.uint8], add_rolling_window: bool=False, origin_to_add: NDArray[np.uint8]=None, best_result: dict=None):
199
+ def __init__(self, greyscale_image: NDArray[np.uint8], possibly_filled_pixels: NDArray[np.uint8]=None, add_rolling_window: bool=False, origin_to_add: NDArray[np.uint8]=None, best_result: dict=None):
59
200
  """
60
201
  Initialize the object with given parameters.
61
202
 
@@ -63,8 +204,8 @@ class NetworkDetection:
63
204
  ----------
64
205
  greyscale_image : NDArray[np.uint8]
65
206
  The input greyscale image.
66
- possibly_filled_pixels : NDArray[np.uint8]
67
- Image containing possibly filled pixels.
207
+ possibly_filled_pixels : NDArray[np.uint8], optional
208
+ Image containing possibly filled pixels. Defaults to None.
68
209
  add_rolling_window : bool, optional
69
210
  Flag to add rolling window. Defaults to False.
70
211
  origin_to_add : NDArray[np.uint8], optional
@@ -73,7 +214,10 @@ class NetworkDetection:
73
214
  Best result dictionary. Defaults to None.
74
215
  """
75
216
  self.greyscale_image = greyscale_image
76
- self.possibly_filled_pixels = possibly_filled_pixels
217
+ if possibly_filled_pixels is None:
218
+ self.possibly_filled_pixels = np.ones(self.greyscale_image.shape, dtype=np.uint8)
219
+ else:
220
+ self.possibly_filled_pixels = possibly_filled_pixels
77
221
  self.best_result = best_result
78
222
  self.add_rolling_window = add_rolling_window
79
223
  self.origin_to_add = origin_to_add
@@ -128,7 +272,7 @@ class NetworkDetection:
128
272
  'binary': binary_otsu,
129
273
  'quality': quality_otsu,
130
274
  'filtered': frangi_result,
131
- 'filter': f'frangi',
275
+ 'filter': f'Frangi',
132
276
  'rolling_window': False,
133
277
  'sigmas': sigmas
134
278
  })
@@ -141,7 +285,7 @@ class NetworkDetection:
141
285
  'binary': binary_rolling,
142
286
  'quality': quality_rolling,
143
287
  'filtered': frangi_result,
144
- 'filter': f'frangi',
288
+ 'filter': f'Frangi',
145
289
  'rolling_window': True,
146
290
  'sigmas': sigmas
147
291
  })
@@ -200,7 +344,7 @@ class NetworkDetection:
200
344
  'binary': binary_otsu,
201
345
  'quality': quality_otsu,
202
346
  'filtered': sato_result,
203
- 'filter': f'sato',
347
+ 'filter': f'Sato',
204
348
  'rolling_window': False,
205
349
  'sigmas': sigmas
206
350
  })
@@ -215,7 +359,7 @@ class NetworkDetection:
215
359
  'binary': binary_rolling,
216
360
  'quality': quality_rolling,
217
361
  'filtered': sato_result,
218
- 'filter': f'sato',
362
+ 'filter': f'Sato',
219
363
  'rolling_window': True,
220
364
  'sigmas': sigmas
221
365
  })
@@ -283,7 +427,7 @@ class NetworkDetection:
283
427
  performs segmentation using either rolling window or Otsu's thresholding.
284
428
  The final network detection result is stored in `self.incomplete_network`.
285
429
  """
286
- if self.best_result['filter'] == 'frangi':
430
+ if self.best_result['filter'] == 'Frangi':
287
431
  filtered_result = frangi(self.greyscale_image, sigmas=self.best_result['sigmas'], beta=self.frangi_beta, gamma=self.frangi_gamma, black_ridges=self.black_ridges)
288
432
  else:
289
433
  filtered_result = sato(self.greyscale_image, sigmas=self.best_result['sigmas'], black_ridges=self.black_ridges, mode='reflect')
@@ -295,7 +439,7 @@ class NetworkDetection:
295
439
  binary_image = filtered_result > thresh_otsu
296
440
  self.incomplete_network = binary_image * self.possibly_filled_pixels
297
441
 
298
- def change_greyscale(self, img: NDArray[np.uint8], c_space_dict: dict):
442
+ def change_greyscale(self, img: NDArray[np.uint8], first_dict: dict):
299
443
  """
300
444
  Change the image to greyscale using color space combinations.
301
445
 
@@ -307,57 +451,45 @@ class NetworkDetection:
307
451
  ----------
308
452
  img : ndarray of uint8
309
453
  The input image to be converted to greyscale.
310
- c_space_dict : dict
311
- A dictionary where keys are color space names and values
312
- are parameters for those color spaces.
313
-
314
454
  """
315
- self.greyscale_image, g2 = generate_color_space_combination(img, list(c_space_dict.keys()), c_space_dict)
455
+ self.greyscale_image, g2, all_c_spaces, first_pc_vector = generate_color_space_combination(img, list(first_dict.keys()), first_dict)
316
456
 
317
- def detect_pseudopods(self, lighter_background: bool, pseudopod_min_width: int=5, pseudopod_min_size: int=50):
457
+ def detect_pseudopods(self, lighter_background: bool, pseudopod_min_width: int=5, pseudopod_min_size: int=50, only_one_connected_component: bool=True):
318
458
  """
319
- Detect and extract pseudopods from the image based on given parameters.
459
+ Detect pseudopods in a binary image.
320
460
 
321
- This method performs a series of morphological operations and distance
322
- transformations to identify pseudopods in the image. It uses binary
323
- dilation, connected components analysis, and thresholding to isolate
324
- pseudopod structures.
461
+ Identify and process regions that resemble pseudopods based on width, size,
462
+ and connectivity criteria. This function is used to detect and label areas
463
+ that are indicative of pseudopod-like structures within a binary image.
325
464
 
326
465
  Parameters
327
466
  ----------
328
467
  lighter_background : bool
329
- Flag indicating whether the background is lighter than the foreground.
468
+ Boolean flag to indicate if the background should be considered lighter.
330
469
  pseudopod_min_width : int, optional
331
- Minimum width of pseudopods to be detected. Default is 5.
470
+ Minimum width for pseudopods to be considered valid. Default is 5.
332
471
  pseudopod_min_size : int, optional
333
- Minimum size of pseudopods to be detected. Default is 50.
472
+ Minimum size for pseudopods to be considered valid. Default is 50.
473
+ only_one_connected_component : bool, optional
474
+ Flag to ensure only one connected component is kept. Default is True.
475
+
476
+ Returns
477
+ -------
478
+ None
334
479
 
335
- Attributes (modified)
336
- ----------------------
337
- self.pseudopods : ndarray
338
- Updated to reflect the detected pseudopod regions.
480
+ Notes
481
+ -----
482
+ This function modifies internal attributes of the object, specifically setting `self.pseudopods` to an array indicating pseudopod regions.
339
483
 
340
484
  Examples
341
485
  --------
342
- >>> possibly_filled_pixels = np.random.randint(0, 2, dims, dtype=np.uint8)
343
- >>> possibly_filled_pixels = keep_one_connected_component(possibly_filled_pixels)
344
- >>> origin_to_add = np.zeros(dims, dtype=np.uint8)
345
- >>> mid = dims[0] // 2
346
- >>> ite = 2
347
- >>> while not origin_to_add.any():
348
- >>> ite += 1
349
- >>> origin_to_add[mid - ite: mid + ite, mid - ite: mid + ite] = possibly_filled_pixels[mid - ite: mid + ite, mid - ite: mid + ite]
350
- >>> greyscale_image = possibly_filled_pixels.copy()
351
- >>> greyscale_image[greyscale_image > 0] = np.random.randint(200, 255, possibly_filled_pixels.sum())
352
- >>> greyscale_image[greyscale_image == 0] = np.random.randint(0, 50, possibly_filled_pixels.size - possibly_filled_pixels.sum())
353
- >>> add_rolling_window = False
354
- >>> NetDet = NetworkDetection(greyscale_image, possibly_filled_pixels, add_rolling_window, origin_to_add)
355
- >>> NetDet.get_best_network_detection_method()
356
- >>> lighter_background = True
357
- >>> pseudopod_min_width = 1
358
- >>> pseudopod_min_size = 3
359
- >>> NetDet.detect_pseudopods(lighter_background, pseudopod_min_width, pseudopod_min_size)
360
- >>> print(NetDet.pseudopods)
486
+ >>> result = detect_pseudopods(True, 5, 50)
487
+ >>> print(self.pseudopods)
488
+ array([[0, 1, ..., 0],
489
+ [0, 0, ..., 0],
490
+ ...,
491
+ [0, 1, ..., 0]], dtype=uint8)
492
+
361
493
  """
362
494
 
363
495
  closed_im = close_holes(self.possibly_filled_pixels)
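A rough sketch of the width criterion described in this docstring (illustrative only; the package's own create_ellipse kernel and close_holes/keep_one_connected_component helpers are not reproduced here):

import cv2
import numpy as np
from skimage import morphology

def wide_peripheral_regions(binary_mask: np.ndarray, min_width: int = 5) -> np.ndarray:
    # The medial-axis distance map gives, for each skeleton pixel, its distance
    # to the background, i.e. a local half-width of the shape.
    _, widths = morphology.medial_axis(binary_mask > 0, return_distance=True)
    seeds = (widths >= min_width).astype(np.uint8)
    # Dilate the seeds so they cover the surrounding region before the
    # connected-component size filter (pseudopod_min_size) is applied.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    return cv2.dilate(seeds, kernel, iterations=1) * (binary_mask > 0).astype(np.uint8)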
@@ -380,7 +512,7 @@ class NetworkDetection:
380
512
 
381
513
  _, pseudopod_widths = morphology.medial_axis(high_int_in_periphery, return_distance=True, rng=0)
382
514
  bin_im = pseudopod_widths >= pseudopod_min_width
383
- dil_bin_im = cv2.dilate(bin_im.astype(np.uint8), kernel=Ellipse((7, 7)).create().astype(np.uint8), iterations=1)
515
+ dil_bin_im = cv2.dilate(bin_im.astype(np.uint8), kernel=create_ellipse(7, 7).astype(np.uint8), iterations=1)
384
516
  bin_im = high_int_in_periphery * dil_bin_im
385
517
  nb, shapes, stats, centro = cv2.connectedComponentsWithStats(bin_im)
386
518
  true_pseudopods = np.nonzero(stats[:, 4] > pseudopod_min_size)[0][1:]
@@ -388,11 +520,14 @@ class NetworkDetection:
388
520
 
389
521
  # Make sure that the tubes connecting two pseudopods belong to pseudopods if removing pseudopods cuts the network
390
522
  complete_network = np.logical_or(true_pseudopods, self.incomplete_network).astype(np.uint8)
391
- complete_network = keep_one_connected_component(complete_network)
392
- without_pseudopods = complete_network.copy()
393
- without_pseudopods[true_pseudopods] = 0
394
- only_connected_network = keep_one_connected_component(without_pseudopods)
395
- self.pseudopods = (1 - only_connected_network) * complete_network * self.possibly_filled_pixels
523
+ if only_one_connected_component:
524
+ complete_network = keep_one_connected_component(complete_network)
525
+ without_pseudopods = complete_network.copy()
526
+ without_pseudopods[true_pseudopods] = 0
527
+ only_connected_network = keep_one_connected_component(without_pseudopods)
528
+ self.pseudopods = (1 - only_connected_network) * complete_network * self.possibly_filled_pixels
529
+ else:
530
+ self.pseudopods = true_pseudopods.astype(np.uint8)
396
531
 
397
532
  def merge_network_with_pseudopods(self):
398
533
  """
@@ -406,7 +541,98 @@ class NetworkDetection:
406
541
  self.incomplete_network *= (1 - self.pseudopods)
407
542
 
408
543
 
409
- def get_skeleton_and_widths(pad_network: NDArray[np.uint8], pad_origin: NDArray[np.uint8]=None, pad_origin_centroid: NDArray=None) -> Tuple[NDArray[np.uint8], NDArray[np.float64], NDArray[np.uint8]]:
544
+ def extract_graph_dynamics(converted_video: NDArray, coord_network: NDArray, arena_label: int,
545
+ starting_time: int=0, origin: NDArray[np.uint8]=None, coord_pseudopods: NDArray=None):
546
+ """
547
+ Extracts dynamic graph data from video frames based on network dynamics.
548
+
549
+ This function processes time-series binary network structures to extract evolving vertices and edges over time. It computes spatial relationships between networks and an origin point through image processing steps including contour detection, padding for alignment, skeleton extraction, and morphological analysis. Vertex and edge attributes like position, connectivity, width, intensity, and betweenness are compiled into tables saved as CSV files.
550
+
551
+ Parameters
552
+ ----------
553
+ converted_video : NDArray
554
+ 3D video data array (t x y x) containing pixel intensities used for calculating edge intensity attributes during table generation.
555
+ coord_network : NDArray[np.uint8]
556
+ 3D binary network mask array (t x y x) representing connectivity structures across time points.
557
+ arena_label : int
558
+ Unique identifier to prefix output filenames corresponding to specific experimental arenas.
559
+ starting_time : int, optional (default=0)
560
+ Time index within `coord_network` to begin processing from (exclusive of origin initialization).
561
+ origin : NDArray[np.uint8], optional (default=None)
562
+ Binary mask identifying the region of interest's central origin for spatial reference during network comparison.
563
+
564
+ Returns
565
+ -------
566
+ None
567
+ Saves two CSV files in working directory:
568
+ 1. `vertex_table{arena_label}_t{T}_y{Y}_x{X}.csv` - Vertex table with time, coordinates, and connectivity information
569
+ 2. `edge_table{arena_label}_t{T}_y{Y}_x{X}.csv` - Edge table containing attributes like length, width, intensity, and betweenness
570
+
571
+ Notes
572
+ ---
573
+ Output CSVs use NumPy arrays converted to pandas DataFrames with columns:
574
+ - Vertex table includes timestamps (t), coordinates (y,x), and connectivity flags.
575
+ - Edge table contains betweenness centrality calculated during skeleton processing.
576
+ Origin contours are spatially aligned through padding operations to maintain coordinate consistency across time points.
577
+ """
578
+ logging.info(f"Arena n°{arena_label}. Starting graph extraction.")
579
+ # converted_video = self.converted_video; coord_network=self.coord_network; arena_label=1; starting_time=0; origin=self.origin
580
+ dims = converted_video.shape[:3]
581
+ if origin is not None:
582
+ _, _, _, origin_centroid = cv2.connectedComponentsWithStats(origin)
583
+ origin_centroid = np.round((origin_centroid[1, 1], origin_centroid[1, 0])).astype(np.int64)
584
+ pad_origin_centroid = origin_centroid + 1
585
+ origin_contours = get_contours(origin)
586
+ pad_origin = add_padding([origin])[0]
587
+ else:
588
+ pad_origin_centroid = None
589
+ pad_origin = None
590
+ origin_contours = None
591
+ vertex_table = None
592
+ for t in np.arange(starting_time, dims[0]): # t=320 Y, X = 729, 554
593
+ computed_network = np.zeros((dims[1], dims[2]), dtype=np.uint8)
594
+ net_t = coord_network[1:, coord_network[0, :] == t]
595
+ computed_network[net_t[0], net_t[1]] = 1
596
+ if origin is not None:
597
+ computed_network = computed_network * (1 - origin)
598
+ computed_network = np.logical_or(origin_contours, computed_network).astype(np.uint8)
599
+ else:
600
+ computed_network = computed_network.astype(np.uint8)
601
+ if computed_network.any():
602
+ computed_network = keep_one_connected_component(computed_network)
603
+ pad_network = add_padding([computed_network])[0]
604
+ pad_skeleton, pad_distances, pad_origin_contours = get_skeleton_and_widths(pad_network, pad_origin,
605
+ pad_origin_centroid)
606
+ edge_id = EdgeIdentification(pad_skeleton, pad_distances, t)
607
+ edge_id.run_edge_identification()
608
+ if pad_origin_contours is not None:
609
+ origin_contours = remove_padding([pad_origin_contours])[0]
610
+ growing_areas = None
611
+ if coord_pseudopods is not None:
612
+ growing_areas = coord_pseudopods[1:, coord_pseudopods[0, :] == t]
613
+ edge_id.make_vertex_table(origin_contours, growing_areas)
614
+ edge_id.make_edge_table(converted_video[t, ...])
615
+ edge_id.vertex_table = np.hstack((np.repeat(t, edge_id.vertex_table.shape[0])[:, None], edge_id.vertex_table))
616
+ edge_id.edge_table = np.hstack((np.repeat(t, edge_id.edge_table.shape[0])[:, None], edge_id.edge_table))
617
+ if vertex_table is None:
618
+ vertex_table = edge_id.vertex_table.copy()
619
+ edge_table = edge_id.edge_table.copy()
620
+ else:
621
+ vertex_table = np.vstack((vertex_table, edge_id.vertex_table))
622
+ edge_table = np.vstack((edge_table, edge_id.edge_table))
623
+
624
+ vertex_table = pd.DataFrame(vertex_table, columns=["t", "y", "x", "vertex_id", "is_tip", "origin",
625
+ "vertex_connected"])
626
+ edge_table = pd.DataFrame(edge_table,
627
+ columns=["t", "edge_id", "vertex1", "vertex2", "length", "average_width", "intensity",
628
+ "betweenness_centrality"])
629
+ vertex_table.to_csv(
630
+ f"vertex_table{arena_label}_t{dims[0]}_y{dims[1]}_x{dims[2]}.csv")
631
+ edge_table.to_csv(
632
+ f"edge_table{arena_label}_t{dims[0]}_y{dims[1]}_x{dims[2]}.csv")
633
+
634
+
635
+ def get_skeleton_and_widths(pad_network: NDArray[np.uint8], pad_origin: NDArray[np.uint8]=None, pad_origin_centroid: NDArray[np.int64]=None) -> Tuple[NDArray[np.uint8], NDArray[np.float64], NDArray[np.uint8]]:
410
636
  """
411
637
  Get skeleton and widths from a network.
412
638
 
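A minimal usage sketch for the two CSV tables described above (file names follow the pattern given in the docstring; the frame and pixel counts below are placeholders):

import pandas as pd

# e.g. arena 1, 100 frames of 1000 x 1000 pixels (placeholder dimensions)
vertices = pd.read_csv("vertex_table1_t100_y1000_x1000.csv", index_col=0)
edges = pd.read_csv("edge_table1_t100_y1000_x1000.csv", index_col=0)

# Columns as listed in the diff:
# vertices: t, y, x, vertex_id, is_tip, origin, vertex_connected
# edges:    t, edge_id, vertex1, vertex2, length, average_width, intensity, betweenness_centrality
edges_at_t0 = edges[edges["t"] == 0]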
@@ -695,24 +921,47 @@ def get_inner_vertices(pad_skeleton: NDArray[np.uint8], potential_tips: NDArray[
695
921
  potential_tips[wrong_tip] = 0
696
922
  else:
697
923
  # otherwise do:
698
- # Find the most 4-connected one, and check whether
699
- # its 4 connected neighbors have 1 or more other connexions
700
- # 1. # Find the most 4-connected one:
701
- vertices_group_4 = cnv4.equal_neighbor_nb * vertices_group
702
- max_con = vertices_group_4.max()
703
- most_con = np.nonzero(vertices_group_4 == max_con)
704
- # 2. Check its 4-connected neighbors and remove those having only 1 other 8-connexion
705
- skel_copy = pad_skeleton.copy()
706
- skel_copy[most_con] = 0
707
- skel_copy[most_con[0] - 1, most_con[1]] = 0
708
- skel_copy[most_con[0] + 1, most_con[1]] = 0
709
- skel_copy[most_con[0], most_con[1] - 1] = 0
710
- skel_copy[most_con[0], most_con[1] + 1] = 0
711
- sub_cnv8 = CompareNeighborsWithValue(skel_copy, 8)
712
- sub_cnv8.is_equal(1, and_itself=False)
713
- # Remove those having
714
- v_to_remove = ((vertices_group_4 > 0) * sub_cnv8.equal_neighbor_nb) == 1
715
- pad_vertices[v_to_remove] = 0
924
+ # Find the most 8-connected one, if its 4-connected neighbors have no more 8-connexions than 4-connexions + 1, they can be removed
925
+ # Otherwise,
926
+ # Find the most 4-connected one, and remove its 4 connected neighbors having only 1 or other 8-connexion
927
+
928
+ c = zoom_on_nonzero(vertices_group)
929
+ # 1. Find the most 8-connected one:
930
+ sub_v_grp = vertices_group[c[0]:c[1], c[2]:c[3]]
931
+ c8 = cnv8.equal_neighbor_nb[c[0]:c[1], c[2]:c[3]]
932
+ vertices_group_8 = c8 * sub_v_grp
933
+ max_8_con = vertices_group_8.max()
934
+ most_8_con = np.nonzero(vertices_group_8 == max_8_con)
935
+ # c4[(most_8_con[0][0] - 1):(most_8_con[0][0] + 2), (most_8_con[1][0] - 1):(most_8_con[1][0] + 2)]
936
+ if len(most_8_con[0]) == 1:
937
+ skel_copy = pad_skeleton[c[0]:c[1], c[2]:c[3]].copy()
938
+ skel_copy[most_8_con] = 0
939
+ sub_cnv8 = CompareNeighborsWithValue(skel_copy, 8)
940
+ sub_cnv8.is_equal(1, and_itself=False)
941
+ sub_cnv4 = CompareNeighborsWithValue(skel_copy, 4)
942
+ sub_cnv4.is_equal(1, and_itself=False)
943
+ v_to_remove = sub_v_grp * (sub_cnv8.equal_neighbor_nb <= sub_cnv4.equal_neighbor_nb + 1)
944
+ else:
945
+ c4 = cnv4.equal_neighbor_nb[c[0]:c[1], c[2]:c[3]]
946
+ # 1. # Find the most 4-connected one:
947
+ vertices_group_4 = c4 * sub_v_grp
948
+ max_con = vertices_group_4.max()
949
+ most_con = np.nonzero(vertices_group_4 == max_con)
950
+ if len(most_con[0]) < sub_v_grp.sum():
951
+ # 2. Check its 4-connected neighbors and remove those having only 1 other 8-connexion
952
+ skel_copy = pad_skeleton[c[0]:c[1], c[2]:c[3]].copy()
953
+ skel_copy[most_con] = 0
954
+ skel_copy[most_con[0] - 1, most_con[1]] = 0
955
+ skel_copy[most_con[0] + 1, most_con[1]] = 0
956
+ skel_copy[most_con[0], most_con[1] - 1] = 0
957
+ skel_copy[most_con[0], most_con[1] + 1] = 0
958
+ sub_cnv8 = CompareNeighborsWithValue(skel_copy, 8)
959
+ sub_cnv8.is_equal(1, and_itself=False)
960
+ # There are:
961
+ v_to_remove = ((vertices_group_4 > 0) * sub_cnv8.equal_neighbor_nb) == 1
962
+ else:
963
+ v_to_remove = np.zeros(sub_v_grp.shape, dtype=bool)
964
+ pad_vertices[c[0]:c[1], c[2]:c[3]][v_to_remove] = 0
716
965
 
717
966
  # Other vertices to remove:
718
967
  # - Those that are forming a cross with 0 at the center while the skeleton contains 1
@@ -770,7 +1019,7 @@ class EdgeIdentification:
770
1019
  provided skeleton and distance arrays. It performs various operations to
771
1020
  refine and label edges, ultimately producing a fully identified network.
772
1021
  """
773
- def __init__(self, pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64]):
1022
+ def __init__(self, pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64], t: int=0):
774
1023
  """
775
1024
  Initialize the class with skeleton and distance arrays.
776
1025
 
@@ -794,6 +1043,7 @@ class EdgeIdentification:
794
1043
  """
795
1044
  self.pad_skeleton = pad_skeleton
796
1045
  self.pad_distances = pad_distances
1046
+ self.t = t
797
1047
  self.remaining_vertices = None
798
1048
  self.vertices = None
799
1049
  self.growing_vertices = None
@@ -824,6 +1074,7 @@ class EdgeIdentification:
824
1074
  self.get_tipped_edges()
825
1075
  self.remove_tipped_edge_smaller_than_branch_width()
826
1076
  self.label_tipped_edges_and_their_vertices()
1077
+ self.check_vertex_existence()
827
1078
  self.label_edges_connected_with_vertex_clusters()
828
1079
  self.label_edges_connecting_vertex_clusters()
829
1080
  self.label_edges_from_known_vertices_iteratively()
@@ -881,11 +1132,6 @@ class EdgeIdentification:
881
1132
  smaller than the width of the nearest network branch (an information included in pad_distances).
882
1133
  This method also updates internal data structures (skeleton, edge coordinates, vertex/tip positions)
883
1134
  accordingly through pixel-wise analysis and connectivity checks.
884
-
885
- Parameters
886
- ----------
887
- pad_distances : ndarray of float64
888
- 2D array containing the network width (in pixels) at each position occupied by the skeleton
889
1135
  """
890
1136
  # Identify edges that are smaller than the width of the branch it is attached to
891
1137
  tipped_edges_to_remove = np.zeros(self.edge_lengths.shape[0], dtype=bool)
@@ -893,73 +1139,48 @@ class EdgeIdentification:
893
1139
  branches_to_remove = np.zeros(self.non_tip_vertices.shape[0], dtype=bool)
894
1140
  new_edge_pix_coord = []
895
1141
  remaining_tipped_edges_nb = 0
896
- if self.edge_pix_coord.shape[0] == 0:
897
- for i in range(len(self.edge_lengths)): # i = 3142 #1096 # 974 # 222
898
- Y, X = self.vertices_branching_tips[i, 0], self.vertices_branching_tips[i, 1]
899
- if np.nanmax(self.pad_distances[(Y - 1): (Y + 2), (X - 1): (X + 2)]) >= self.edge_lengths[i]:
900
- tipped_edges_to_remove[i] = True
901
- # Remove the tip
902
- self.pad_skeleton[self.tips_coord[i, 0], self.tips_coord[i, 1]] = 0
903
- # check whether the connecting vertex remains a vertex of not
904
- pad_sub_skeleton = np.pad(self.pad_skeleton[(Y - 2): (Y + 3), (X - 2): (X + 3)], [(1,), (1,)],
905
- mode='constant')
906
- sub_vertices, sub_tips = get_vertices_and_tips_from_skeleton(pad_sub_skeleton)
907
- # If the vertex does not connect at least 3 edges anymore, remove its vertex label
908
- if sub_vertices[3, 3] == 0:
909
- vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
910
- branches_to_remove[vertex_to_remove] = True
911
- # If that pixel became a tip connected to another vertex remove it from the skeleton
912
- if sub_tips[3, 3]:
913
- if sub_vertices[2:5, 2:5].sum() > 1:
914
- self.pad_skeleton[Y, X] = 0
915
- vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
916
- branches_to_remove[vertex_to_remove] = True
917
- else:
918
- remaining_tipped_edges_nb += 1
919
- else:
920
- for i in range(len(self.edge_lengths)): # i = 3142 #1096 # 974 # 222
921
- Y, X = self.vertices_branching_tips[i, 0], self.vertices_branching_tips[i, 1]
922
- edge_bool = self.edge_pix_coord[:, 2] == i + 1
923
- eY, eX = self.edge_pix_coord[edge_bool, 0], self.edge_pix_coord[edge_bool, 1]
924
- if np.nanmax(self.pad_distances[(Y - 1): (Y + 2), (X - 1): (X + 2)]) >= self.edge_lengths[i]:
925
- tipped_edges_to_remove[i] = True
926
- # Remove the edge
927
- self.pad_skeleton[eY, eX] = 0
928
- # Remove the tip
929
- self.pad_skeleton[self.tips_coord[i, 0], self.tips_coord[i, 1]] = 0
930
-
931
- # Remove the coordinates corresponding to that edge
932
- self.edge_pix_coord = np.delete(self.edge_pix_coord, edge_bool, 0)
933
-
934
- # check whether the connecting vertex remains a vertex of not
935
- pad_sub_skeleton = np.pad(self.pad_skeleton[(Y - 2): (Y + 3), (X - 2): (X + 3)], [(1,), (1,)],
936
- mode='constant')
937
- sub_vertices, sub_tips = get_vertices_and_tips_from_skeleton(pad_sub_skeleton)
938
- # If the vertex does not connect at least 3 edges anymore, remove its vertex label
939
- if sub_vertices[3, 3] == 0:
1142
+ for i in range(len(self.edge_lengths)): # i = 3142 #1096 # 974 # 222
1143
+ Y, X = self.vertices_branching_tips[i, 0], self.vertices_branching_tips[i, 1]
1144
+ edge_bool = self.edge_pix_coord[:, 2] == i + 1
1145
+ eY, eX = self.edge_pix_coord[edge_bool, 0], self.edge_pix_coord[edge_bool, 1]
1146
+ if np.nanmax(self.pad_distances[(Y - 1): (Y + 2), (X - 1): (X + 2)]) >= self.edge_lengths[i]:
1147
+ tipped_edges_to_remove[i] = True
1148
+ # Remove the edge
1149
+ self.pad_skeleton[eY, eX] = 0
1150
+ # Remove the tip
1151
+ self.pad_skeleton[self.tips_coord[i, 0], self.tips_coord[i, 1]] = 0
1152
+
1153
+ # Remove the coordinates corresponding to that edge
1154
+ self.edge_pix_coord = np.delete(self.edge_pix_coord, edge_bool, 0)
1155
+
1156
+ # check whether the connecting vertex remains a vertex of not
1157
+ pad_sub_skeleton = np.pad(self.pad_skeleton[(Y - 2): (Y + 3), (X - 2): (X + 3)], [(1,), (1,)],
1158
+ mode='constant')
1159
+ sub_vertices, sub_tips = get_vertices_and_tips_from_skeleton(pad_sub_skeleton)
1160
+ # If the vertex does not connect at least 3 edges anymore, remove its vertex label
1161
+ if sub_vertices[3, 3] == 0:
1162
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
1163
+ branches_to_remove[vertex_to_remove] = True
1164
+ # If that pixel became a tip connected to another vertex remove it from the skeleton
1165
+ if sub_tips[3, 3]:
1166
+ if sub_vertices[2:5, 2:5].sum() > 1:
1167
+ self.pad_skeleton[Y, X] = 0
1168
+ self.edge_pix_coord = np.delete(self.edge_pix_coord, np.all(self.edge_pix_coord[:, :2] == [Y, X], axis=1), 0)
940
1169
  vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
941
1170
  branches_to_remove[vertex_to_remove] = True
942
- # If that pixel became a tip connected to another vertex remove it from the skeleton
943
- if sub_tips[3, 3]:
944
- if sub_vertices[2:5, 2:5].sum() > 1:
945
- self.pad_skeleton[Y, X] = 0
946
- self.edge_pix_coord = np.delete(self.edge_pix_coord, np.all(self.edge_pix_coord[:, :2] == [Y, X], axis=1), 0)
947
- vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
948
- branches_to_remove[vertex_to_remove] = True
949
- else:
950
- remaining_tipped_edges_nb += 1
951
- new_edge_pix_coord.append(np.stack((eY, eX, np.repeat(remaining_tipped_edges_nb, len(eY))), axis=1))
1171
+ else:
1172
+ remaining_tipped_edges_nb += 1
1173
+ new_edge_pix_coord.append(np.stack((eY, eX, np.repeat(remaining_tipped_edges_nb, len(eY))), axis=1))
952
1174
 
953
1175
  # Check that excedent connected components are 1 pixel size, if so:
954
1176
  # It means that they were neighbors to removed tips and not necessary for the skeleton
955
1177
  nb, sh = cv2.connectedComponents(self.pad_skeleton)
956
1178
  if nb > 2:
957
- for i in range(2, nb):
958
- excedent = sh == i
959
- if (excedent).sum() == 1:
960
- self.pad_skeleton[excedent] = 0
961
- # else:
962
- # print("More than one pixel area excedent components exists")
1179
+ logging.error("Removing small tipped edges split the skeleton")
1180
+ # for i in range(2, nb):
1181
+ # excedent = sh == i
1182
+ # if (excedent).sum() == 1:
1183
+ # self.pad_skeleton[excedent] = 0
963
1184
 
964
1185
  # Remove in distances the pixels removed in skeleton:
965
1186
  self.pad_distances *= self.pad_skeleton
@@ -968,21 +1189,21 @@ class EdgeIdentification:
968
1189
  if len(new_edge_pix_coord) > 0:
969
1190
  self.edge_pix_coord = np.vstack(new_edge_pix_coord)
970
1191
 
971
- # Remove tips connected to very small edges
972
- self.tips_coord = np.delete(self.tips_coord, tipped_edges_to_remove, 0)
973
- # Add corresponding edge names
974
- self.tips_coord = np.hstack((self.tips_coord, np.arange(1, len(self.tips_coord) + 1)[:, None]))
1192
+ # # Remove tips connected to very small edges
1193
+ # self.tips_coord = np.delete(self.tips_coord, tipped_edges_to_remove, 0)
1194
+ # # Add corresponding edge names
1195
+ # self.tips_coord = np.hstack((self.tips_coord, np.arange(1, len(self.tips_coord) + 1)[:, None]))
975
1196
 
976
- # Within all branching (non-tip) vertices, keep those that did not lose their vertex status because of the edge removal
977
- self.non_tip_vertices = np.delete(self.non_tip_vertices, branches_to_remove, 0)
1197
+ # # Within all branching (non-tip) vertices, keep those that did not lose their vertex status because of the edge removal
1198
+ # self.non_tip_vertices = np.delete(self.non_tip_vertices, branches_to_remove, 0)
978
1199
 
979
- # Get the branching vertices who kept their typped edge
980
- self.vertices_branching_tips = np.delete(self.vertices_branching_tips, tipped_edges_to_remove, 0)
1200
+ # # Get the branching vertices who kept their typped edge
1201
+ # self.vertices_branching_tips = np.delete(self.vertices_branching_tips, tipped_edges_to_remove, 0)
981
1202
 
982
1203
  # Within all branching (non-tip) vertices, keep those that do not connect a tipped edge.
983
- v_branching_tips_in_branching_v = find_common_coord(self.non_tip_vertices, self.vertices_branching_tips[:, :2])
984
- self.remaining_vertices = np.delete(self.non_tip_vertices, v_branching_tips_in_branching_v, 0)
985
- ordered_v_coord = np.vstack((self.tips_coord[:, :2], self.vertices_branching_tips[:, :2], self.remaining_vertices))
1204
+ # v_branching_tips_in_branching_v = find_common_coord(self.non_tip_vertices, self.vertices_branching_tips[:, :2])
1205
+ # self.remaining_vertices = np.delete(self.non_tip_vertices, v_branching_tips_in_branching_v, 0)
1206
+ # ordered_v_coord = np.vstack((self.tips_coord[:, :2], self.vertices_branching_tips[:, :2], self.remaining_vertices))
986
1207
 
987
1208
  # tips = self.tips_coord
988
1209
  # branching_any_edge = self.non_tip_vertices
@@ -1029,6 +1250,9 @@ class EdgeIdentification:
1029
1250
  self.numbered_vertices = np.zeros(self.im_shape, dtype=np.uint32)
1030
1251
  self.numbered_vertices[ordered_v_coord[:, 0], ordered_v_coord[:, 1]] = np.arange(1, ordered_v_coord.shape[0] + 1)
1031
1252
  self.vertices = None
1253
+ self.vertex_index_map = {}
1254
+ for idx, (y, x) in enumerate(ordered_v_coord):
1255
+ self.vertex_index_map[idx + 1] = tuple((np.uint32(y), np.uint32(x)))
1032
1256
 
1033
1257
  # Name edges from 1 to the number of edges connecting tips and set the vertices labels from all tips to their connected vertices:
1034
1258
  self.edges_labels = np.zeros((self.tip_number, 3), dtype=np.uint32)
@@ -1042,6 +1266,20 @@ class EdgeIdentification:
1042
1266
  # Remove duplicates in vertices_branching_tips
1043
1267
  self.vertices_branching_tips = np.unique(self.vertices_branching_tips[:, :2], axis=0)
1044
1268
 
1269
+ def check_vertex_existence(self):
1270
+ if self.tips_coord.shape[0] == 0 and self.non_tip_vertices.shape[0] == 0:
1271
+ loop_coord = np.nonzero(self.pad_skeleton)
1272
+ start = 1
1273
+ end = 1
1274
+ vertex_coord = loop_coord[0][0], loop_coord[1][0]
1275
+ self.numbered_vertices[vertex_coord[0], vertex_coord[1]] = 1
1276
+ self.vertex_index_map[1] = vertex_coord
1277
+ self.non_tip_vertices = np.array(vertex_coord)[None, :]
1278
+ new_edge_lengths = len(loop_coord[0]) - 1
1279
+ new_edge_pix_coord = np.transpose(np.vstack(((loop_coord[0][1:], loop_coord[1][1:], np.zeros(new_edge_lengths, dtype=np.int32)))))
1280
+ self.edge_pix_coord = np.zeros((0, 3), dtype=np.int32)
1281
+ self._update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
1282
+
1045
1283
  def label_edges_connected_with_vertex_clusters(self):
1046
1284
  """
1047
1285
  Identify edges connected to touching vertices by processing vertex clusters.
@@ -1070,28 +1308,29 @@ class EdgeIdentification:
1070
1308
  # Find every vertex_to_vertex_connexion
1071
1309
  v_cluster_nb, self.v_cluster_lab, self.v_cluster_stats, vgc = cv2.connectedComponentsWithStats(
1072
1310
  (self.numbered_vertices > 0).astype(np.uint8), connectivity=8)
1073
- max_v_nb = np.max(self.v_cluster_stats[1:, 4])
1074
- cropped_skeleton_list = []
1075
- starting_vertices_list = []
1076
- for v_nb in range(2, max_v_nb + 1):
1077
- labels = np.nonzero(self.v_cluster_stats[:, 4] == v_nb)[0]
1078
- coord_list = []
1079
- for lab in labels: # lab=labels[0]
1080
- coord_list.append(np.nonzero(self.v_cluster_lab == lab))
1081
- for iter in range(v_nb):
1082
- for lab_ in range(labels.shape[0]): # lab=labels[0]
1083
- cs = cropped_skeleton.copy()
1084
- sv = []
1085
- v_c = coord_list[lab_]
1086
- # Save the current coordinate in the starting vertices array of this iteration
1087
- sv.append([v_c[0][iter], v_c[1][iter]])
1088
- # Remove one vertex coordinate to keep it from cs
1089
- v_y, v_x = np.delete(v_c[0], iter), np.delete(v_c[1], iter)
1090
- cs[v_y, v_x] = 0
1091
- cropped_skeleton_list.append(cs)
1092
- starting_vertices_list.append(np.array(sv))
1093
- for cropped_skeleton, starting_vertices in zip(cropped_skeleton_list, starting_vertices_list):
1094
- _, _ = self._identify_edges_connecting_a_vertex_list(cropped_skeleton, cropped_non_tip_vertices, starting_vertices)
1311
+ if v_cluster_nb > 0:
1312
+ max_v_nb = np.max(self.v_cluster_stats[1:, 4])
1313
+ cropped_skeleton_list = []
1314
+ starting_vertices_list = []
1315
+ for v_nb in range(2, max_v_nb + 1):
1316
+ labels = np.nonzero(self.v_cluster_stats[:, 4] == v_nb)[0]
1317
+ coord_list = []
1318
+ for lab in labels: # lab=labels[0]
1319
+ coord_list.append(np.nonzero(self.v_cluster_lab == lab))
1320
+ for iter in range(v_nb):
1321
+ for lab_ in range(labels.shape[0]): # lab=labels[0]
1322
+ cs = cropped_skeleton.copy()
1323
+ sv = []
1324
+ v_c = coord_list[lab_]
1325
+ # Save the current coordinate in the starting vertices array of this iteration
1326
+ sv.append([v_c[0][iter], v_c[1][iter]])
1327
+ # Remove one vertex coordinate to keep it from cs
1328
+ v_y, v_x = np.delete(v_c[0], iter), np.delete(v_c[1], iter)
1329
+ cs[v_y, v_x] = 0
1330
+ cropped_skeleton_list.append(cs)
1331
+ starting_vertices_list.append(np.array(sv))
1332
+ for cropped_skeleton, starting_vertices in zip(cropped_skeleton_list, starting_vertices_list):
1333
+ _, _ = self._identify_edges_connecting_a_vertex_list(cropped_skeleton, cropped_non_tip_vertices, starting_vertices)
1095
1334
 
1096
1335
  def label_edges_connecting_vertex_clusters(self):
1097
1336
  """
@@ -1160,9 +1399,9 @@ class EdgeIdentification:
1160
1399
 
1161
1400
  # Find out the remaining non-identified pixels
1162
1401
  nb, self.unidentified_shapes, self.unidentified_stats, ce = cv2.connectedComponentsWithStats(unidentified.astype(np.uint8))
1163
- # Handle the cases were edges are loops over only one vertex
1402
+ # Handle the cases where edges are loops over only one vertex
1164
1403
  looping_edges = np.nonzero(self.unidentified_stats[:, 4 ] > 2)[0][1:]
1165
- for loop_i in looping_edges: # loop_i = looping_edges[0]
1404
+ for loop_i in looping_edges: # loop_i = looping_edges[0] loop_i=11 # zoom_on_nonzero(unique_vertices_im, return_coord=False)
1166
1405
  edge_i = (self.unidentified_shapes == loop_i).astype(np.uint8)
1167
1406
  dil_edge_i = cv2.dilate(edge_i, square_33)
1168
1407
  unique_vertices_im = self.numbered_vertices.copy()
@@ -1170,14 +1409,35 @@ class EdgeIdentification:
1170
1409
  unique_vertices_im = dil_edge_i * unique_vertices_im
1171
1410
  unique_vertices = np.unique(unique_vertices_im)
1172
1411
  unique_vertices = unique_vertices[unique_vertices > 0]
1173
- if len(unique_vertices) == 1:
1412
+ v_nb = len(unique_vertices)
1413
+ new_edge_lengths = edge_i.sum()
1414
+ new_edge_pix_coord = np.transpose(np.vstack((np.nonzero(edge_i))))
1415
+ new_edge_pix_coord = np.hstack((new_edge_pix_coord, np.repeat(1, new_edge_pix_coord.shape[0])[:, None]))
1416
+ if v_nb == 1:
1174
1417
  start, end = unique_vertices[0], unique_vertices[0]
1175
- new_edge_lengths = edge_i.sum()
1176
- new_edge_pix_coord = np.transpose(np.vstack((np.nonzero(edge_i))))
1177
- new_edge_pix_coord = np.hstack((new_edge_pix_coord, np.repeat(1, new_edge_pix_coord.shape[0])[:, None])) # np.arange(1, new_edge_pix_coord.shape[0] + 1)[:, None]))
1418
+ self._update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
1419
+ elif v_nb == 2:
1420
+ # The edge loops around a group of connected vertices
1421
+ start, end = unique_vertices[0], unique_vertices[1]
1422
+ self._update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
1423
+ # conn_v_nb, conn_v = cv2.connectedComponents((unique_vertices_im > 0).astype(np.uint8))
1424
+ # if len(unique_vertices) == 2 and conn_v_nb == 2:
1425
+ elif v_nb > 2: # The question is: How to choose two vertices so that they link all missing pixels?
1426
+ # 1. Find every edge pixel connected to these vertices
1427
+ vertex_connected_pixels = np.nonzero(cv2.dilate((unique_vertices_im > 0).astype(np.uint8), square_33) * edge_i)
1428
+ # 2. Find the most distant pair of these
1429
+ pix1, pix2 = get_min_or_max_euclidean_pair(vertex_connected_pixels, "max")
1430
+ # 3. The two best vertices are the two nearest to these two most distant edge pixels
1431
+ dist_to_pix1 = np.zeros(v_nb, np.float64)
1432
+ dist_to_pix2 = np.zeros(v_nb, np.float64)
1433
+ for _i, v_i in enumerate(unique_vertices):
1434
+ v_coord = self.vertex_index_map[v_i]
1435
+ dist_to_pix1[_i] = eudist(pix1, v_coord)
1436
+ dist_to_pix2[_i] = eudist(pix2, v_coord)
1437
+ start, end = unique_vertices[np.argmin(dist_to_pix1)], unique_vertices[np.argmin(dist_to_pix2)]
1178
1438
  self._update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
1179
1439
  else:
1180
- logging.error(f"Other long edges cannot be identified: i={loop_i} of len={edge_i.sum()}")
1440
+ logging.error(f"t={self.t}, One long edge is not identified: i={loop_i} of length={edge_i.sum()} close to {len(unique_vertices)} vertices.")
1181
1441
  self.identified[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 1
1182
1442
 
1183
1443
  def clear_areas_of_1_or_2_unidentified_pixels(self):
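The "most distant pair" step above relies on get_min_or_max_euclidean_pair; an equivalent sketch using SciPy (illustrative helper, not the package's function):

import numpy as np
from scipy.spatial.distance import cdist

def most_distant_pair(rows: np.ndarray, cols: np.ndarray):
    # Return the two pixel coordinates separated by the largest Euclidean distance.
    points = np.column_stack((rows, cols))
    d = cdist(points, points)
    i, j = np.unravel_index(np.argmax(d), d.shape)
    return points[i], points[j]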
@@ -1201,9 +1461,9 @@ class EdgeIdentification:
1201
1461
  else:
1202
1462
  self.pad_skeleton[y1:y2, x1:x2][self.unidentified_shapes[y1:y2, x1:x2] == pix_i] = 0
1203
1463
  if len(cutting_removal) > 0:
1204
- logging.error(f"These pixels break the skeleton when removed: {cutting_removal}")
1464
+ logging.error(f"t={self.t}, These pixels break the skeleton when removed: {cutting_removal}")
1205
1465
  if (self.identified > 0).sum() != self.pad_skeleton.sum():
1206
- logging.error(f"Proportion of identified pixels in the skeleton: {(self.identified > 0).sum() / self.pad_skeleton.sum()}")
1466
+ logging.error(f"t={self.t}, Proportion of identified pixels in the skeleton: {(self.identified > 0).sum() / self.pad_skeleton.sum()}")
1207
1467
  self.pad_distances *= self.pad_skeleton
1208
1468
  del self.identified
1209
1469
  del self.unidentified_stats
@@ -1236,7 +1496,7 @@ class EdgeIdentification:
1236
1496
  explored_connexions_per_vertex = 0 # the maximal edge number that can connect a vertex
1237
1497
  new_connexions = True
1238
1498
  while new_connexions and explored_connexions_per_vertex < 5 and np.any(cropped_non_tip_vertices) and np.any(starting_vertices_coord):
1239
- # print(new_connexions)
1499
+
1240
1500
  explored_connexions_per_vertex += 1
1241
1501
  # 1. Find the ith closest vertex to each focal vertex
1242
1502
  ending_vertices_coord, new_edge_lengths, new_edge_pix_coord = _find_closest_vertices(
@@ -1278,6 +1538,7 @@ class EdgeIdentification:
1278
1538
  self.new_level_vertices = ending_vertices_coord[found_connexion, :].copy()
1279
1539
  else:
1280
1540
  self.new_level_vertices = np.vstack((self.new_level_vertices, ending_vertices_coord[found_connexion, :]))
1541
+
1281
1542
  return cropped_skeleton, cropped_non_tip_vertices
1282
1543
 
1283
1544
  def _update_edge_data(self, start, end, new_edge_lengths: NDArray, new_edge_pix_coord: NDArray):
@@ -1346,7 +1607,7 @@ class EdgeIdentification:
1346
1607
  edge_j_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_labs[edge_j], :2]
1347
1608
  if np.array_equal(edge_i_coord, edge_j_coord):
1348
1609
  edges_to_remove.append(edge_labs[edge_j])
1349
-
1610
+ edges_to_remove = np.unique(edges_to_remove)
1350
1611
  for edge in edges_to_remove:
1351
1612
  edge_bool = self.edges_labels[:, 0] != edge
1352
1613
  self.edges_labels = self.edges_labels[edge_bool, :]
@@ -1369,22 +1630,25 @@ class EdgeIdentification:
1369
1630
  edge_names = [self.edges_labels[edge_indices[0], 0], self.edges_labels[edge_indices[1], 0]]
1370
1631
  v_names = np.concatenate((self.edges_labels[edge_indices[0], 1:], self.edges_labels[edge_indices[1], 1:]))
1371
1632
  v_names = v_names[v_names != vertex2]
1372
- kept_edge = int(self.edge_lengths[edge_indices[1]] >= self.edge_lengths[edge_indices[0]])
1373
-
1374
- # Rename the removed edge by the kept edge name in pix_coord:
1375
- self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_names[1 - kept_edge], 2] = edge_names[kept_edge]
1376
- # Add the removed edge length to the kept edge length
1377
- self.edge_lengths[self.edges_labels[:, 0] == edge_names[kept_edge]] += self.edge_lengths[self.edges_labels[:, 0] == edge_names[1 - kept_edge]]
1378
- # Remove the corresponding edge length from the list
1379
- self.edge_lengths = self.edge_lengths[self.edges_labels[:, 0] != edge_names[1 - kept_edge]]
1380
- # Rename the vertex of the kept edge in edges_labels
1381
- self.edges_labels[self.edges_labels[:, 0] == edge_names[kept_edge], 1:] = v_names[1 - kept_edge], v_names[kept_edge]
1382
- # Remove the removed edge from the edges_labels array
1383
- self.edges_labels = self.edges_labels[self.edges_labels[:, 0] != edge_names[1 - kept_edge], :]
1384
-
1385
- vY, vX = np.nonzero(self.numbered_vertices == vertex2)
1386
- v_idx = np.nonzero(np.all(self.non_tip_vertices == [vY[0], vX[0]], axis=1))
1387
- self.non_tip_vertices = np.delete(self.non_tip_vertices, v_idx, axis=0)
1633
+ if len(v_names) > 0: # Otherwise it's a vertex between a normal edge and a loop
1634
+ kept_edge = int(self.edge_lengths[edge_indices[1]] >= self.edge_lengths[edge_indices[0]])
1635
+ # Rename the removed edge by the kept edge name in pix_coord:
1636
+ self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_names[1 - kept_edge], 2] = edge_names[kept_edge]
1637
+ # Add the removed edge length to the kept edge length (minus 2, corresponding to the removed vertex)
1638
+ self.edge_lengths[self.edges_labels[:, 0] == edge_names[kept_edge]] += self.edge_lengths[self.edges_labels[:, 0] == edge_names[1 - kept_edge]] - 1
1639
+ # Remove the corresponding edge length from the list
1640
+ self.edge_lengths = self.edge_lengths[self.edges_labels[:, 0] != edge_names[1 - kept_edge]]
1641
+ # Rename the vertex of the kept edge in edges_labels
1642
+ self.edges_labels[self.edges_labels[:, 0] == edge_names[kept_edge], 1:] = v_names[1 - kept_edge], v_names[kept_edge]
1643
+ # Remove the removed edge from the edges_labels array
1644
+ self.edges_labels = self.edges_labels[self.edges_labels[:, 0] != edge_names[1 - kept_edge], :]
1645
+ # vY, vX = np.nonzero(self.numbered_vertices == vertex2)
1646
+ # v_idx = np.nonzero(np.all(self.non_tip_vertices == [vY[0], vX[0]], axis=1))
1647
+ vY, vX = self.vertex_index_map[vertex2]
1648
+ v_idx = np.nonzero(np.all(self.non_tip_vertices == [vY, vX], axis=1))
1649
+ self.non_tip_vertices = np.delete(self.non_tip_vertices, v_idx, axis=0)
1650
+ # Sometimes, clearing vertices connecting 2 edges can create edge duplicates, so:
1651
+ self.clear_edge_duplicates()
1388
1652
 
1389
1653
  def _remove_padding(self):
1390
1654
  """
@@ -1398,13 +1662,14 @@ class EdgeIdentification:
1398
1662
  self.edge_pix_coord[:, :2] -= 1
1399
1663
  self.tips_coord[:, :2] -= 1
1400
1664
  self.non_tip_vertices[:, :2] -= 1
1665
+ del self.vertex_index_map
1401
1666
  self.skeleton, self.distances, self.vertices = remove_padding(
1402
1667
  [self.pad_skeleton, self.pad_distances, self.numbered_vertices])
1403
1668
 
1404
1669
 
1405
- def make_vertex_table(self, origin_contours: NDArray[np.uint8]=None, growing_areas: NDArray[bool]=None):
1670
+ def make_vertex_table(self, origin_contours: NDArray[np.uint8]=None, growing_areas: NDArray=None):
1406
1671
  """
1407
- Generate a vertex table for the vertices.
1672
+ Generate a table for the vertices.
1408
1673
 
1409
1674
  This method constructs and returns a 2D NumPy array holding information
1410
1675
  about all vertices. Each row corresponds to one vertex identified either
@@ -1416,7 +1681,7 @@ class EdgeIdentification:
1416
1681
  ----------
1417
1682
  origin_contours : ndarray of uint8, optional
1418
1683
  Binary map to identify food vertices. Default is `None`.
1419
- growing_areas : ndarray of bool, optional
1684
+ growing_areas : ndarray, optional
1420
1685
  Binary map to identify growing regions. Default is `None`.
1421
1686
 
1422
1687
  Notes
@@ -1437,9 +1702,13 @@ class EdgeIdentification:
1437
1702
  food_vertices = food_vertices[food_vertices > 0]
1438
1703
  self.vertex_table[np.isin(self.vertex_table[:, 2], food_vertices), 4] = 1
1439
1704
 
1440
- if growing_areas is not None:
1441
- growing = self.vertex_table[:, 2] == np.unique(self.vertices * growing_areas)[1:]
1442
- self.vertex_table[growing, 4] = 2
1705
+ if growing_areas is not None and growing_areas.shape[1] > 0:
1706
+ # growing = np.unique(self.vertices * growing_areas)[1:]
1707
+ growing = np.unique(self.vertices[growing_areas[0], growing_areas[1]])
1708
+ growing = growing[growing > 0]
1709
+ if len(growing) > 0:
1710
+ growing = np.isin(self.vertex_table[:, 2], growing)
1711
+ self.vertex_table[growing, 4] = 2
1443
1712
 
1444
1713
  nb, sh, stats, cent = cv2.connectedComponentsWithStats((self.vertices > 0).astype(np.uint8))
1445
1714
  for i, v_i in enumerate(np.nonzero(stats[:, 4] > 1)[0][1:]):
@@ -1448,7 +1717,7 @@ class EdgeIdentification:
1448
1717
  self.vertex_table[self.vertex_table[:, 2] == v_lab, 5] = 1
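
For the growing-area branch of make_vertex_table above, the pattern is: index the labelled vertex image with the region's (row, col) coordinate arrays, drop the background label, and flag the matching rows of the vertex table with np.isin. A small sketch with invented toy data (not the package's own structures):

import numpy as np

# Labelled vertex image and a vertex table whose third column holds the
# vertex label and whose fifth column holds a flag.
vertices = np.zeros((5, 5), dtype=np.int32)
vertices[1, 1] = 3
vertices[3, 4] = 8
vertex_table = np.zeros((2, 6))
vertex_table[:, 2] = [3, 8]

# growing_areas given as (rows, cols) coordinate arrays, as in the diff above.
growing_areas = (np.array([3, 4]), np.array([4, 4]))

growing = np.unique(vertices[growing_areas[0], growing_areas[1]])
growing = growing[growing > 0]            # drop the background label 0
if len(growing) > 0:
    mask = np.isin(vertex_table[:, 2], growing)
    vertex_table[mask, 4] = 2             # flag vertices inside growing regions

print(vertex_table[:, 4])                 # [0. 2.] -> only vertex 8 is flagged
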
1449
1718
 
1450
1719
 
1451
- def make_edge_table(self, greyscale: NDArray[np.uint8]):
1720
+ def make_edge_table(self, greyscale: NDArray[np.uint8], compute_BC: bool=False):
1452
1721
  """
1453
1722
  Generate edge table with length and average intensity information.
1454
1723
 
@@ -1462,47 +1731,58 @@ class EdgeIdentification:
1462
1731
  greyscale : ndarray of uint8
1463
1732
  Grayscale image.
1464
1733
  """
1734
+ if self.vertices is None:
1735
+ self._remove_padding()
1465
1736
  self.edge_table = np.zeros((self.edges_labels.shape[0], 7), float) # edge_id, vertex1, vertex2, length, average_width, int, BC
1466
1737
  self.edge_table[:, :3] = self.edges_labels[:, :]
1467
1738
  self.edge_table[:, 3] = self.edge_lengths
1468
1739
  for idx, edge_lab in enumerate(self.edges_labels[:, 0]):
1469
1740
  edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :]
1741
+ pix_widths = self.distances[edge_coord[:, 0], edge_coord[:, 1]]
1470
1742
  v_id = self.edges_labels[self.edges_labels[:, 0] == edge_lab, 1:][0]
1471
- v1_width, v2_width = self.distances[self.vertices == v_id[0]], self.distances[self.vertices == v_id[1]]
1472
- pix_widths = np.concatenate((v1_width, v2_width))
1473
- v1_int, v2_int = greyscale[self.vertices == v_id[0]], greyscale[self.vertices == v_id[1]]
1474
- pix_ints = np.concatenate((v1_int, v2_int))
1475
- if len(edge_coord) > 0:
1476
- pix_widths = np.append(pix_widths, self.distances[edge_coord[:, 0], edge_coord[:, 1]])
1477
- pix_ints = np.append(pix_widths, greyscale[edge_coord[:, 0], edge_coord[:, 1]])
1478
- self.edge_table[idx, 4] = pix_widths.mean()
1743
+ v1_coord = self.vertex_table[self.vertex_table[:, 2] == v_id[0], :2][0]
1744
+ v2_coord = self.vertex_table[self.vertex_table[:, 2] == v_id[1], :2][0]
1745
+ v1_width, v2_width = self.distances[v1_coord[0], v1_coord[1]], self.distances[v2_coord[0], v2_coord[1]]
1746
+
1747
+ if not np.isnan(v1_width):
1748
+ pix_widths = np.append(pix_widths, v1_width)
1749
+ if not np.isnan(v2_width):
1750
+ pix_widths = np.append(pix_widths, v2_width)
1751
+ if pix_widths.size > 0:
1752
+ self.edge_table[idx, 4] = pix_widths.mean()
1753
+ else:
1754
+ self.edge_table[idx, 4] = np.nan
1755
+ pix_ints = greyscale[edge_coord[:, 0], edge_coord[:, 1]]
1756
+ v1_int, v2_int = greyscale[v1_coord[0], v1_coord[1]], greyscale[v2_coord[0], v2_coord[1]]
1757
+ pix_ints = np.append(pix_ints, (v1_int, v2_int))
1479
1758
  self.edge_table[idx, 5] = pix_ints.mean()
1480
1759
 
1481
- G = nx.from_edgelist(self.edges_labels[:, 1:])
1482
- e_BC = nx.edge_betweenness_centrality(G, seed=0)
1483
- self.BC_net = np.zeros_like(self.distances)
1484
- for v, k in e_BC.items(): # v=(81, 80)
1485
- v1_coord = self.vertex_table[self.vertex_table[:, 2] == v[0], :2]
1486
- v2_coord = self.vertex_table[self.vertex_table[:, 2] == v[1], :2]
1487
- full_coord = np.concatenate((v1_coord, v2_coord))
1488
- edge_lab1 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v[::-1], axis=1), 0]
1489
- edge_lab2 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v, axis=1), 0]
1490
- edge_lab = np.unique(np.concatenate((edge_lab1, edge_lab2)))
1491
- if len(edge_lab) == 1:
1492
- edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :2]
1493
- full_coord = np.concatenate((full_coord, edge_coord))
1494
- self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1495
- self.edge_table[self.edge_table[:, 0] == edge_lab, 6] = k
1496
- elif len(edge_lab) > 1:
1497
- edge_coord0 = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[0], :2]
1498
- for edge_i in range(len(edge_lab)): # edge_i=1
1499
- edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[edge_i], :2]
1500
- self.edge_table[self.edge_table[:, 0] == edge_lab[edge_i], 6] = k
1760
+ if compute_BC:
1761
+ G = nx.from_edgelist(self.edges_labels[:, 1:])
1762
+ e_BC = nx.edge_betweenness_centrality(G, seed=0)
1763
+ self.BC_net = np.zeros_like(self.distances)
1764
+ for v, k in e_BC.items(): # v=(81, 80)
1765
+ v1_coord = self.vertex_table[self.vertex_table[:, 2] == v[0], :2]
1766
+ v2_coord = self.vertex_table[self.vertex_table[:, 2] == v[1], :2]
1767
+ full_coord = np.concatenate((v1_coord, v2_coord))
1768
+ edge_lab1 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v[::-1], axis=1), 0]
1769
+ edge_lab2 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v, axis=1), 0]
1770
+ edge_lab = np.unique(np.concatenate((edge_lab1, edge_lab2)))
1771
+ if len(edge_lab) == 1:
1772
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :2]
1501
1773
  full_coord = np.concatenate((full_coord, edge_coord))
1502
1774
  self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1503
- if edge_i > 0 and np.array_equal(edge_coord0, edge_coord):
1504
- print(f"There still is two identical edges: {edge_lab} of len: {len(edge_coord)} linking v={v}")
1505
- break
1775
+ self.edge_table[self.edge_table[:, 0] == edge_lab, 6] = k
1776
+ elif len(edge_lab) > 1:
1777
+ edge_coord0 = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[0], :2]
1778
+ for edge_i in range(len(edge_lab)): # edge_i=1
1779
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[edge_i], :2]
1780
+ self.edge_table[self.edge_table[:, 0] == edge_lab[edge_i], 6] = k
1781
+ full_coord = np.concatenate((full_coord, edge_coord))
1782
+ self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1783
+ if edge_i > 0 and np.array_equal(edge_coord0, edge_coord):
1784
+ logging.error(f"There still is two identical edges: {edge_lab} of len: {len(edge_coord)} linking vertices {v}")
1785
+ break
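
When compute_BC is requested, edge betweenness centrality is computed on the vertex pairs with networkx and written back into the edge table. A minimal sketch of that mapping, assuming only a plain (vertex1, vertex2, BC) table rather than the class's full bookkeeping:

import networkx as nx
import numpy as np

# Toy edge list (vertex pairs); the last column of edge_table will receive
# the betweenness-centrality values.
edges = np.array([[1, 2], [2, 3], [3, 1], [3, 4]])
edge_table = np.zeros((len(edges), 3))
edge_table[:, :2] = edges

G = nx.from_edgelist(edges)
e_bc = nx.edge_betweenness_centrality(G, seed=0)

for (u, v), bc in e_bc.items():
    # networkx may return the pair in either orientation, so match both.
    mask = (np.all(edge_table[:, :2] == (u, v), axis=1)
            | np.all(edge_table[:, :2] == (v, u), axis=1))
    edge_table[mask, 2] = bc

print(edge_table)

Checking both (u, v) and (v, u) mirrors the double lookup in the diff above, since undirected edges come back from networkx in arbitrary orientation.
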
1506
1786
 
1507
1787
 
1508
1788
  def _find_closest_vertices(skeleton: NDArray[np.uint8], all_vertices_coord: NDArray, starting_vertices_coord: NDArray) -> Tuple[NDArray, NDArray[np.float64], NDArray[np.uint32]]:
@@ -1606,10 +1886,70 @@ def _find_closest_vertices(skeleton: NDArray[np.uint8], all_vertices_coord: NDAr
1606
1886
  else:
1607
1887
  edge_lengths[i] = np.nan
1608
1888
  i += 1
1609
-
1610
- edges_coords = np.array(all_path_pixels, dtype=np.uint32)
1889
+ if len(all_path_pixels) > 0:
1890
+ edges_coords = np.array(all_path_pixels, dtype=np.uint32)
1891
+ else:
1892
+ edges_coords = np.zeros((0, 3), dtype=np.uint32)
1611
1893
  return ending_vertices_coord, edge_lengths, edges_coords
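
The guard added above exists because converting an empty list with np.array yields a shape-(0,) array, which breaks later column indexing; returning an explicit (0, 3) array keeps the empty case safe. A short demonstration of the difference:

import numpy as np

all_path_pixels = []   # no edge pixels were traced

# np.array([]) would have shape (0,), so edges_coords[:, 2] would raise;
# an explicit (0, 3) array keeps downstream column indexing working.
if len(all_path_pixels) > 0:
    edges_coords = np.array(all_path_pixels, dtype=np.uint32)
else:
    edges_coords = np.zeros((0, 3), dtype=np.uint32)

print(edges_coords.shape)      # (0, 3)
print(edges_coords[:, 2])      # [] -- still indexable by column
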
1612
1894
 
1895
+ def ad_pad(arr: NDArray) -> NDArray:
1896
+ """
1897
+ Pad the input array with a single layer of zeros around its edges.
1898
+
1899
+ Parameters
1900
+ ----------
1901
+ arr : ndarray
1902
+ The input array to pad. Must be 2-dimensional.
1903
+
1904
+ Returns
1905
+ -------
1906
+ padded_arr : ndarray
1907
+ The output array with a single 0-padded layer around its edges.
1908
+
1909
+ Notes
1910
+ -----
1911
+ This function uses NumPy's `pad` with mode='constant' to add a single layer
1912
+ of zeros around the edges of the input array.
1913
+
1914
+ Examples
1915
+ --------
1916
+ >>> arr = np.array([[1, 2], [3, 4]])
1917
+ >>> ad_pad(arr)
1918
+ array([[0, 0, 0, 0],
1919
+ [0, 1, 2, 0],
1920
+ [0, 3, 4, 0],
1921
+ [0, 0, 0, 0]])
1922
+ """
1923
+ return np.pad(arr, [(1, ), (1, )], mode='constant')
1924
+
1925
+ def un_pad(arr: NDArray) -> NDArray:
1926
+ """
1927
+ Unpads a 2D NumPy array by removing the first and last row/column.
1928
+
1929
+ Extended Description
1930
+ --------------------
1931
+ Reduces the size of a 2D array by removing the outermost rows and columns.
1932
+ Useful for trimming boundaries added during padding operations.
1933
+
1934
+ Parameters
1935
+ ----------
1936
+ arr : ndarray
1937
+ Input 2D array to be unpadded. Shape (n,m) is expected.
1938
+
1939
+ Returns
1940
+ -------
1941
+ ndarray
1942
+ Unpadded 2D array with shape (n-2, m-2).
1943
+
1944
+ Examples
1945
+ --------
1946
+ >>> arr = np.array([[0, 0, 0],
1947
+ ... [0, 4, 0],
1948
+ ... [0, 0, 0]])
1949
+ >>> un_pad(arr)
1950
+ array([[4]])
1951
+ """
1952
+ return arr[1:-1, 1:-1]
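
ad_pad and un_pad are exact inverses for 2D arrays; a quick round-trip check (standalone re-definitions, for illustration only):

import numpy as np

def ad_pad(arr):
    # Add a one-pixel border of zeros around a 2D array.
    return np.pad(arr, [(1, ), (1, )], mode='constant')

def un_pad(arr):
    # Remove the outermost row and column on every side.
    return arr[1:-1, 1:-1]

a = np.array([[1, 2], [3, 4]])
assert np.array_equal(un_pad(ad_pad(a)), a)   # the two helpers undo each other
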
1613
1953
 
1614
1954
  def add_padding(array_list: list) -> list:
1615
1955
  """
@@ -1637,7 +1977,7 @@ def add_padding(array_list: list) -> list:
1637
1977
  """
1638
1978
  new_array_list = []
1639
1979
  for arr in array_list:
1640
- new_array_list.append(np.pad(arr, [(1, ), (1, )], mode='constant'))
1980
+ new_array_list.append(ad_pad(arr))
1641
1981
  return new_array_list
1642
1982
 
1643
1983
 
@@ -1664,11 +2004,11 @@ def remove_padding(array_list: list) -> list:
1664
2004
  """
1665
2005
  new_array_list = []
1666
2006
  for arr in array_list:
1667
- new_array_list.append(arr[1:-1, 1:-1])
2007
+ new_array_list.append(un_pad(arr))
1668
2008
  return new_array_list
1669
2009
 
1670
2010
 
1671
- def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64], pad_origin: NDArray[np.uint8], pad_network: NDArray[np.uint8], pad_origin_centroid: NDArray) -> Tuple[NDArray[np.uint8], NDArray[np.float64], NDArray[np.uint8]]:
2011
+ def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray[np.float64], pad_origin: NDArray[np.uint8], pad_network: NDArray[np.uint8], pad_origin_centroid: NDArray[np.int64]) -> Tuple[NDArray[np.uint8], NDArray[np.float64], NDArray[np.uint8]]:
1672
2012
  """
1673
2013
  Add a central contour to the skeleton while preserving distances.
1674
2014
 
@@ -1700,7 +2040,12 @@ def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray
1700
2040
  # Make a hole at the skeleton center and find the vertices connecting it
1701
2041
  holed_skeleton = pad_skeleton * (1 - pad_origin)
1702
2042
  pad_vertices, pad_tips = get_vertices_and_tips_from_skeleton(pad_skeleton)
1703
- dil_origin = cv2.dilate(pad_origin, rhombus_55, iterations=20)
2043
+ ite = 20
2044
+ dil_origin = cv2.dilate(pad_origin, rhombus_55, iterations=ite)
2045
+ im_border = 1 - image_borders(pad_network.shape)
2046
+ while np.any(dil_origin * im_border):
2047
+ ite -= 1
2048
+ dil_origin = cv2.dilate(pad_origin, rhombus_55, iterations=ite)
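
The loop above shrinks the dilation of the origin until it no longer overlaps the image border. A standalone sketch of the same idea, using a generic OpenCV cross kernel and a hand-built border mask as stand-ins for rhombus_55 and image_borders (both are assumptions, not the package's exact objects):

import numpy as np
import cv2

kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))  # stand-in kernel

origin = np.zeros((40, 40), dtype=np.uint8)
origin[18:22, 18:22] = 1

# Border mask: 1 on the outermost frame, 0 inside.
border = np.ones_like(origin)
border[1:-1, 1:-1] = 0

# Start from a generous dilation and shrink it until it stays off the border.
ite = 20
dilated = cv2.dilate(origin, kernel, iterations=ite)
while np.any(dilated * border):
    ite -= 1
    dilated = cv2.dilate(origin, kernel, iterations=ite)
print(ite, dilated.sum())
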
1704
2049
  pad_vertices *= dil_origin
1705
2050
  connecting_pixels = np.transpose(np.array(np.nonzero(pad_vertices)))
1706
2051
 
@@ -1715,7 +2060,6 @@ def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray
1715
2060
  new_edge_im[new_edge[:, 0], new_edge[:, 1]] = 1
1716
2061
  if not np.any(new_edge_im * pad_net_contour) and not np.any(new_edge_im * skeleton_without_vertices):# and not np.any(new_edge_im * holed_skeleton):
1717
2062
  with_central_contour[new_edge[:, 0], new_edge[:, 1]] = 1
1718
-
1719
2063
  # Add dilated contour
1720
2064
  pad_origin_contours = get_contours(pad_origin)
1721
2065
  with_central_contour *= (1 - pad_origin)
@@ -1725,7 +2069,10 @@ def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray
1725
2069
 
1726
2070
  # show(dil_origin * with_central_contour)
1727
2071
  # Capture only the new contour and its neighborhood, get its skeleton and update the final skeleton
1728
- new_contour = cv2.morphologyEx(dil_origin * with_central_contour, cv2.MORPH_CLOSE, square_33)
2072
+ new_contour = dil_origin * with_central_contour
2073
+ dil_im_border = cv2.dilate(im_border, cross_33, iterations=1)
2074
+ if not np.any(new_contour * dil_im_border):
2075
+ new_contour = cv2.morphologyEx(new_contour, cv2.MORPH_CLOSE, square_33)
1729
2076
  new_contour = morphology.medial_axis(new_contour, rng=0).astype(np.uint8)
1730
2077
  new_skeleton = with_central_contour * (1 - dil_origin)
1731
2078
  new_skeleton += new_contour
@@ -1743,15 +2090,31 @@ def _add_central_contour(pad_skeleton: NDArray[np.uint8], pad_distances: NDArray
1743
2090
 
1744
2091
  dil_pad_origin_contours = cv2.dilate(pad_origin_contours, cross_33, iterations=1)
1745
2092
  new_pad_origin_contours = dil_pad_origin_contours * new_skeleton
2093
+ new_pad_origin_contours += pad_origin
2094
+ new_pad_origin_contours[new_pad_origin_contours > 0] = 1
2095
+ new_pad_origin_contours = get_contours(new_pad_origin_contours)
1746
2096
  nb, sh = cv2.connectedComponents(new_pad_origin_contours)
1747
- while nb > 2:
1748
- dil_pad_origin_contours = cv2.dilate(dil_pad_origin_contours, cross_33, iterations=1)
1749
- new_pad_origin_contours = dil_pad_origin_contours * new_skeleton
2097
+
2098
+ new_skeleton[new_pad_origin_contours > 0] = 1
2099
+ if nb > 2:
2100
+ new_pad_origin_contours = cv2.morphologyEx(new_pad_origin_contours, cv2.MORPH_CLOSE, square_33, iterations=1)
1750
2101
  nb, sh = cv2.connectedComponents(new_pad_origin_contours)
2102
+ current_contour_coord = np.argwhere(new_pad_origin_contours)
2103
+ cnv4, cnv8 = get_neighbor_comparisons(new_pad_origin_contours)
2104
+ potential_tips = get_terminations_and_their_connected_nodes(new_pad_origin_contours, cnv4, cnv8)
2105
+ tips_coord = np.transpose(np.array(np.nonzero(potential_tips)))
2106
+ ending_vertices_coord, edge_lengths, edges_coords = _find_closest_vertices(pad_origin, current_contour_coord, tips_coord)
2107
+ new_potentials = np.unique(edges_coords[:, 2])
2108
+ for new_pot in new_potentials:
2109
+ edge_coord = edges_coords[edges_coords[:, 2] == new_pot, :2]
2110
+ test = new_pad_origin_contours.copy()
2111
+ test[edge_coord[:, 0], edge_coord[:, 1]] = 1
2112
+ new_nb, sh = cv2.connectedComponents(test)
2113
+ if new_nb < nb:
2114
+ new_pad_origin_contours[edge_coord[:, 0], edge_coord[:, 1]] = 1
2115
+
1751
2116
  pad_origin_contours = new_pad_origin_contours
1752
- pad_distances[pad_origin_contours > 0] = np.nan # pad_distances.max() + 1 #
1753
- # test1 = ((pad_distances > 0) * (1 - new_skeleton)).sum() == 0
1754
- # test2 = ((1 - (pad_distances > 0)) * new_skeleton).sum() == 0
2117
+ pad_distances[pad_origin_contours > 0] = np.nan
1755
2118
 
1756
2119
  return new_skeleton, pad_distances, pad_origin_contours
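
The gap-bridging step near the end of _add_central_contour only accepts a candidate set of pixels if adding them lowers the connected-component count of the contour. A toy illustration of that acceptance test (array contents are invented):

import numpy as np
import cv2

# Toy contour with a one-pixel gap, split into two connected components.
contour = np.zeros((7, 7), dtype=np.uint8)
contour[2, 1:3] = 1          # left fragment
contour[2, 4:6] = 1          # right fragment
nb, _ = cv2.connectedComponents(contour)          # background + 2 fragments

# Candidate bridging pixels (here a single pixel filling the gap).
candidate = np.array([[2, 3]])

test = contour.copy()
test[candidate[:, 0], candidate[:, 1]] = 1
new_nb, _ = cv2.connectedComponents(test)

# Accept the candidate only if it actually merges components,
# mirroring the `new_nb < nb` test in the diff above.
if new_nb < nb:
    contour[candidate[:, 0], candidate[:, 1]] = 1

print(nb, new_nb)            # 3 2
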
1757
2120