cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1244 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ This module contains the classes and functions used to detect networks from a grayscale image of Physarum polycephalum.
4
+ """
5
+
6
+ # A completely different strategy could be to segment the network by layers of luminosity.
7
+ # The first layer would capture the brightest veins and replace their pixels with background pixels.
8
+ # The second layer would capture the remaining veins (making sure they are connected to the first?) and replace their pixels too.
9
+ # During the segmentation of one layer, the algorithm would make sure that all detected veins are as long as possible,
10
+ # while staying shorter than, and connected to, the veins of the previous layer (a hypothetical sketch follows below).
11
+
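The comment block above only sketches the layered idea in prose; a minimal, hypothetical implementation of it could look like the following (the function name, parameters, and the 5 % quantile step are illustrative choices, not part of the package):

import numpy as np

def segment_by_luminosity_layers(grey, n_layers=3):
    # Hypothetical sketch: peel off the brightest pixels first, replace them by the image
    # median (a stand-in for background pixels), then repeat on the darker remainder.
    # The connectivity check between successive layers mentioned above is omitted here.
    work = grey.astype(np.float64)
    background = np.median(work)
    labels = np.zeros(grey.shape, dtype=np.uint8)
    for layer in range(1, n_layers + 1):
        threshold = np.quantile(work, 0.95)         # brightest 5 % of what remains
        mask = (work >= threshold) & (labels == 0)
        labels[mask] = layer                        # record which layer captured these veins
        work[mask] = background                     # replace the detected veins by background
    return labels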
12
+ import matplotlib.pyplot as plt
13
+ import cv2
14
+ import numpy as np
15
+ import pandas as pd
16
+
17
+ from cellects.image_analysis.morphological_operations import square_33, cross_33, cc, Ellipse, CompareNeighborsWithValue, get_contours, get_all_line_coordinates, get_line_points
18
+ from cellects.utils.utilitarian import remove_coordinates
19
+ from cellects.utils.formulas import *
20
+ from cellects.utils.load_display_save import *
21
+ from cellects.core.one_image_analysis import OneImageAnalysis
22
+ from cellects.image_analysis.image_segmentation import generate_color_space_combination, rolling_window_segmentation, binary_quality_index
23
+ from numba.typed import Dict as TDict
24
+ from skimage import morphology
25
+ from skimage.segmentation import flood_fill
26
+ from skimage.filters import frangi, sato, threshold_otsu
27
+ from skimage.measure import perimeter
28
+ from collections import deque
29
+ from scipy.ndimage import distance_transform_edt
30
+ from scipy.spatial.distance import cdist
31
+ import networkx as nx
32
+
33
+ # 8-connectivity neighbors
34
+ neighbors_8 = [(-1, -1), (-1, 0), (-1, 1),
35
+ (0, -1), (0, 1),
36
+ (1, -1), (1, 0), (1, 1)]
37
+ neighbors_4 = [(-1, 0), (0, -1), (0, 1), (1, 0)]
38
+
39
+
40
+
41
+ class NetworkDetection:
42
+ def __init__(self, greyscale_image, possibly_filled_pixels, lighter_background, add_rolling_window=False, origin_to_add=None, best_result=None):
43
+ self.greyscale_image = greyscale_image
44
+ self.lighter_background = lighter_background
45
+ self.possibly_filled_pixels = possibly_filled_pixels
46
+ self.best_result = best_result
47
+ self.add_rolling_window = add_rolling_window
48
+ self.origin_to_add = origin_to_add
49
+ self.frangi_beta = 1.
50
+ self.frangi_gamma = 1.
51
+ self.black_ridges = True
52
+
53
+ def apply_frangi_variations(self):
54
+ """Apply 12 variations of Frangi filter"""
55
+ results = []
56
+
57
+ # Parameter variations for Frangi filter
58
+ frangi_sigmas = {
59
+ 's_fine_vessels': [0.75],
60
+ 'fine_vessels': [0.5, 1.0], # Very fine capillaries, thin fibers
61
+ 'small_vessels': [1.0, 2.0], # Small vessels, fine structures
62
+ 'multi_scale_medium': [1.0, 2.0, 3.0], # Standard multi-scale
63
+ 'ultra_fine': [0.3, 0.5, 0.8], # Ultra-fine structures
64
+ 'comprehensive': [0.5, 1.0, 2.0, 4.0], # Multi-scale
65
+ 'retinal_vessels': [1.0, 2.0, 4.0, 8.0], # Optimized for retinal imaging
66
+ 'microscopy': [0.5, 1.0, 1.5, 2.5], # Microscopy applications
67
+ 'broad_spectrum': [0.5, 1.5, 3.0, 6.0, 10.0]
68
+ }
69
+
70
+ for i, (key, sigmas) in enumerate(frangi_sigmas.items()):
71
+ # Apply Frangi filter
72
+ frangi_result = frangi(self.greyscale_image, sigmas=sigmas, beta=self.frangi_beta, gamma=self.frangi_gamma, black_ridges=self.black_ridges)
73
+
74
+ # Apply both thresholding methods
75
+ # Method 1: Otsu thresholding
76
+ thresh_otsu = threshold_otsu(frangi_result)
77
+ binary_otsu = frangi_result > thresh_otsu
78
+ quality_otsu = binary_quality_index(self.possibly_filled_pixels * binary_otsu)
79
80
+
82
+ # Store results
83
+ results.append({
84
+ 'method': f'f_{sigmas}_thresh',
85
+ 'binary': binary_otsu,
86
+ 'quality': quality_otsu,
87
+ 'filtered': frangi_result,
88
+ 'filter': 'frangi',
89
+ 'rolling_window': False,
90
+ 'sigmas': sigmas
91
+ })
92
+ # Method 2: Rolling window thresholding
93
+ if self.add_rolling_window:
94
+ binary_rolling = rolling_window_segmentation(frangi_result, self.possibly_filled_pixels, patch_size=(10, 10))
95
+ quality_rolling = binary_quality_index(binary_rolling)
96
+ results.append({
97
+ 'method': f'f_{sigmas}_roll',
98
+ 'binary': binary_rolling,
99
+ 'quality': quality_rolling,
100
+ 'filtered': frangi_result,
101
+ 'filter': 'frangi',
102
+ 'rolling_window': True,
103
+ 'sigmas': sigmas
104
+ })
105
+
106
+ return results
107
+
108
+
109
+ def apply_sato_variations(self):
110
+ """Apply 12 variations of sato filter"""
111
+ results = []
112
+
113
+ # Parameter variations for the Sato filter
114
+ sigmas_list = [  # note: currently unused; sato_sigmas below is what is applied
115
+ [1], [2], [3], [1, 2], [2, 3], [1, 3],
116
+ [1, 2, 3], [0.5, 1], [1, 4], [0.5, 2],
117
+ [2, 4], [1, 2, 4]
118
+ ]
119
+ sato_sigmas = {
120
+ 'super_small_tubes': [0.01, 0.05, 0.1, 0.15], #
121
+ 'small_tubes': [0.1, 0.2, 0.4, 0.8], #
122
+ 's_thick_ridges': [0.25, 0.75], # Thick ridges/tubes
123
+ 'small_multi_scale': [0.1, 0.2, 0.4, 0.8, 1.6], #
124
+ 'fine_ridges': [0.8, 1.5], # Fine ridge detection
125
+ 'medium_ridges': [1.5, 3.0], # Medium ridge structures
126
+ 'multi_scale_fine': [0.8, 1.5, 2.5], # Multi-scale fine detection
127
+ 'multi_scale_standard': [1.0, 2.5, 5.0], # Standard multi-scale
128
+ 'edge_enhanced': [0.5, 1.0, 2.0], # Edge-enhanced detection
129
+ 'noise_robust': [1.5, 2.5, 4.0], # Robust to noise
130
+ 'fingerprint': [1.0, 1.5, 2.0, 3.0], # Fingerprint ridge detection
131
+ 'geological': [2.0, 5.0, 10.0, 15.0] # Geological structures
132
+ }
133
+
134
+ for i, (key, sigmas) in enumerate(sato_sigmas.items()):
135
+ # Apply sato filter
136
+ sato_result = sato(self.greyscale_image, sigmas=sigmas, black_ridges=self.black_ridges, mode='reflect')
137
+
138
+ # Apply both thresholding methods
139
+ # Method 1: Otsu thresholding
140
+ thresh_otsu = threshold_otsu(sato_result)
141
+ binary_otsu = sato_result > thresh_otsu
142
+ quality_otsu = binary_quality_index(self.possibly_filled_pixels * binary_otsu)
143
+
144
+
145
+ # Store results
146
+ results.append({
147
+ 'method': f's_{sigmas}_thresh',
148
+ 'binary': binary_otsu,
149
+ 'quality': quality_otsu,
150
+ 'filtered': sato_result,
151
+ 'filter': 'sato',
152
+ 'rolling_window': False,
153
+ 'sigmas': sigmas
154
+ })
155
+
156
+ # Method 2: Rolling window thresholding
157
+ if self.add_rolling_window:
158
+ binary_rolling = rolling_window_segmentation(sato_result, self.possibly_filled_pixels, patch_size=(10, 10))
159
+ quality_rolling = binary_quality_index(binary_rolling)
160
+
161
+ results.append({
162
+ 'method': f's_{sigmas}_roll',
163
+ 'binary': binary_rolling,
164
+ 'quality': quality_rolling,
165
+ 'filtered': sato_result,
166
+ 'filter': 'sato',
167
+ 'rolling_window': True,
168
+ 'sigmas': sigmas
169
+ })
170
+
171
+ return results
172
+
173
+
174
+ def get_best_network_detection_method(self):
175
+ frangi_res = self.apply_frangi_variations()
176
+ sato_res = self.apply_sato_variations()
177
+ self.all_results = frangi_res + sato_res
178
+ self.quality_metrics = np.array([result['quality'] for result in self.all_results])
179
+ self.best_idx = np.argmax(self.quality_metrics)
180
+ self.best_result = self.all_results[self.best_idx]
181
+ self.incomplete_network = self.best_result['binary'] * self.possibly_filled_pixels
182
+
183
+
184
+ def detect_network(self):
185
+ if self.best_result['filter'] == 'frangi':
186
+ filtered_result = frangi(self.greyscale_image, sigmas=self.best_result['sigmas'], beta=self.frangi_beta, gamma=self.frangi_gamma, black_ridges=self.black_ridges)
187
+ else:
188
+ filtered_result = sato(self.greyscale_image, sigmas=self.best_result['sigmas'], black_ridges=self.black_ridges, mode='reflect')
189
+
190
+ if self.best_result['rolling_window']:
191
+ binary_image = rolling_window_segmentation(filtered_result, self.possibly_filled_pixels, patch_size=(10, 10))
192
+ else:
193
+ thresh_otsu = threshold_otsu(filtered_result)
194
+ binary_image = filtered_result > thresh_otsu
195
+ return binary_image
196
+
197
+ def change_greyscale(self, img, c_space_dict):
198
+ self.greyscale_image, g2 = generate_color_space_combination(img, list(c_space_dict.keys()), c_space_dict)
199
+
200
+ def get_best_pseudopod_detection_method(self):
201
+ # This adds a lot of noise in the network instead of only detecting pseudopods
202
+ pad_skeleton, pad_distances = morphology.medial_axis(self.incomplete_network, return_distance=True, rng=0)
203
+ pad_skeleton = pad_skeleton.astype(np.uint8)
204
+
205
+ unique_distances = np.unique(pad_distances)
206
+ counter = 1
207
+ while pad_skeleton.sum() > 1000:
208
+ counter += 1
209
+ width_threshold = unique_distances[counter]
210
+ pad_skeleton[pad_distances < width_threshold] = 0
211
+ self.best_result['width_threshold'] = width_threshold
212
+ potential_tips = np.nonzero(pad_skeleton)
213
+ if self.lighter_background:
214
+ max_tip_int = self.greyscale_image[potential_tips].max()
215
+ low_pixels = self.greyscale_image <= max_tip_int # mean_tip_int# max_tip_int
216
+ else:
217
+ min_tip_int = self.greyscale_image[potential_tips].min()
218
+ high_pixels = self.greyscale_image >= min_tip_int # mean_tip_int
219
+
220
+ not_in_cell = 1 - self.possibly_filled_pixels
221
+ error_threshold = not_in_cell.sum() * 0.01
222
+ tolerances = np.arange(150, 0, - 1)
223
+ for t_i, tolerance in enumerate(tolerances):
224
+ potential_network = self.incomplete_network.copy()
225
+ for y, x in zip(potential_tips[0],
226
+ potential_tips[1]): # y, x =potential_tips[0][0], potential_tips[1][0]
227
+ filled = flood_fill(image=self.greyscale_image, seed_point=(y, x), new_value=255, tolerance=tolerance)
228
+ filled = filled == 255
229
+ if (filled * not_in_cell).sum() > error_threshold:
230
+ break
231
+ if self.lighter_background:
232
+ filled *= low_pixels
233
+ else:
234
+ filled *= high_pixels
235
+ potential_network[filled] = 1
236
+ # show(potential_network)
237
+ if not np.array_equal(potential_network, self.incomplete_network):
238
+ break
239
+ self.best_result['tolerance'] = tolerance
240
+
241
+ complete_network = potential_network * self.possibly_filled_pixels
242
+ complete_network = cv2.morphologyEx(complete_network, cv2.MORPH_CLOSE, cross_33)
243
+ self.complete_network, stats, centers = cc(complete_network)
244
+ self.complete_network[self.complete_network > 1] = 0
245
+
246
+
247
+ def detect_pseudopods(self):
248
+ pad_skeleton, pad_distances = morphology.medial_axis(self.incomplete_network, return_distance=True, rng=0)
249
+ pad_skeleton = pad_skeleton.astype(np.uint8)
250
+ pad_skeleton[pad_distances < self.best_result['width_threshold']] = 0
251
+ potential_tips = np.nonzero(pad_skeleton)
252
+ if self.lighter_background:
253
+ max_tip_int = self.greyscale_image[potential_tips].max()
254
+ else:
255
+ min_tip_int = self.greyscale_image[potential_tips].min()
256
+
257
+ complete_network = self.incomplete_network.copy()
258
+ for y, x in zip(potential_tips[0],
259
+ potential_tips[1]): # y, x =potential_tips[0][0], potential_tips[1][0]
260
+ filled = flood_fill(image=self.greyscale_image, seed_point=(y, x), new_value=255, tolerance=self.best_result['tolerance'])
261
+ filled = filled == 255
262
+ if self.lighter_background:
263
+ filled *= self.greyscale_image <= max_tip_int # mean_tip_int# max_tip_int
264
+ else:
265
+ filled *= self.greyscale_image >= min_tip_int # mean_tip_int
266
+ complete_network[filled] = 1
267
+
268
+ # Check that the current parameters do not produce images full of ones
269
+ # If so, update the width_threshold and tolerance parameters
270
+ not_in_cell = 1 - self.possibly_filled_pixels
271
+ error_threshold = not_in_cell.sum() * 0.1
272
+ if (complete_network * not_in_cell).sum() > error_threshold:
273
+ self.get_best_network_detection_method()
274
+ else:
275
+ complete_network = complete_network * self.possibly_filled_pixels
276
+ complete_network = cv2.morphologyEx(complete_network, cv2.MORPH_CLOSE, cross_33)
277
+ self.complete_network, stats, centers = cc(complete_network)
278
+ self.complete_network[self.complete_network > 1] = 0
279
+
280
+
281
+ def get_skeleton_and_widths(pad_network, pad_origin=None, pad_origin_centroid=None):
282
+ pad_skeleton, pad_distances = morphology.medial_axis(pad_network, return_distance=True, rng=0)
283
+ pad_skeleton = pad_skeleton.astype(np.uint8)
284
+ if pad_origin is not None:
285
+ pad_skeleton, pad_distances, pad_origin_contours = add_central_contour(pad_skeleton, pad_distances, pad_origin, pad_network, pad_origin_centroid)
286
+ else:
287
+ pad_origin_contours = None
288
+ # a = pad_skeleton[821:828, 643:649]
289
+ # new_skeleton2[821:828, 643:649]
290
+ pad_skeleton, pad_distances = remove_small_loops(pad_skeleton, pad_distances)
291
+
292
+ # width = 10
293
+ # pad_skeleton[pad_distances > width] = 0
294
+ pad_skeleton = keep_one_connected_component(pad_skeleton)
295
+ pad_distances *= pad_skeleton
296
+ # print(pad_skeleton.sum())
297
+ return pad_skeleton, pad_distances, pad_origin_contours
298
+ # width = 10
299
+ # skel_size = skeleton.sum()
300
+ # while width > 0 and skel_size > skeleton.sum() * 0.75:
301
+ # width -= 1
302
+ # skeleton = skeleton.copy()
303
+ # skeleton[distances > width] = 0
304
+ # # Only keep the largest connected component
305
+ # skeleton, stats, _ = cc(skeleton)
306
+ # skeleton[skeleton > 1] = 0
307
+ # skel_size = skeleton.sum()
308
+ # skeleton = pad_skeleton.copy()
309
+ # Remove the origin
310
+
311
+ def remove_small_loops(pad_skeleton, pad_distances=None):
312
+ """
313
+ New version:
314
+ New rule to add: when there is the pattern
315
+ [[x, 1, x],
316
+ [1, 0, 1],
317
+ [x, 1, x]]
318
+ Add a 1 at the center when all the other 1s are 3-connected
318
+ Otherwise, just remove the 1s that are 2-connected
320
+
321
+ Previous version:
322
+ When a zero is surrounded by 4-connected ones and only has 0s on its diagonals, replace the 1s by 0
322
+ and put a 1 in the center
324
+
325
+
326
+
327
+ :param pad_skeleton: padded binary skeleton image (uint8)
328
+ :return: the cleaned skeleton, and the updated pad_distances when it is provided
329
+ """
330
+ cnv4, cnv8 = get_neighbor_comparisons(pad_skeleton)
331
+ # potential_tips = get_terminations_and_their_connected_nodes(pad_skeleton, cnv4, cnv8)
332
+
333
+ cnv_diag_0 = CompareNeighborsWithValue(pad_skeleton, 0)
334
+ cnv_diag_0.is_equal(0, and_itself=True)
335
+
336
+ cnv4_false = CompareNeighborsWithValue(pad_skeleton, 4)
337
+ cnv4_false.is_equal(1, and_itself=False)
338
+
339
+ loop_centers = np.logical_and((cnv4_false.equal_neighbor_nb == 4), cnv_diag_0.equal_neighbor_nb > 2).astype(np.uint8)
340
+
341
+ surrounding = cv2.dilate(loop_centers, kernel=square_33)
342
+ surrounding -= loop_centers
343
+ surrounding = surrounding * cnv8.equal_neighbor_nb
344
+
345
+ # Every 2 can be replaced by 0 if the loop center becomes 1
346
+ filled_loops = pad_skeleton.copy()
347
+ filled_loops[surrounding == 2] = 0
348
+ filled_loops += loop_centers
349
+
350
+ new_pad_skeleton = morphology.skeletonize(filled_loops, method='lee')
351
+
352
+ # Put the new pixels in pad_distances
353
+ new_pixels = new_pad_skeleton * (1 - pad_skeleton)
354
+ pad_skeleton = new_pad_skeleton.astype(np.uint8)
355
+ if pad_distances is None:
356
+ return pad_skeleton
357
+ else:
358
+ pad_distances[np.nonzero(new_pixels)] = np.nan # 2. # Put nearest value instead?
359
+ pad_distances *= pad_skeleton
360
+ # for yi, xi in zip(npY, npX): # yi, xi = npY[0], npX[0]
361
+ # distances[yi, xi] = 2.
362
+ return pad_skeleton, pad_distances
363
+
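As a quick illustration of the loop rule above (a hypothetical toy input, not from the package): four 4-connected ones around an empty centre should be collapsed onto that centre pixel.

import numpy as np
from cellects.image_analysis.network_functions import remove_small_loops

toy = np.zeros((7, 7), dtype=np.uint8)
toy[2, 3] = toy[4, 3] = toy[3, 2] = toy[3, 4] = 1   # the [[0,1,0],[1,0,1],[0,1,0]] pattern
cleaned = remove_small_loops(toy)                    # pad_distances omitted: only the skeleton is returned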
364
+
365
+ def get_neighbor_comparisons(pad_skeleton):
366
+ cnv4 = CompareNeighborsWithValue(pad_skeleton, 4)
367
+ cnv4.is_equal(1, and_itself=True)
368
+ cnv8 = CompareNeighborsWithValue(pad_skeleton, 8)
369
+ cnv8.is_equal(1, and_itself=True)
370
+ return cnv4, cnv8
371
+
372
+
373
+ def keep_one_connected_component(pad_skeleton):
374
+ """
375
+ """
376
+ nb_pad_skeleton, stats, _ = cc(pad_skeleton)
377
+ pad_skeleton[nb_pad_skeleton > 1] = 0
378
+ return pad_skeleton
379
+
380
+
381
+ def keep_components_larger_than_one(pad_skeleton):
382
+ """
383
+ """
384
+ nb_pad_skeleton, stats, _ = cc(pad_skeleton)
385
+ for i in np.nonzero(stats[:, 4] == 1)[0]:
386
+ pad_skeleton[nb_pad_skeleton == i] = 0
387
+ # nb, nb_pad_skeleton = cv2.connectedComponents(pad_skeleton)
388
+ # pad_skeleton[nb_pad_skeleton > 1] = 0
389
+ return pad_skeleton
390
+
391
+
392
+ def get_vertices_and_tips_from_skeleton(pad_skeleton):
393
+ """
394
+ Find the vertices from a skeleton according to the following rules:
395
+ - Network terminations at the border are nodes
396
+ - The 4-connected nodes have priority over 8-connected nodes
397
+ :return:
398
+ """
399
+ cnv4, cnv8 = get_neighbor_comparisons(pad_skeleton)
400
+ potential_tips = get_terminations_and_their_connected_nodes(pad_skeleton, cnv4, cnv8)
401
+ pad_vertices, pad_tips = get_inner_vertices(pad_skeleton, potential_tips, cnv4, cnv8)
402
+ return pad_vertices, pad_tips
403
+
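For instance, on a small padded '+'-shaped toy skeleton (hypothetical input, not from the package), the four arm ends should come out as tips and the central pixel as a branching vertex:

import numpy as np
from cellects.image_analysis.network_functions import get_vertices_and_tips_from_skeleton

pad = np.zeros((7, 7), dtype=np.uint8)
pad[3, 1:6] = 1                 # horizontal bar
pad[1:6, 3] = 1                 # vertical bar crossing it at (3, 3)
vertices, tips = get_vertices_and_tips_from_skeleton(pad)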
404
+
405
+ def get_terminations_and_their_connected_nodes(pad_skeleton, cnv4, cnv8):
406
+ # All pixels having only one neighbor, and containing the value 1, are terminations for sure
407
+ potential_tips = np.zeros(pad_skeleton.shape, dtype=np.uint8)
408
+ potential_tips[cnv8.equal_neighbor_nb == 1] = 1
409
+ # Add more terminations using 4-connectivity
410
+ # If a pixel has one 4-connected neighbor and all of its 8-connected neighbors are 4-connected to each other, it is a termination
411
+
412
+ coord1_4 = cnv4.equal_neighbor_nb == 1
413
+ if np.any(coord1_4):
414
+ coord1_4 = np.nonzero(coord1_4)
415
+ for y1, x1 in zip(coord1_4[0], coord1_4[1]): # y1, x1 = 3,5
416
+ # If, in the neighborhood of the 1 (in 4), all (in 8) its neighbors are 4-connected together, and none of them are terminations, the 1 is a termination
417
+ is_4neigh = cnv4.equal_neighbor_nb[(y1 - 1):(y1 + 2), (x1 - 1):(x1 + 2)] != 0
418
+ all_4_connected = pad_skeleton[(y1 - 1):(y1 + 2), (x1 - 1):(x1 + 2)] == is_4neigh
419
+ is_not_term = 1 - potential_tips[y1, x1]
420
+ if np.all(all_4_connected * is_not_term):
421
+ is_4neigh[1, 1] = 0
422
+ is_4neigh = np.pad(is_4neigh, [(1,), (1,)], mode='constant')
423
+ cnv_4con = CompareNeighborsWithValue(is_4neigh, 4)
424
+ cnv_4con.is_equal(1, and_itself=True)
425
+ all_connected = (is_4neigh.sum() - (cnv_4con.equal_neighbor_nb > 0).sum()) == 0
426
+ # If they are connected, it can be a termination
427
+ if all_connected:
428
+ # If its closest neighbor is above 3 (in 8), this one is also a node
429
+ is_closest_above_3 = cnv8.equal_neighbor_nb[(y1 - 1):(y1 + 2), (x1 - 1):(x1 + 2)] * cross_33 > 3
430
+ if np.any(is_closest_above_3):
431
+ Y, X = np.nonzero(is_closest_above_3)
432
+ Y += y1 - 1
433
+ X += x1 - 1
434
+ potential_tips[Y, X] = 1
435
+ potential_tips[y1, x1] = 1
436
+ return potential_tips
437
+
438
+
439
+ def get_inner_vertices(pad_skeleton, potential_tips, cnv4, cnv8): # potential_tips=pad_tips
440
+ """
441
+ 1. Find connected vertices using the number of 8-connected neighbors
442
+ 2. The ones having 3 neighbors:
443
+ - are connected when in the neighborhood of the 3, there is at least a 2 (in 8) that is 0 (in 4), and not a termination
444
+ but only when it does not create an empty cross... (to do)
445
+ """
446
+
447
+ # Initiate the vertices final matrix as a copy of the potential_tips
448
+ pad_vertices = deepcopy(potential_tips)
449
+ for neighbor_nb in [8, 7, 6, 5, 4]:
450
+ # All pixels having neighbor_nb neighbor are potential vertices
451
+ potential_vertices = np.zeros(potential_tips.shape, dtype=np.uint8)
452
+
453
+ potential_vertices[cnv8.equal_neighbor_nb == neighbor_nb] = 1
454
+ # remove the false intersections that are a neighbor of a previously detected intersection
455
+ # Dilate vertices to make sure that no neighbors of the current potential vertices are already vertices.
456
+ dilated_previous_intersections = cv2.dilate(pad_vertices, cross_33, iterations=1)
457
+ potential_vertices *= (1 - dilated_previous_intersections)
458
+ pad_vertices[np.nonzero(potential_vertices)] = 1
459
+
460
+ # Having 3 neighbors is ambiguous
461
+ with_3_neighbors = cnv8.equal_neighbor_nb == 3
462
+ if np.any(with_3_neighbors):
463
+ # We compare 8-connections with 4-connections
464
+ # We loop over all 3 connected
465
+ coord_3 = np.nonzero(with_3_neighbors)
466
+ for y3, x3 in zip(coord_3[0], coord_3[1]): # y3, x3 = 3,7
467
+ # If, in the neighborhood of the 3, there is at least a 2 (in 8) that is 0 (in 4), and not a termination: the 3 is a node
468
+ has_2_8neigh = cnv8.equal_neighbor_nb[(y3 - 1):(y3 + 2), (x3 - 1):(x3 + 2)] > 0 # 1
469
+ has_2_8neigh_without_focal = has_2_8neigh.copy()
470
+ has_2_8neigh_without_focal[1, 1] = 0
471
+ node_but_not_term = pad_vertices[(y3 - 1):(y3 + 2), (x3 - 1):(x3 + 2)] * (1 - potential_tips[(y3 - 1):(y3 + 2), (x3 - 1):(x3 + 2)])
472
+ all_are_node_but_not_term = np.array_equal(has_2_8neigh_without_focal, node_but_not_term)
473
+ if np.any(has_2_8neigh * (1 - all_are_node_but_not_term)):
474
+ # At least 3 of the 8neigh are not connected:
475
+ has_2_8neigh_without_focal = np.pad(has_2_8neigh_without_focal, [(1,), (1,)], mode='constant')
476
+ cnv_8con = CompareNeighborsWithValue(has_2_8neigh_without_focal, 4)
477
+ cnv_8con.is_equal(1, and_itself=True)
478
+ disconnected_nb = has_2_8neigh_without_focal.sum() - (cnv_8con.equal_neighbor_nb > 0).sum()
479
+ if disconnected_nb > 2:
480
+ pad_vertices[y3, x3] = 1
481
+ # Now there may be too many vertices:
482
+ # - Those that are 4-connected:
483
+ nb, sh, st, ce = cv2.connectedComponentsWithStats(pad_vertices, connectivity=4)
484
+ problematic_vertices = np.nonzero(st[:, 4] > 1)[0][1:]
485
+ for prob_v in problematic_vertices:
486
+ vertices_group = sh == prob_v
487
+ # If there is a tip in the group, do
488
+ if np.any(potential_tips[vertices_group]):
489
+ # Change the most connected one from tip to vertex
490
+ curr_neighbor_nb = cnv8.equal_neighbor_nb * vertices_group
491
+ wrong_tip = np.nonzero(curr_neighbor_nb == curr_neighbor_nb.max())
492
+ potential_tips[wrong_tip] = 0
493
+ else:
494
+ # otherwise do:
495
+ # Find the most 4-connected one, and check whether
496
+ # its 4 connected neighbors have 1 or more other connexions
497
+ # 1. # Find the most 4-connected one:
498
+ vertices_group_4 = cnv4.equal_neighbor_nb * vertices_group
499
+ max_con = vertices_group_4.max()
500
+ most_con = np.nonzero(vertices_group_4 == max_con)
501
+ # 2. Check its 4-connected neighbors and remove those having only 1 other 8-connexion
502
+ skel_copy = pad_skeleton.copy()
503
+ skel_copy[most_con] = 0
504
+ skel_copy[most_con[0] - 1, most_con[1]] = 0
505
+ skel_copy[most_con[0] + 1, most_con[1]] = 0
506
+ skel_copy[most_con[0], most_con[1] - 1] = 0
507
+ skel_copy[most_con[0], most_con[1] + 1] = 0
508
+ sub_cnv8 = CompareNeighborsWithValue(skel_copy, 8)
509
+ sub_cnv8.is_equal(1, and_itself=False)
510
+ # Remove those having only one other 8-connexion
511
+ v_to_remove = ((vertices_group_4 > 0) * sub_cnv8.equal_neighbor_nb) == 1
512
+ pad_vertices[v_to_remove] = 0
513
+
514
+ # Other vertices to remove:
515
+ # - Those that are forming a cross with 0 at the center while the skeleton contains 1
516
+ cnv4_false = CompareNeighborsWithValue(pad_vertices, 4)
517
+ cnv4_false.is_equal(1, and_itself=False)
518
+ cross_vertices = cnv4_false.equal_neighbor_nb == 4
519
+ wrong_cross_vertices = cross_vertices * pad_skeleton
520
+ if wrong_cross_vertices.any():
521
+ pad_vertices[np.nonzero(wrong_cross_vertices)] = 1
522
+ cross_fix = cv2.dilate(wrong_cross_vertices, kernel=cross_33, iterations=1)
523
+ # Remove the 4-connected vertices that have no more than 4 8-connected neighbors
524
+ # i.e. the three on the side of the surrounded 0 and only one on edge on the other side
525
+ cross_fix = ((cnv8.equal_neighbor_nb * cross_fix) == 4) * (1 - wrong_cross_vertices)
526
+ pad_vertices *= (1 - cross_fix)
527
+ return pad_vertices, potential_tips
528
+
529
+
530
+ def get_branches_and_tips_coord(pad_vertices, pad_tips):
531
+ pad_branches = pad_vertices - pad_tips
532
+ branch_v_coord = np.transpose(np.array(np.nonzero(pad_branches)))
533
+ tips_coord = np.transpose(np.array(np.nonzero(pad_tips)))
534
+ return branch_v_coord, tips_coord
535
+
536
+
537
+ class EdgeIdentification:
538
+ def __init__(self, pad_skeleton):
539
+ self.pad_skeleton = pad_skeleton
540
+ self.remaining_vertices = None
541
+ self.vertices = None
542
+ self.growing_vertices = None
543
+ self.im_shape = pad_skeleton.shape
544
+
545
+ def get_vertices_and_tips_coord(self):
546
+ pad_vertices, pad_tips = get_vertices_and_tips_from_skeleton(self.pad_skeleton)
547
+ self.non_tip_vertices, self.tips_coord = get_branches_and_tips_coord(pad_vertices, pad_tips)
548
+
549
+ def get_tipped_edges(self):
550
+ self.pad_skeleton = keep_one_connected_component(self.pad_skeleton)
551
+ self.vertices_branching_tips, self.edge_lengths, self.edge_pix_coord = find_closest_vertices(self.pad_skeleton,
552
+ self.non_tip_vertices,
553
+ self.tips_coord[:, :2])
554
+
555
+ def remove_tipped_edge_smaller_than_branch_width(self, pad_distances):
556
+ """
557
+ Problem: when an edge is removed and its branching vertex is no longer a vertex,
558
+ any other tipped edge connected to that vertex ends up with a wrong length, pixel coordinates, and branching vertex.
559
+ Solution: re-run self.get_tipped_edges() afterwards (done at the end of this method).
560
+
561
+ a=pad_skeleton.copy()
562
+ a[self.tips_coord[:, 0],self.tips_coord[:, 1]] = 2
563
+ aa=a[632:645, 638:651]
564
+ Yt,Xt=632+10,638+1
565
+ np.nonzero(np.all(self.tips_coord[:, :2] == [t1Y, t1X], axis=1))
566
+ np.nonzero(np.all(self.tips_coord[:, :2] == [t2Y, t2X], axis=1))
567
+ i = 3153
568
+ """
569
+ self.pad_distances = pad_distances
570
+ # Identify edges that are smaller than the width of the branch it is attached to
571
+ tipped_edges_to_remove = np.zeros(self.edge_lengths.shape[0], dtype=bool)
572
+ # connecting_vertices_to_remove = np.zeros(self.vertices_branching_tips.shape[0], dtype=bool)
573
+ branches_to_remove = np.zeros(self.non_tip_vertices.shape[0], dtype=bool)
574
+ new_edge_pix_coord = []
575
+ remaining_tipped_edges_nb = 0
576
+ for i in range(len(self.edge_lengths)): # i = 3142 #1096 # 974 # 222
577
+ Y, X = self.vertices_branching_tips[i, 0], self.vertices_branching_tips[i, 1]
578
+ edge_bool = self.edge_pix_coord[:, 2] == i + 1
579
+ eY, eX = self.edge_pix_coord[edge_bool, 0], self.edge_pix_coord[edge_bool, 1]
580
+ if np.nanmax(pad_distances[(Y - 1): (Y + 2), (X - 1): (X + 2)]) >= self.edge_lengths[i]:
581
+ tipped_edges_to_remove[i] = True
582
+ # Remove the edge
583
+ self.pad_skeleton[eY, eX] = 0
584
+ # Remove the tip
585
+ self.pad_skeleton[self.tips_coord[i, 0], self.tips_coord[i, 1]] = 0
586
+
587
+ # Remove the coordinates corresponding to that edge
588
+ self.edge_pix_coord = np.delete(self.edge_pix_coord, edge_bool, 0)
589
+
590
+ # check whether the connecting vertex remains a vertex or not
591
+ pad_sub_skeleton = np.pad(self.pad_skeleton[(Y - 2): (Y + 3), (X - 2): (X + 3)], [(1,), (1,)],
592
+ mode='constant')
593
+ sub_vertices, sub_tips = get_vertices_and_tips_from_skeleton(pad_sub_skeleton)
594
+ # If the vertex does not connect at least 3 edges anymore, remove its vertex label
595
+ if sub_vertices[3, 3] == 0:
596
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
597
+ branches_to_remove[vertex_to_remove] = True
598
+ # If that pixel became a tip connected to another vertex remove it from the skeleton
599
+ if sub_tips[3, 3]:
600
+ if sub_vertices[2:5, 2:5].sum() > 1:
601
+ self.pad_skeleton[Y, X] = 0
602
+ self.edge_pix_coord = np.delete(self.edge_pix_coord, np.all(self.edge_pix_coord[:, :2] == [Y, X], axis=1), 0)
603
+ vertex_to_remove = np.nonzero(np.logical_and(self.non_tip_vertices[:, 0] == Y, self.non_tip_vertices[:, 1] == X))[0]
604
+ branches_to_remove[vertex_to_remove] = True
605
+ else:
606
+ remaining_tipped_edges_nb += 1
607
+ new_edge_pix_coord.append(np.stack((eY, eX, np.repeat(remaining_tipped_edges_nb, len(eY))), axis=1))
608
+
609
+ # Check whether the excess connected components are 1 pixel in size; if so,
610
+ # they were neighbors of removed tips and are not necessary for the skeleton
611
+ nb, sh = cv2.connectedComponents(self.pad_skeleton)
612
+ if nb > 2:
613
+ for i in range(2, nb):
614
+ excedent = sh == i
615
+ if (excedent).sum() == 1:
616
+ self.pad_skeleton[excedent] = 0
617
+ # else:
618
+ # print("More than one pixel area excedent components exists")
619
+
620
+ # Remove in distances the pixels removed in skeleton:
621
+ self.pad_distances *= self.pad_skeleton
622
+
623
+ # update edge_pix_coord
624
+ self.edge_pix_coord = np.vstack(new_edge_pix_coord)
625
+
626
+ # Remove tips connected to very small edges
627
+ self.tips_coord = np.delete(self.tips_coord, tipped_edges_to_remove, 0)
628
+ # Add corresponding edge names
629
+ self.tips_coord = np.hstack((self.tips_coord, np.arange(1, len(self.tips_coord) + 1)[:, None]))
630
+
631
+ # Within all branching (non-tip) vertices, keep those that did not lose their vertex status because of the edge removal
632
+ self.non_tip_vertices = np.delete(self.non_tip_vertices, branches_to_remove, 0)
633
+
634
+ # Get the branching vertices that kept their tipped edge
635
+ self.vertices_branching_tips = np.delete(self.vertices_branching_tips, tipped_edges_to_remove, 0)
636
+
637
+ # Within all branching (non-tip) vertices, keep those that do not connect a tipped edge.
638
+ v_branching_tips_in_branching_v = find_common_coord(self.non_tip_vertices, self.vertices_branching_tips[:, :2])
639
+ self.remaining_vertices = np.delete(self.non_tip_vertices, v_branching_tips_in_branching_v, 0)
640
+ ordered_v_coord = np.vstack((self.tips_coord[:, :2], self.vertices_branching_tips[:, :2], self.remaining_vertices))
641
+
642
+ # tips = self.tips_coord
643
+ # branching_any_edge = self.non_tip_vertices
644
+ # branching_typped_edges = self.vertices_branching_tips
645
+ # branching_no_typped_edges = self.remaining_vertices
646
+
647
+ self.get_vertices_and_tips_coord()
648
+ self.get_tipped_edges()
649
+
650
+ def label_tipped_edges_and_their_vertices(self):
651
+ self.tip_number = self.tips_coord.shape[0]
652
+
653
+ # Stack vertex coordinates in that order: 1. Tips, 2. Vertices branching tips, 3. All remaining vertices
654
+ ordered_v_coord = np.vstack((self.tips_coord[:, :2], self.vertices_branching_tips[:, :2], self.non_tip_vertices))
655
+ ordered_v_coord = np.unique(ordered_v_coord, axis=0)
656
+
657
+ # Create arrays to store edges and vertices labels
658
+ self.numbered_vertices = np.zeros(self.im_shape, dtype=np.uint32)
659
+ self.numbered_vertices[ordered_v_coord[:, 0], ordered_v_coord[:, 1]] = np.arange(1, ordered_v_coord.shape[0] + 1)
660
+ self.vertices = None
661
+
662
+ # Name edges from 1 to the number of edges connecting tips and set the vertices labels from all tips to their connected vertices:
663
+ self.edges_labels = np.zeros((self.tip_number, 3), dtype=np.uint32)
664
+ # edge label:
665
+ self.edges_labels[:, 0] = np.arange(self.tip_number) + 1
666
+ # tip label:
667
+ self.edges_labels[:, 1] = self.numbered_vertices[self.tips_coord[:, 0], self.tips_coord[:, 1]]
668
+ # vertex branching tip label:
669
+ self.edges_labels[:, 2] = self.numbered_vertices[self.vertices_branching_tips[:, 0], self.vertices_branching_tips[:, 1]]
670
+
671
+ # Remove duplicates in vertices_branching_tips
672
+ self.vertices_branching_tips = np.unique(self.vertices_branching_tips[:, :2], axis=0)
673
+
674
+ def identify_all_other_edges(self):
675
+ # I. Identify edges connected to connected vertices and their own connexions:
676
+ # II. Identify all remaining edges
677
+ self.obsn = np.zeros_like(self.numbered_vertices) # DEBUG
678
+ self.obsn[np.nonzero(self.pad_skeleton)] = 1 # DEBUG
679
+ self.obsn[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 2 # DEBUG
680
+ self.obi = 2 # DEBUG
681
+
682
+ # I.1. Identify edges connected to touching vertices:
683
+ # First, create another version of these arrays, where we remove every already detected edge and their tips
684
+ cropped_skeleton = self.pad_skeleton.copy()
685
+ cropped_skeleton[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 0
686
+ cropped_skeleton[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 0
687
+
688
+ # non_tip_vertices does not need to be updated yet, because it only contains verified branching vertices
689
+ cropped_non_tip_vertices = self.non_tip_vertices.copy()
690
+
691
+ self.new_level_vertices = None
692
+ # Fix the vertex_to_vertex_connexion problem
693
+ # The problem with vertex_to_vertex_connexion is that since they are not separated by zeros,
694
+ # they always attract each other instead of exploring other paths.
695
+ # To fix this, we loop over each vertex group to
696
+ # 1. Add one edge per inter-vertex connexion inside the group
697
+ # 2. Remove all except one, and loop as many times as necessary.
698
+ # Inside that second loop, we explore and identify every edge nearby.
699
+ # Find every vertex_to_vertex_connexion
700
+ v_grp_nb, v_grp_lab, v_grp_stats, vgc = cv2.connectedComponentsWithStats(
701
+ (self.numbered_vertices > 0).astype(np.uint8), connectivity=8)
702
+ max_v_nb = np.max(v_grp_stats[1:, 4])
703
+ cropped_skeleton_list = []
704
+ starting_vertices_list = []
705
+ for v_nb in range(2, max_v_nb + 1):
706
+ labels = np.nonzero(v_grp_stats[:, 4] == v_nb)[0]
707
+ coord_list = []
708
+ for lab in labels: # lab=labels[0]
709
+ coord_list.append(np.nonzero(v_grp_lab == lab))
710
+ for iter in range(v_nb):
711
+ for lab_ in range(labels.shape[0]): # lab=labels[0]
712
+ cs = cropped_skeleton.copy()
713
+ sv = []
714
+ v_c = coord_list[lab_]
715
+ # Save the current coordinate in the starting vertices array of this iteration
716
+ sv.append([v_c[0][iter], v_c[1][iter]])
717
+ # Remove one vertex coordinate to keep it from cs
718
+ v_y, v_x = np.delete(v_c[0], iter), np.delete(v_c[1], iter)
719
+ cs[v_y, v_x] = 0
720
+ cropped_skeleton_list.append(cs)
721
+ starting_vertices_list.append(np.array(sv))
722
+
723
+ for cropped_skeleton, starting_vertices in zip(cropped_skeleton_list, starting_vertices_list):
724
+ _, _ = self.identify_edges_connecting_a_vertex_list(cropped_skeleton, cropped_non_tip_vertices, starting_vertices)
725
+
726
+ # I.2. Identify the connexions between connected vertices:
727
+ all_connected_vertices = np.nonzero(v_grp_stats[:, 4] > 1)[0][1:]
728
+ all_con_v_im = np.zeros_like(cropped_skeleton)
729
+ for v_group in all_connected_vertices:
730
+ all_con_v_im[v_grp_lab == v_group] = 1
731
+ cropped_skeleton = all_con_v_im
732
+ vertex_groups_coord = np.transpose(np.array(np.nonzero(cropped_skeleton)))
733
+ # cropped_non_tip_vertices, starting_vertices_coord = vertex_groups_coord, vertex_groups_coord
734
+ _, _ = self.identify_edges_connecting_a_vertex_list(cropped_skeleton, vertex_groups_coord, vertex_groups_coord)
735
+ # self.edges_labels
736
+
737
+ # II/ Identify all remaining edges
738
+ if self.new_level_vertices is not None:
739
+ starting_vertices_coord = np.vstack((self.new_level_vertices[:, :2], self.vertices_branching_tips))
740
+ starting_vertices_coord = np.unique(starting_vertices_coord, axis=0)
741
+ else:
742
+ # We start from the vertices connecting tips
743
+ starting_vertices_coord = self.vertices_branching_tips.copy()
744
+
745
+ # Remove the detected edges from cropped_skeleton:
746
+ cropped_skeleton = self.pad_skeleton.copy()
747
+ cropped_skeleton[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 0
748
+ cropped_skeleton[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 0
749
+ cropped_skeleton[vertex_groups_coord[:, 0], vertex_groups_coord[:, 1]] = 0
750
+
751
+ # Reinitialize cropped_non_tip_vertices to browse all vertices except tips and groups
752
+ cropped_non_tip_vertices = self.non_tip_vertices.copy()
753
+ cropped_non_tip_vertices = remove_coordinates(cropped_non_tip_vertices, vertex_groups_coord)
754
+ remaining_v = cropped_non_tip_vertices.shape[0] + 1
755
+ while remaining_v > cropped_non_tip_vertices.shape[0]:
756
+ remaining_v = cropped_non_tip_vertices.shape[0]
757
+ cropped_skeleton, cropped_non_tip_vertices = self.identify_edges_connecting_a_vertex_list(cropped_skeleton, cropped_non_tip_vertices, starting_vertices_coord)
758
+
759
+ if self.new_level_vertices is None:
760
+ break
761
+ else:
762
+ starting_vertices_coord = np.unique(self.new_level_vertices[:, :2], axis=0)
763
+
764
+ identified_skeleton = np.zeros_like(self.numbered_vertices)
765
+ identified_skeleton[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 1
766
+ identified_skeleton[self.non_tip_vertices[:, 0], self.non_tip_vertices[:, 1]] = 1
767
+ identified_skeleton[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 1
768
+ not_identified = (1 - identified_skeleton) * self.pad_skeleton
769
+
770
+ # Find out the remaining non-identified pixels
771
+ nb, sh, st, ce = cv2.connectedComponentsWithStats(not_identified.astype(np.uint8))
772
+
773
+ # Handle the cases where edges are loops over only one vertex
774
+ looping_edges = np.nonzero(st[:, 4 ] > 2)[0][1:]
775
+ for loop_i in looping_edges: # loop_i = looping_edges[0]
776
+ edge_i = (sh == loop_i).astype(np.uint8)
777
+ dil_edge_i = cv2.dilate(edge_i, square_33)
778
+ unique_vertices_im = self.numbered_vertices.copy()
779
+ unique_vertices_im[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 0
780
+ unique_vertices_im = dil_edge_i * unique_vertices_im
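A minimal usage sketch of the NetworkDetection class above (the synthetic image and variable names are illustrative, not taken from the package):

import numpy as np
from cellects.image_analysis.network_functions import NetworkDetection

grey = np.random.default_rng(0).integers(0, 256, (200, 200)).astype(np.uint8)  # stand-in image
mask = np.ones_like(grey)                   # "possibly filled" pixels: everything, for the demo
nd = NetworkDetection(grey, mask, lighter_background=True)
nd.get_best_network_detection_method()      # try the Frangi/Sato variants and keep the best one
network = nd.detect_network()               # re-apply the winning filter and threshold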
781
+ unique_vertices = np.unique(unique_vertices_im)
782
+ unique_vertices = unique_vertices[unique_vertices > 0]
783
+ if len(unique_vertices) == 1:
784
+ start, end = unique_vertices[0], unique_vertices[0]
785
+ new_edge_lengths = edge_i.sum()
786
+ new_edge_pix_coord = np.transpose(np.vstack((np.nonzero(edge_i))))
787
+ new_edge_pix_coord = np.hstack((new_edge_pix_coord, np.repeat(1, new_edge_pix_coord.shape[0])[:, None])) # np.arange(1, new_edge_pix_coord.shape[0] + 1)[:, None]))
788
+ self.update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
789
+ self.obsn[new_edge_pix_coord[:, 0], new_edge_pix_coord[:, 1]] -= 10 # DEBUG
790
+ else:
791
+ print(f"Other long edges cannot be identified: i={loop_i} of len={edge_i.sum()}")
792
+ identified_skeleton[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = 1
793
+
794
+ # Check whether the 1 or 2 pixel size non-identified areas can be removed without breaking the skel
795
+ one_pix = np.nonzero(st[:, 4 ] <= 2)[0] # == 1)[0]
796
+ cutting_removal = []
797
+ for pix_i in one_pix: #pix_i=one_pix[0]
798
+ skel_copy = self.pad_skeleton.copy()
799
+ y1, y2, x1, x2 = st[pix_i, 1], st[pix_i, 1] + st[pix_i, 3], st[pix_i, 0], st[pix_i, 0] + st[pix_i, 2]
800
+ skel_copy[y1:y2, x1:x2][sh[y1:y2, x1:x2] == pix_i] = 0
801
+ nb1, sh1 = cv2.connectedComponents(skel_copy.astype(np.uint8), connectivity=8)
802
+ if nb1 > 2:
803
+ cutting_removal.append(pix_i)
804
+ else:
805
+ self.pad_skeleton[y1:y2, x1:x2][sh[y1:y2, x1:x2] == pix_i] = 0
806
+ if len(cutting_removal) > 0:
807
+ print(f"These pixels break the skeleton when removed: {cutting_removal}")
808
+ # print(100 * (identified_skeleton > 0).sum() / self.pad_skeleton.sum())
809
+ self.pad_distances *= self.pad_skeleton
810
+
811
+
812
+ def identify_edges_connecting_a_vertex_list(self, cropped_skeleton, cropped_non_tip_vertices, starting_vertices_coord):
813
+ explored_connexions_per_vertex = 0  # counts how many connexions per vertex have been explored (capped by the while condition below)
814
+ new_connexions = True
815
+ while new_connexions and explored_connexions_per_vertex < 5 and np.any(cropped_non_tip_vertices) and np.any(starting_vertices_coord):
816
+ # print(new_connexions)
817
+ explored_connexions_per_vertex += 1
818
+ # 1. Find the ith closest vertex to each focal vertex
819
+ ending_vertices_coord, new_edge_lengths, new_edge_pix_coord = find_closest_vertices(
820
+ cropped_skeleton, cropped_non_tip_vertices, starting_vertices_coord)
821
+ if np.isnan(new_edge_lengths).sum() + (new_edge_lengths == 0).sum() == new_edge_lengths.shape[0]:
822
+ new_connexions = False
823
+ else:
824
+ # In new_edge_lengths, zeros are duplicates and nan are lone vertices (from starting_vertices_coord)
825
+ # Find out which starting_vertices_coord should be kept and which one should be used to save edges
826
+ no_new_connexion = np.isnan(new_edge_lengths)
827
+ no_found_connexion = np.logical_or(no_new_connexion, new_edge_lengths == 0)
828
+ found_connexion = np.logical_not(no_found_connexion)
829
+
830
+ # Any vertex_to_vertex_connexions must be analyzed only once. We remove them with the non-connectable vertices
831
+ vertex_to_vertex_connexions = new_edge_lengths == 1
832
+
833
+ # Save edge data
834
+ start = self.numbered_vertices[
835
+ starting_vertices_coord[found_connexion, 0], starting_vertices_coord[found_connexion, 1]]
836
+ end = self.numbered_vertices[
837
+ ending_vertices_coord[found_connexion, 0], ending_vertices_coord[found_connexion, 1]]
838
+ new_edge_lengths = new_edge_lengths[found_connexion]
839
+ self.update_edge_data(start, end, new_edge_lengths, new_edge_pix_coord)
840
+
841
+ no_new_connexion = np.logical_or(no_new_connexion, vertex_to_vertex_connexions)
842
+ vertices_to_crop = starting_vertices_coord[no_new_connexion, :]
843
+
844
+ # Remove non-connectable and connected_vertices from:
845
+ cropped_non_tip_vertices = remove_coordinates(cropped_non_tip_vertices, vertices_to_crop)
846
+ starting_vertices_coord = remove_coordinates(starting_vertices_coord, vertices_to_crop)
847
+
848
+ if new_edge_pix_coord.shape[0] > 0:
849
+ # Update cropped_skeleton to not identify each edge more than once
850
+ cropped_skeleton[new_edge_pix_coord[:, 0], new_edge_pix_coord[:, 1]] = 0
851
+ self.obi += 1 # DEBUG
852
+ self.obsn[new_edge_pix_coord[:, 0], new_edge_pix_coord[:, 1]] = self.obi # DEBUG
853
+
854
+ # And the starting vertices that cannot connect anymore
855
+ cropped_skeleton[vertices_to_crop[:, 0], vertices_to_crop[:, 1]] = 0
856
+
857
+ if self.new_level_vertices is None:
858
+ self.new_level_vertices = ending_vertices_coord[found_connexion, :].copy()
859
+ else:
860
+ self.new_level_vertices = np.vstack((self.new_level_vertices, ending_vertices_coord[found_connexion, :]))
861
+ return cropped_skeleton, cropped_non_tip_vertices
862
+
863
+ def update_edge_data(self, start, end, new_edge_lengths, new_edge_pix_coord):
864
+ if isinstance(start, np.ndarray):
865
+ end_idx = len(start)
866
+ self.edge_lengths = np.concatenate((self.edge_lengths, new_edge_lengths))
867
+ else:
868
+ end_idx = 1
869
+ self.edge_lengths = np.append(self.edge_lengths, new_edge_lengths)
870
+ start_idx = self.edges_labels.shape[0]
871
+ new_edges = np.zeros((end_idx, 3), dtype=np.uint32)
872
+ new_edges[:, 0] = np.arange(start_idx, start_idx + end_idx) + 1 # edge label
873
+ new_edges[:, 1] = start # starting vertex label
874
+ new_edges[:, 2] = end # ending vertex label
875
+ self.edges_labels = np.vstack((self.edges_labels, new_edges))
876
+ # Add the new edge coord
877
+ if new_edge_pix_coord.shape[0] > 0:
878
+ # Add the new edge pixel coord
879
+ new_edge_pix_coord[:, 2] += start_idx
880
+ self.edge_pix_coord = np.vstack((self.edge_pix_coord, new_edge_pix_coord))
881
+
882
+ def remove_edge_duplicates(self):
883
+ edges_to_remove = []
884
+ duplicates = find_duplicates_coord(np.vstack((self.edges_labels[:, 1:], self.edges_labels[:, :0:-1])))
885
+ duplicates = np.logical_or(duplicates[:len(duplicates)//2], duplicates[len(duplicates)//2:])
886
+ for v in self.edges_labels[duplicates, 1:]: #v = self.edges_labels[duplicates, 1:][4]
887
+ edge_lab1 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v, axis=1), 0]
888
+ edge_lab2 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v[::-1], axis=1), 0]
889
+ edge_labs = np.unique(np.concatenate((edge_lab1, edge_lab2)))
890
+ for edge_i in range(0, len(edge_labs) - 1): # edge_i = 0
891
+ edge_i_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_labs[edge_i], :2]
892
+ for edge_j in range(edge_i + 1, len(edge_labs)): # edge_j = 1
893
+ edge_j_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_labs[edge_j], :2]
894
+ if np.array_equal(edge_i_coord, edge_j_coord):
895
+ edges_to_remove.append(edge_labs[edge_j])
896
+
897
+ for edge in edges_to_remove:
898
+ edge_bool = self.edges_labels[:, 0] != edge
899
+ self.edges_labels = self.edges_labels[edge_bool, :]
900
+ self.edge_lengths = self.edge_lengths[edge_bool]
901
+ self.edge_pix_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] != edge, :]
902
+
903
+
904
+ def remove_vertices_connecting_2_edges(self):
905
+ """
906
+ Find all vertices connecting 2 edges
907
+ For each:
908
+ If it is a tip (which, at this stage, can no longer happen),
909
+ remove one connexion
910
+ else
911
+ Get the 2 edges id and their 2nd vertex
912
+ Make them have the same edge_id and update (with edge and vertex):
913
+ self.edges_labels, self.edge_lengths, self.edge_pix_coord
914
+ Remove the vertex in
915
+ self.numbered_vertices, self.non_tip_vertices
916
+ """
917
+ v_labels, v_counts = np.unique(self.edges_labels[:, 1:], return_counts=True)
918
+ vertices2 = v_labels[v_counts == 2]
919
+ for vertex2 in vertices2: # vertex2 = vertices2[0]
920
+ edge_indices = np.nonzero(self.edges_labels[:, 1:] == vertex2)[0]
921
+ edge_names = [self.edges_labels[edge_indices[0], 0], self.edges_labels[edge_indices[1], 0]]
922
+ v_names = np.concatenate((self.edges_labels[edge_indices[0], 1:], self.edges_labels[edge_indices[1], 1:]))
923
+ v_names = v_names[v_names != vertex2]
924
+ kept_edge = int(self.edge_lengths[edge_indices[1]] >= self.edge_lengths[edge_indices[0]])
925
+
926
+ # Rename the removed edge by the kept edge name in pix_coord:
927
+ self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_names[1 - kept_edge], 2] = edge_names[kept_edge]
928
+ # Add the removed edge length to the kept edge length
929
+ self.edge_lengths[self.edges_labels[:, 0] == edge_names[kept_edge]] += self.edge_lengths[self.edges_labels[:, 0] == edge_names[1 - kept_edge]]
930
+ # Remove the corresponding edge length from the list
931
+ self.edge_lengths = self.edge_lengths[self.edges_labels[:, 0] != edge_names[1 - kept_edge]]
932
+ # Rename the vertex of the kept edge in edges_labels
933
+ self.edges_labels[self.edges_labels[:, 0] == edge_names[kept_edge], 1:] = v_names[1 - kept_edge], v_names[kept_edge]
934
+ # Remove the removed edge from the edges_labels array
935
+ self.edges_labels = self.edges_labels[self.edges_labels[:, 0] != edge_names[1 - kept_edge], :]
936
+
937
+ vY, vX = np.nonzero(self.numbered_vertices == vertex2)
938
+ v_idx = np.nonzero(np.all(self.non_tip_vertices == [vY[0], vX[0]], axis=1))
939
+ self.non_tip_vertices = np.delete(self.non_tip_vertices, v_idx, axis=0)
940
+
941
+ def remove_padding(self):
942
+ self.edge_pix_coord[:, :2] -= 1
943
+ self.tips_coord[:, :2] -= 1
944
+ self.non_tip_vertices[:, :2] -= 1
945
+ self.skeleton, self.distances, self.vertices = remove_padding(
946
+ [self.pad_skeleton, self.pad_distances, self.numbered_vertices])
947
+
948
+ def find_growing_vertices(self, origin_contours=None, origin_centeroid=None):
949
+ if origin_contours is not None:
950
+ if self.vertices is None:
951
+ self.remove_padding()
952
+ edge_widths_copy = self.distances.copy()
953
+ edge_widths_copy[origin_contours > 0] = 0
954
+ pot_growing_skel = edge_widths_copy > np.quantile(edge_widths_copy[edge_widths_copy > 0], .9)
955
+ dist_from_center = np.ones(self.vertices.shape, dtype=np.float64)
956
+ dist_from_center[origin_centeroid[0], origin_centeroid[1]] = 0
957
+ dist_from_center = distance_transform_edt(dist_from_center)
958
+ dist_from_center *= pot_growing_skel
959
+ growing_skel = dist_from_center > np.quantile(dist_from_center[dist_from_center > 0], .7)
960
+ self.growing_vertices = np.unique(self.vertices * growing_skel)[1:]
961
+
962
+
963
+ def make_vertex_table(self, origin_contours=None):
964
+ """
965
+ Give the coordinates, label, and nature of each vertex.
966
+ The nature encodes:
967
+ - whether it is a tip or a branching vertex
968
+ - whether it is a network, food, or growing vertex
969
+ """
970
+ if self.vertices is None:
971
+ self.remove_padding()
972
+ self.vertex_table = np.zeros((self.tips_coord.shape[0] + self.non_tip_vertices.shape[0], 6), dtype=self.vertices.dtype)
973
+ self.vertex_table[:self.tips_coord.shape[0], :2] = self.tips_coord
974
+ self.vertex_table[self.tips_coord.shape[0]:, :2] = self.non_tip_vertices
975
+ self.vertex_table[:self.tips_coord.shape[0], 2] = self.vertices[self.tips_coord[:, 0], self.tips_coord[:, 1]]
976
+ self.vertex_table[self.tips_coord.shape[0]:, 2] = self.vertices[self.non_tip_vertices[:, 0], self.non_tip_vertices[:, 1]]
977
+ self.vertex_table[:self.tips_coord.shape[0], 3] = 1
978
+ if origin_contours is not None:
979
+ food_vertices = self.vertices[origin_contours > 0]
980
+ food_vertices = food_vertices[food_vertices > 0]
981
+ self.vertex_table[np.isin(self.vertex_table[:, 2], food_vertices), 4] = 1
982
+
983
+ if self.growing_vertices is not None:
984
+ self.vertex_table[:, 4] = 0
985
+ growing = np.isin(self.vertex_table[:, 2], self.growing_vertices)
986
+ self.vertex_table[growing, 4] = 2
987
+
988
+ nb, sh, stats, cent = cv2.connectedComponentsWithStats((self.vertices > 0).astype(np.uint8))
989
+ for i, v_i in enumerate(np.nonzero(stats[:, 4] > 1)[0][1:]):
990
+ v_labs = self.vertices[sh == v_i]
991
+ for v_lab in v_labs: # v_lab = v_labs[0]
992
+ self.vertex_table[self.vertex_table[:, 2] == v_lab, 5] = 1
993
+
994
+
995
+ def make_edge_table(self, greyscale):
996
+ self.edge_table = np.zeros((self.edges_labels.shape[0], 7), float) # edge_id, vertex1, vertex2, length, average_width, int, BC
997
+ self.edge_table[:, :3] = self.edges_labels[:, :]
998
+ self.edge_table[:, 3] = self.edge_lengths
999
+ for idx, edge_lab in enumerate(self.edges_labels[:, 0]):
1000
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :]
1001
+ v_id = self.edges_labels[self.edges_labels[:, 0] == edge_lab, 1:][0]
1002
+ v1_width, v2_width = self.distances[self.vertices == v_id[0]], self.distances[self.vertices == v_id[1]]
1003
+ pix_widths = np.concatenate((v1_width, v2_width))
1004
+ v1_int, v2_int = greyscale[self.vertices == v_id[0]], greyscale[self.vertices == v_id[1]]
1005
+ pix_ints = np.concatenate((v1_int, v2_int))
1006
+ if len(edge_coord) > 0:
1007
+ pix_widths = np.append(pix_widths, self.distances[edge_coord[:, 0], edge_coord[:, 1]])
1008
+ pix_ints = np.append(pix_ints, greyscale[edge_coord[:, 0], edge_coord[:, 1]])
1009
+ self.edge_table[idx, 4] = pix_widths.mean()
1010
+ self.edge_table[idx, 5] = pix_ints.mean()
1011
+
1012
+ G = nx.from_edgelist(self.edges_labels[:, 1:])
1013
+ e_BC = nx.edge_betweenness_centrality(G, seed=0)
1014
+ self.BC_net = np.zeros_like(self.distances)
1015
+ for v, k in e_BC.items(): # v=(81, 80)
1016
+ v1_coord = self.vertex_table[self.vertex_table[:, 2] == v[0], :2]
1017
+ v2_coord = self.vertex_table[self.vertex_table[:, 2] == v[1], :2]
1018
+ full_coord = np.concatenate((v1_coord, v2_coord))
1019
+ edge_lab1 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v[::-1], axis=1), 0]
1020
+ edge_lab2 = self.edges_labels[np.all(self.edges_labels[:, 1:] == v, axis=1), 0]
1021
+ edge_lab = np.unique(np.concatenate((edge_lab1, edge_lab2)))
1022
+ if len(edge_lab) == 1:
1023
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab, :2]
1024
+ full_coord = np.concatenate((full_coord, edge_coord))
1025
+ self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1026
+ self.edge_table[self.edge_table[:, 0] == edge_lab, 6] = k
1027
+ elif len(edge_lab) > 1:
1028
+ edge_coord0 = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[0], :2]
1029
+ for edge_i in range(len(edge_lab)): # edge_i=1
1030
+ edge_coord = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[edge_i], :2]
1031
+ self.edge_table[self.edge_table[:, 0] == edge_lab[edge_i], 6] = k
1032
+ full_coord = np.concatenate((full_coord, edge_coord))
1033
+ self.BC_net[full_coord[:, 0], full_coord[:, 1]] = k
1034
+ if edge_i > 0 and np.array_equal(edge_coord0, edge_coord):
1035
+ print(f"There still is two identical edges: {edge_lab} of len: {len(edge_coord)} linking v={v}")
1036
+ break
1037
+ #
1038
+ # edge_coord1 = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[0], :2]
1039
+ # edge_coord2 = self.edge_pix_coord[self.edge_pix_coord[:, 2] == edge_lab[1], :2]
1040
+ # self.edge_table[edge_lab[0] - 1, 3] = k
1041
+ # self.edge_table[edge_lab[1] - 1, 3] = k
1042
+ # full_coord1 = np.concatenate((full_coord, edge_coord1))
1043
+ # full_coord2 = np.concatenate((full_coord, edge_coord2))
1044
+ # self.BC_net[full_coord1[:, 0], full_coord1[:, 1]] = k
1045
+ # self.BC_net[full_coord2[:, 0], full_coord2[:, 1]] = k
1046
+ # else:
1047
+ # print(f"len(edge_lab)={len(edge_lab)}")
1048
+ # break
1049
+
1050
+
1051
+ def make_graph(self, cell_img, computed_network, pathway, i):
1052
+ if self.vertices is None:
1053
+ self.remove_padding()
1054
+ self.graph = np.zeros_like(self.distances)
1055
+ self.graph[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]] = self.distances[self.edge_pix_coord[:, 0], self.edge_pix_coord[:, 1]]
1056
+ self.graph = bracket_to_uint8_image_contrast(self.graph)
1057
+ cell_contours = get_contours(cell_img)
1058
+ net_contours = get_contours(computed_network)
1059
+
1060
+ self.graph[np.nonzero(cell_contours)] = 9
1061
+ self.graph[np.nonzero(net_contours)] = 255
1062
+ vertices = np.zeros_like(self.graph)
1063
+ vertices[self.non_tip_vertices[:, 0], self.non_tip_vertices[:, 1]] = 1
1064
+ vertices = cv2.dilate(vertices, cross_33)
1065
+ self.graph[vertices > 0] = 240
1066
+ self.graph[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 140
1067
+ food = self.vertex_table[self.vertex_table[:, 4] == 1]
1068
+ self.graph[food[:, 0], food[:, 1]] = 190
1069
+ self.graph[self.tips_coord[:, 0], self.tips_coord[:, 1]] = 140
1070
+ sizes = self.graph.shape[0] / 50, self.graph.shape[1] / 50
1071
+ fig = plt.figure(figsize=(sizes[0], sizes[1]))
1072
+ plt.imshow(self.graph, cmap='nipy_spectral')
1073
+ fig.tight_layout()
1074
+ fig.show()
1075
+ fig.savefig(pathway / f"contour network with medial axis{i}.png", dpi=1500)
1076
+ plt.close()
1077
+
1078
+
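The intended call order of the EdgeIdentification methods is not documented in the file; the outline below infers it from the data each method produces and consumes (a hypothetical sketch with illustrative argument names, not package code):

from cellects.image_analysis.network_functions import EdgeIdentification

def run_edge_identification(pad_skeleton, pad_distances, greyscale, origin_contours=None):
    ei = EdgeIdentification(pad_skeleton)
    ei.get_vertices_and_tips_coord()                   # find tips and branching vertices
    ei.get_tipped_edges()                              # edges that end on a tip
    ei.remove_tipped_edge_smaller_than_branch_width(pad_distances)
    ei.label_tipped_edges_and_their_vertices()         # number the vertices and the tipped edges
    ei.identify_all_other_edges()                      # walk the skeleton to identify the remaining edges
    ei.remove_edge_duplicates()
    ei.remove_vertices_connecting_2_edges()            # merge vertices that only join two edges
    ei.make_vertex_table(origin_contours)              # per-vertex coordinates, labels and natures
    ei.make_edge_table(greyscale)                      # per-edge length, width, intensity and centrality
    return ei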
1079
+ def find_closest_vertices(skeleton, all_vertices_coord, starting_vertices_coord):
+     """
+     For each starting vertex, find the nearest branching vertex along the skeleton
+     (breadth-first search restricted to skeleton pixels, so the first vertex reached
+     is the geodesically closest one).
+
+     Examples of argument sets used during development:
+     skeleton, all_vertices_coord, starting_vertices_coord = cropped_skeleton, cropped_non_tip_vertices, starting_vertices_coord
+     skeleton, all_vertices_coord, starting_vertices_coord = self.pad_skeleton, self.non_tip_vertices, self.tips_coord
+     skeleton, all_vertices_coord, starting_vertices_coord = pad_skeleton, non_tip_vertices, starting_vertices_coord
+
+     UPDATE TO MAKE:
+     - vertex pixels should not be included in all_path_pixels, maybe added afterward?
+     - When the edge only contains two vertices, it takes a length of 1 and is saved in starting_vertices_coord
+     - update remove_tipped_edge_smaller_than_branch_width to cope with that change
+
+     Parameters:
+     - skeleton (2D np.ndarray): binary skeleton image (0 and 1)
+     - all_vertices_coord (np.ndarray of shape (N, 2)): (y, x) coordinates of the candidate (branching) vertices
+     - starting_vertices_coord (np.ndarray of shape (M, 2)): (y, x) coordinates of the vertices to start from (typically tips)
+
+     Returns:
+     - ending_vertices_coord (np.ndarray of shape (M, 3), uint32): (y, x, edge_id) of the vertex reached from each start
+     - edge_lengths (np.ndarray of shape (M,), float64): geodesic length of each edge, np.nan when no branching vertex was reached
+     - edges_coords (np.ndarray of shape (K, 3), uint32): (y, x, edge_id) of the pixels forming each edge
+     """
+
+     # Convert branching vertices to a set for quick lookup
+     branch_set = set(zip(all_vertices_coord[:, 0], all_vertices_coord[:, 1]))
+     n = starting_vertices_coord.shape[0]
+
+     ending_vertices_coord = np.zeros((n, 3), np.uint32)  # next_vertex_y, next_vertex_x, edge_id
+     edge_lengths = np.zeros(n, np.float64)
+     all_path_pixels = []  # Will hold rows of (y, x, edge_id)
+     i = 0
+     edge_i = 0
+     for tip_y, tip_x in zip(starting_vertices_coord[:, 0], starting_vertices_coord[:, 1]):
+         visited = np.zeros_like(skeleton, dtype=bool)
+         parent = {}
+         q = deque()
+
+         q.append((tip_y, tip_x))
+         visited[tip_y, tip_x] = True
+         parent[(tip_y, tip_x)] = None
+         found_vertex = None
+
+         while q:
+             r, c = q.popleft()
+
+             # Check for a branching vertex (ignore the starting tip itself)
+             if (r, c) in branch_set and (r, c) != (tip_y, tip_x):
+                 # if (r, c) in branch_set and (r, c) not in v_set:
+                 found_vertex = (r, c)
+                 break  # stop at the first vertex encountered (shortest path, due to BFS)
+
+             for dr, dc in neighbors_8:
+                 nr, nc = r + dr, c + dc
+                 if (0 <= nr < skeleton.shape[0] and 0 <= nc < skeleton.shape[1] and
+                         not visited[nr, nc] and skeleton[nr, nc] > 0):  # This does not work: and (nr, nc) not in v_set
+                     visited[nr, nc] = True
+                     parent[(nr, nc)] = (r, c)
+                     q.append((nr, nc))
+         if found_vertex:
+             fy, fx = found_vertex
+             # Do not add the connection if it has already been detected from the other direction:
+             from_start = np.all(starting_vertices_coord[:i, :] == [fy, fx], axis=1).any()
+             to_end = np.all(ending_vertices_coord[:i, :2] == [tip_y, tip_x], axis=1).any()
+             if not from_start or not to_end:
+                 edge_i += 1
+                 ending_vertices_coord[i, :] = [fy, fx, i + 1]
+                 # Reconstruct the path from found_vertex back to the tip
+                 path = []
+                 current = (fy, fx)
+                 while current is not None:
+                     path.append((i, *current))
+                     current = parent[current]
+
+                 # path.reverse()  # So path goes from starting tip to found vertex
+
+                 for _, y, x in path[1:-1]:  # Exclude the two endpoint vertices from the edge pixel path
+                     all_path_pixels.append((y, x, edge_i))
+
+                 edge_lengths[i] = len(path) - 1  # number of steps between the two vertices
+
+         else:
+             edge_lengths[i] = np.nan
+         i += 1
+
+     edges_coords = np.array(all_path_pixels, dtype=np.uint32)
+     return ending_vertices_coord, edge_lengths, edges_coords
+
+
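find_closest_vertices runs, for every starting vertex, a breadth-first search constrained to skeleton pixels and stops at the first branching vertex it reaches, which is the geodesically closest one; the parent dictionary then lets it rebuild the pixel path of the edge. Here is a compact stand-alone illustration of that search on a hand-made skeleton; the arrays, coordinates and 8-neighbourhood list below are toy data chosen for the example.

# Minimal sketch: BFS along skeleton pixels from a tip to the nearest branching vertex.
from collections import deque
import numpy as np

skeleton = np.zeros((5, 7), dtype=np.uint8)
skeleton[2, 1:6] = 1          # a horizontal branch
skeleton[0:3, 5] = 1          # a vertical branch joining it
branch_set = {(2, 5)}         # the branching vertex
tip = (2, 1)                  # the starting tip

neighbors_8 = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
visited, parent = {tip}, {tip: None}
q = deque([tip])
found = None
while q:
    r, c = q.popleft()
    if (r, c) in branch_set and (r, c) != tip:
        found = (r, c)
        break  # the first vertex reached is the geodesically closest one
    for dr, dc in neighbors_8:
        nr, nc = r + dr, c + dc
        if (0 <= nr < skeleton.shape[0] and 0 <= nc < skeleton.shape[1]
                and (nr, nc) not in visited and skeleton[nr, nc] > 0):
            visited.add((nr, nc))
            parent[(nr, nc)] = (r, c)
            q.append((nr, nc))

# Rebuild the geodesic path from the found vertex back to the tip
path = []
current = found
while current is not None:
    path.append(current)
    current = parent[current]
print(found, len(path) - 1)   # (2, 5) with a geodesic length of 4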
+ def add_padding(array_list):
+     new_array_list = []
+     for arr in array_list:
+         new_array_list.append(np.pad(arr, [(1, ), (1, )], mode='constant'))
+     return new_array_list
+
+
+ def remove_padding(array_list):
+     new_array_list = []
+     for arr in array_list:
+         new_array_list.append(arr[1:-1, 1:-1])
+     return new_array_list
+
+
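add_padding and remove_padding are inverse operations: the first adds a one-pixel zero border around every array (presumably so that 3x3 neighbourhood operations on the skeleton never fall outside the image), and the second strips it again. A quick round-trip check on a toy array, assuming 2D inputs:

# Minimal sketch: one-pixel zero padding and its inverse on a toy 2D array.
import numpy as np

arr = np.arange(6, dtype=np.uint8).reshape(2, 3)
padded = np.pad(arr, [(1, ), (1, )], mode='constant')  # shape (4, 5), zero border
restored = padded[1:-1, 1:-1]
assert np.array_equal(arr, restored)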
+ def add_central_contour(pad_skeleton, pad_distances, pad_origin, pad_network, pad_origin_centroid):
+     """
+     Replace the part of the skeleton lying inside the origin shape by the origin contour:
+     cut a hole in the skeleton at the origin, connect the surrounding vertices to the origin
+     with straight segments, re-skeletonize the modified neighborhood, and propagate distance
+     values onto the newly created pixels.
+     """
+     pad_net_contour = get_contours(pad_network)
+
+     # Make a hole at the skeleton center and find the vertices connecting it
+     holed_skeleton = pad_skeleton * (1 - pad_origin)
+     pad_vertices, pad_tips = get_vertices_and_tips_from_skeleton(pad_skeleton)
+     dil_origin = cv2.dilate(pad_origin, Ellipse((5, 5)).create().astype(np.uint8), iterations=20)
+     pad_vertices *= dil_origin
+     connecting_pixels = np.transpose(np.array(np.nonzero(pad_vertices)))
+
+     skeleton_without_vertices = pad_skeleton.copy()
+     skeleton_without_vertices[pad_vertices > 0] = 0
+
+     # Previously, this was connected to the center of the shape.
+     line_coordinates = get_all_line_coordinates(pad_origin_centroid, connecting_pixels)
+     with_central_contour = holed_skeleton.copy()
+     for vertex, new_edge in zip(connecting_pixels, line_coordinates):  # nei = 65; new_edge=line_coordinates[nei]
+         new_edge_im = np.zeros_like(pad_origin)
+         new_edge_im[new_edge[:, 0], new_edge[:, 1]] = 1
+         if not np.any(new_edge_im * pad_net_contour) and not np.any(new_edge_im * skeleton_without_vertices):  # and not np.any(new_edge_im * holed_skeleton):
+             with_central_contour[new_edge[:, 0], new_edge[:, 1]] = 1
+
+     # Add the dilated contour
+     pad_origin_contours = get_contours(pad_origin)
+     with_central_contour *= (1 - pad_origin)
+     with_central_contour += pad_origin_contours
+     if np.any(with_central_contour == 2):
+         with_central_contour[with_central_contour > 0] = 1
+
+     # show(dil_origin * with_central_contour)
+     # Capture only the new contour and its neighborhood, get its skeleton and update the final skeleton
+     new_contour = cv2.morphologyEx(dil_origin * with_central_contour, cv2.MORPH_CLOSE, square_33)
+     new_contour = morphology.medial_axis(new_contour, rng=0).astype(np.uint8)
+     new_skeleton = with_central_contour * (1 - dil_origin)
+     new_skeleton += new_contour
+     new_pixels = np.logical_and(pad_distances == 0, new_skeleton == 1)
+     new_pix_coord = np.transpose(np.array(np.nonzero(new_pixels)))
+     dist_coord = np.transpose(np.array(np.nonzero(pad_distances)))
+
+     # Give each newly created pixel the distance value of its nearest already-valued pixel
+     dist_from_dist = cdist(new_pix_coord[:, :], dist_coord)
+     for np_i, dist_i in enumerate(dist_from_dist):  # dist_i=dist_from_dist[0]
+         close_i = dist_i.argmin()
+         pad_distances[new_pix_coord[np_i, 0], new_pix_coord[np_i, 1]] = pad_distances[dist_coord[close_i, 0], dist_coord[close_i, 1]]
+
+     # Update distances
+     pad_distances *= new_skeleton
+
+     # Dilate the origin contour until it forms a single connected component on the skeleton
+     dil_pad_origin_contours = cv2.dilate(pad_origin_contours, cross_33, iterations=1)
+     new_pad_origin_contours = dil_pad_origin_contours * new_skeleton
+     nb, sh = cv2.connectedComponents(new_pad_origin_contours)
+     while nb > 2:
+         dil_pad_origin_contours = cv2.dilate(dil_pad_origin_contours, cross_33, iterations=1)
+         new_pad_origin_contours = dil_pad_origin_contours * new_skeleton
+         nb, sh = cv2.connectedComponents(new_pad_origin_contours)
+     pad_origin_contours = new_pad_origin_contours
+     pad_distances[pad_origin_contours > 0] = np.nan  # pad_distances.max() + 1 #
+     # test1 = ((pad_distances > 0) * (1 - new_skeleton)).sum() == 0
+     # test2 = ((1 - (pad_distances > 0)) * new_skeleton).sum() == 0
+
+     return new_skeleton, pad_distances, pad_origin_contours
+
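Inside add_central_contour, each newly created skeleton pixel receives the distance value of the nearest pixel that already carries one, through a brute-force cdist between the two coordinate sets. The sketch below reproduces that nearest-neighbour transfer on toy coordinates; the arrays are invented, and cdist is assumed to be scipy.spatial.distance.cdist (the module's actual import is not shown in this excerpt).

# Minimal sketch: copy to each new pixel the value of its nearest already-valued pixel.
import numpy as np
from scipy.spatial.distance import cdist

distances = np.zeros((6, 6), dtype=float)
distances[1, 1], distances[4, 4] = 2.0, 5.0          # pixels that already have a value
new_pix_coord = np.array([[1, 2], [4, 3], [5, 5]])   # newly created skeleton pixels
dist_coord = np.transpose(np.nonzero(distances))     # coordinates of the valued pixels

pairwise = cdist(new_pix_coord, dist_coord)          # Euclidean distances, shape (3, 2)
for row, dists in enumerate(pairwise):
    nearest = dist_coord[dists.argmin()]
    distances[new_pix_coord[row, 0], new_pix_coord[row, 1]] = distances[nearest[0], nearest[1]]

This brute-force approach is quadratic in the number of pixels; a KD-tree (for instance scipy.spatial.cKDTree) would scale better, but the sketch keeps the structure of the loop in add_central_contour.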