cellects 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +155 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +31 -0
  7. cellects/core/cellects_threads.py +1451 -0
  8. cellects/core/motion_analysis.py +2010 -0
  9. cellects/core/one_image_analysis.py +1061 -0
  10. cellects/core/one_video_per_blob.py +540 -0
  11. cellects/core/program_organizer.py +1316 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +790 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +2066 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/image_segmentation.py +706 -0
  29. cellects/image_analysis/morphological_operations.py +1635 -0
  30. cellects/image_analysis/network_functions.py +1757 -0
  31. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  32. cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
  33. cellects/image_analysis/shape_descriptors.py +1016 -0
  34. cellects/utils/__init__.py +0 -0
  35. cellects/utils/decorators.py +14 -0
  36. cellects/utils/formulas.py +637 -0
  37. cellects/utils/load_display_save.py +1054 -0
  38. cellects/utils/utilitarian.py +490 -0
  39. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  40. cellects-0.1.2.dist-info/METADATA +132 -0
  41. cellects-0.1.2.dist-info/RECORD +44 -0
  42. cellects-0.1.2.dist-info/WHEEL +5 -0
  43. cellects-0.1.2.dist-info/entry_points.txt +2 -0
  44. cellects-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1635 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ This module provides methods to analyze and modify shapes in binary images.
4
+ It includes functions for comparing neighboring pixels, generating shape descriptors,
5
+ and performing morphological operations like expanding shapes and filling holes.
6
+
7
+ Classes
8
+ ---------
9
+ CompareNeighborsWithValue : Class to compare neighboring pixels to a specified value
10
+
11
+ Functions
12
+ ---------------
13
+ cc : Sort connected components according to size
14
+ make_gravity_field : Create a gradient field around shapes
15
+ find_median_shape : Generate median shape from multiple inputs
16
+ make_numbered_rays : Create numbered rays for analysis
17
+ CompareNeighborsWithFocal : Compare neighboring pixels to focal values
18
+ ShapeDescriptors : Generate shape descriptors using provided functions
19
+ get_radius_distance_against_time : Calculate radius distances over time
20
+ expand_until_one : Expand shapes until a single connected component remains
21
+ expand_and_rate_until_one : Expand and rate shapes until one remains
22
+ expand_until_overlap : Expand shapes until overlap occurs
23
+ dynamically_expand_to_fill_holes : Dynamically expand to fill holes in shapes
24
+ expand_smalls_toward_biggest : Expand smaller shapes toward largest component
25
+ change_thresh_until_one : Change threshold until one connected component remains
26
+ Ellipse : Generate ellipse shape descriptors
27
+ get_rolling_window_coordinates_list : Get coordinates for rolling window operations
28
+
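+ Examples
+ --------
+ Minimal usage sketch (illustrative only):
+
+ >>> import numpy as np
+ >>> from cellects.image_analysis.morphological_operations import cc, get_contours
+ >>> img = np.zeros((5, 5), dtype=np.uint8)
+ >>> img[1:4, 1:4] = 1
+ >>> labels, stats, centers = cc(img)  # size-ranked connected components
+ >>> contours = get_contours(img)  # one-pixel-thick outline of the square
+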
29
+ """
30
+ import logging
31
+ from copy import deepcopy
32
+ import cv2
33
+ import numpy as np
34
+ from numpy.typing import NDArray
35
+ from typing import Tuple
36
+ from scipy.spatial import KDTree
37
+ from cellects.utils.decorators import njit
38
+ from cellects.image_analysis.shape_descriptors import ShapeDescriptors
39
+ from cellects.utils.formulas import moving_average
40
+ from skimage.filters import threshold_otsu
41
+ from skimage.measure import label
42
+ from scipy.stats import linregress
43
+ from scipy.ndimage import distance_transform_edt
44
+ import matplotlib.pyplot as plt
45
+
46
+
47
+ cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
48
+ square_33 = np.ones((3, 3), np.uint8)
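+ # Module-level structuring elements: cross_33 is the 3x3 cross (4-connectivity)
+ # kernel, square_33 the full 3x3 square (8-connectivity) kernel.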
49
+
50
+
51
+ class CompareNeighborsWithValue:
52
+ """
53
+ CompareNeighborsWithValue class to summarize each pixel by comparing its neighbors to a value.
54
+
55
+ This class analyzes pixels in a 2D array, comparing each pixel's neighbors
56
+ to a specified value. The comparison can be equality, superiority,
57
+ or inferiority, and neighbors can be the 4 or 8 nearest pixels based on
58
+ the connectivity parameter.
59
+ """
60
+ def __init__(self, array: np.ndarray, connectivity: int=None, data_type: np.dtype=np.int8):
61
+ """
62
+ Initialize a class for array connectivity processing.
63
+
64
+ This class processes arrays based on given connectivities, creating
65
+ windows around the original data for both 1D and 2D arrays. Depending on
66
+ the connectivity value (4 or 8), it creates different windows with borders.
67
+
68
+ Parameters
69
+ ----------
70
+ array : ndarray
71
+ Input array to process, can be 1D or 2D.
72
+ connectivity : int, optional
73
+ Connectivity type for processing (4 or 8), by default None.
74
+ data_type : dtype, optional
75
+ Data type for the array elements, by default np.int8.
76
+
77
+ Attributes
78
+ ----------
79
+ array : ndarray
80
+ The processed array based on the given data type.
81
+ connectivity : int
82
+ Connectivity value used for processing.
83
+ on_the_right : ndarray
84
+ Array with shifted elements to the right.
85
+ on_the_left : ndarray
86
+ Array with shifted elements to the left.
87
+ on_the_bot : ndarray, optional
88
+ Array with shifted elements to the bottom (for 2D arrays).
89
+ on_the_top : ndarray, optional
90
+ Array with shifted elements to the top (for 2D arrays).
91
+ on_the_topleft : ndarray, optional
92
+ Array with shifted elements to the top left (for 2D arrays).
93
+ on_the_topright : ndarray, optional
94
+ Array with shifted elements to the top right (for 2D arrays).
95
+ on_the_botleft : ndarray, optional
96
+ Array with shifted elements to the bottom left (for 2D arrays).
97
+ on_the_botright : ndarray, optional
98
+ Array with shifted elements to the bottom right (for 2D arrays).
99
+ """
100
+ array = array.astype(data_type)
101
+ self.array = array
102
+ self.connectivity = connectivity
103
+ if len(self.array.shape) == 1:
104
+ self.on_the_right = np.append(array[1:], array[-1])
105
+ self.on_the_left = np.append(array[0], array[:-1])
106
+ else:
107
+ # Build 4 windows of the original array, each missing one of the four borders
+ # Pad each window by duplicating its outermost row/column on the side opposite to the removed border
109
+ if self.connectivity == 4 or self.connectivity == 8:
110
+ self.on_the_right = np.column_stack((array[:, 1:], array[:, -1]))
111
+ self.on_the_left = np.column_stack((array[:, 0], array[:, :-1]))
112
+ self.on_the_bot = np.vstack((array[1:, :], array[-1, :]))
113
+ self.on_the_top = np.vstack((array[0, :], array[:-1, :]))
114
+ if self.connectivity != 4:
115
+ self.on_the_topleft = array[:-1, :-1]
116
+ self.on_the_topright = array[:-1, 1:]
117
+ self.on_the_botleft = array[1:, :-1]
118
+ self.on_the_botright = array[1:, 1:]
119
+
120
+ self.on_the_topleft = np.vstack((self.on_the_topleft[0, :], self.on_the_topleft))
121
+ self.on_the_topleft = np.column_stack((self.on_the_topleft[:, 0], self.on_the_topleft))
122
+
123
+ self.on_the_topright = np.vstack((self.on_the_topright[0, :], self.on_the_topright))
124
+ self.on_the_topright = np.column_stack((self.on_the_topright, self.on_the_topright[:, -1]))
125
+
126
+ self.on_the_botleft = np.vstack((self.on_the_botleft, self.on_the_botleft[-1, :]))
127
+ self.on_the_botleft = np.column_stack((self.on_the_botleft[:, 0], self.on_the_botleft))
128
+
129
+ self.on_the_botright = np.vstack((self.on_the_botright, self.on_the_botright[-1, :]))
130
+ self.on_the_botright = np.column_stack((self.on_the_botright, self.on_the_botright[:, -1]))
131
+
132
+ def is_equal(self, value, and_itself: bool=False):
133
+ """
134
+ Check equality of neighboring values in an array.
135
+
136
+ This method compares the neighbors of each element in `self.array` to a given value.
137
+ Depending on the dimensionality and connectivity settings, it checks different neighboring
138
+ elements.
139
+
140
+ Parameters
141
+ ----------
142
+ value : int or float
143
+ The value to check equality with neighboring elements.
144
+ and_itself : bool, optional
145
+ If True, also check equality with the element itself. Defaults to False.
146
+
147
+ Returns
148
+ -------
149
+ None
150
+
151
+ Attributes
+ ----------
153
+ equal_neighbor_nb : ndarray of uint8
154
+ Array that holds the number of equal neighbors for each element.
155
+
156
+ Examples
157
+ --------
158
+ >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
159
+ >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
160
+ >>> compare.is_equal(1)
161
+ >>> print(compare.equal_neighbor_nb)
162
+ [[0 0 1 0]
163
+ [0 1 1 1]
164
+ [0 1 1 1]
165
+ [0 0 1 0]]
166
+ """
167
+
168
+ if len(self.array.shape) == 1:
169
+ self.equal_neighbor_nb = np.sum((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value)), axis=0)
170
+ else:
171
+ if self.connectivity == 4:
172
+ self.equal_neighbor_nb = np.dstack((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
173
+ np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value)))
174
+ elif self.connectivity == 8:
175
+ self.equal_neighbor_nb = np.dstack(
176
+ (np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
177
+ np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value),
178
+ np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
179
+ np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
180
+ else:
181
+ self.equal_neighbor_nb = np.dstack(
182
+ (np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
183
+ np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
184
+ self.equal_neighbor_nb = np.sum(self.equal_neighbor_nb, 2, dtype=np.uint8)
185
+
186
+ if and_itself:
187
+ self.equal_neighbor_nb[np.not_equal(self.array, value)] = 0
188
+
189
+ def is_sup(self, value, and_itself=False):
190
+ """
191
+ Count, for each pixel, the neighbors whose values are greater than a given threshold.
+
+ This method computes, for each pixel in the array, the number of neighboring
+ pixels whose values are strictly greater than `value`. Optionally, the count is
+ set to 0 for pixels whose own value is not greater than `value`.
196
+
197
+ Parameters
198
+ ----------
199
+ value : int
200
+ The threshold value used to determine if a neighboring pixel's value is greater.
201
+ and_itself : bool, optional
202
+ If True, set the count to 0 where the pixel's own value is less than or equal to `value`.
203
+ Defaults to False.
204
+
205
+ Examples
206
+ --------
207
+ >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
208
+ >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
209
+ >>> compare.is_sup(1)
210
+ >>> print(compare.sup_neighbor_nb)
211
+ [[3 3 2 4]
212
+ [4 2 3 3]
213
+ [4 2 3 3]
214
+ [3 3 2 4]]
215
+ """
216
+ if len(self.array.shape) == 1:
217
+ self.sup_neighbor_nb = (self.on_the_right > value).astype(self.array.dtype) + (self.on_the_left > value).astype(self.array.dtype)
218
+ else:
219
+ if self.connectivity == 4:
220
+ self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
221
+ self.on_the_bot > value, self.on_the_top > value))
222
+ elif self.connectivity == 8:
223
+ self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
224
+ self.on_the_bot > value, self.on_the_top > value,
225
+ self.on_the_topleft > value, self.on_the_topright > value,
226
+ self.on_the_botleft > value, self.on_the_botright > value))
227
+ else:
228
+ self.sup_neighbor_nb = np.dstack((self.on_the_topleft > value, self.on_the_topright > value,
229
+ self.on_the_botleft > value, self.on_the_botright > value))
230
+
231
+ self.sup_neighbor_nb = np.sum(self.sup_neighbor_nb, 2, dtype=np.uint8)
232
+ if and_itself:
233
+ self.sup_neighbor_nb[np.less_equal(self.array, value)] = 0
234
+
235
+ def is_inf(self, value, and_itself=False):
236
+ """
237
+ Count, for each pixel, the neighbors whose values are strictly less than a given value,
+ using the configured connectivity and optionally requiring the pixel itself to also be below that value.
241
+
242
+ Parameters
243
+ ----------
244
+ value : numeric
245
+ The value to compare neighbor elements against.
246
+ and_itself : bool, optional
247
+ If True, set the count to 0 where the pixel's own value is greater than or equal to `value`. Default is False.
248
+
249
+ Examples
250
+ --------
251
+ >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
252
+ >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
253
+ >>> compare.is_inf(1)
254
+ >>> print(compare.inf_neighbor_nb)
255
+ [[1 1 1 0]
256
+ [0 1 0 0]
257
+ [0 1 0 0]
258
+ [1 1 1 0]]
259
+ """
260
+ if len(self.array.shape) == 1:
261
+ self.inf_neighbor_nb = (self.on_the_right < value).astype(self.array.dtype) + (self.on_the_left < value).astype(self.array.dtype)
262
+ else:
263
+ if self.connectivity == 4:
264
+ self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
265
+ self.on_the_bot < value, self.on_the_top < value))
266
+ elif self.connectivity == 8:
267
+ self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
268
+ self.on_the_bot < value, self.on_the_top < value,
269
+ self.on_the_topleft < value, self.on_the_topright < value,
270
+ self.on_the_botleft < value, self.on_the_botright < value))
271
+ else:
272
+ self.inf_neighbor_nb = np.dstack((self.on_the_topleft < value, self.on_the_topright < value,
273
+ self.on_the_botleft < value, self.on_the_botright < value))
274
+
275
+ self.inf_neighbor_nb = np.sum(self.inf_neighbor_nb, 2, dtype=np.uint8)
276
+ if and_itself:
277
+ self.inf_neighbor_nb[np.greater_equal(self.array, value)] = 0
278
+
279
+
280
+ def cc(binary_img: NDArray[np.uint8]) -> Tuple[NDArray, NDArray, NDArray]:
281
+ """
282
+ Processes a binary image to reorder and label connected components.
283
+
284
+ This function takes a binary image, analyses the connected components,
285
+ reorders them by size, ensures background is correctly labeled as 0,
286
+ and returns the new ordered labels along with their statistics and centers.
287
+
288
+ Parameters
289
+ ----------
290
+ binary_img : ndarray of uint8
291
+ Input binary image with connected components.
292
+
293
+ Returns
294
+ -------
295
+ new_order : ndarray of uint8, uint16 or uint32
296
+ Image with reordered labels for connected components.
297
+ stats : ndarray of ints
298
+ Statistics for each component (x_min, y_min, x_max, y_max, area).
299
+ centers : ndarray of floats
300
+ Centers for each component (x, y).
301
+
302
+ Examples
303
+ --------
304
+ >>> binary_img = np.array([[0, 1, 0], [0, 1, 0]], dtype=np.uint8)
305
+ >>> new_order, stats, centers = cc(binary_img)
306
+ >>> print(stats)
307
+ array([[0, 0, 3, 2, 4],
308
+ [1, 0, 2, 2, 2]], dtype=int32)
309
+ """
310
+ number, img, stats, centers = cv2.connectedComponentsWithStats(binary_img, ltype=cv2.CV_16U)
311
+ if number > 255:
312
+ img_dtype = np.uint16
313
+ if number > 65535:
314
+ img_dtype = np.uint32
315
+ else:
316
+ img_dtype = np.uint8
317
+ stats[:, 2] = stats[:, 0] + stats[:, 2]
318
+ stats[:, 3] = stats[:, 1] + stats[:, 3]
319
+ sorted_idx = np.argsort(stats[:, 4])[::-1]
320
+
321
+ # Make sure that the first connected component (labelled 0) is the background and not the main shape
322
+ size_ranked_stats = stats[sorted_idx, :]
323
+ background = (size_ranked_stats[:, 0] == 0).astype(np.uint8) + (size_ranked_stats[:, 1] == 0).astype(np.uint8) + (
324
+ size_ranked_stats[:, 2] == img.shape[1]).astype(np.uint8) + (
325
+ size_ranked_stats[:, 3] == img.shape[0]).astype(np.uint8)
326
+
327
+ # background = ((size_ranked_stats[:, 0] == 0) & (size_ranked_stats[:, 1] == 0) & (size_ranked_stats[:, 2] == img.shape[1]) & (size_ranked_stats[:, 3] == img.shape[0]))
328
+
329
+ touch_borders = np.nonzero(background > 2)[0]
330
+ # if not isinstance(touch_borders, np.int64):
331
+ # touch_borders = touch_borders[0]
332
+ # Most of the time, the background should be the largest shape and therefore has the index 0,
333
+ # Then, if at least one shape touches more than 2 borders without having index 0, fix the ordering:
334
+ if np.any(touch_borders != 0):
335
+ # If there is only one shape touching borders, it means that background is not at its right position (i.e. 0)
336
+ if len(touch_borders) == 1:
337
+ # Then exchange that shape position with background position
338
+ shape = sorted_idx[0] # Store shape position in the first place
339
+ back = sorted_idx[touch_borders[0]] # Store back position in the first place
340
+ sorted_idx[touch_borders[0]] = shape # Put shape position at the previous place of back and conversely
341
+ sorted_idx[0] = back
342
+ # If there are two shapes, it means that the main shape grew sufficiently to reach at least 3 borders
343
+ # We assume that it grew larger than background
344
+ else:
345
+ shape = sorted_idx[0]
346
+ back = sorted_idx[1]
347
+ sorted_idx[1] = shape
348
+ sorted_idx[0] = back
349
+ # Put shape position at the previous place of back and conversely
350
+
351
+
352
+ stats = stats[sorted_idx, :]
353
+ centers = centers[sorted_idx, :]
354
+
355
+ new_order = np.zeros_like(binary_img, dtype=img_dtype)
356
+
357
+ for i, val in enumerate(sorted_idx):
358
+ new_order[img == val] = i
359
+ return new_order, stats, centers
360
+
361
+
362
+ def rounded_inverted_distance_transform(original_shape: NDArray[np.uint8], max_distance: int=None, with_erosion: int=0) -> NDArray[np.uint32]:
363
+ """
364
+ Perform rounded inverted distance transform on a binary image.
365
+
366
+ This function builds an inverted, integer-valued distance field around the shape by
+ iterative dilation with a 3x3 cross: background pixels closer to the shape receive
+ higher values, while pixels inside the shape stay at 0. The operation can be preceded
+ by erosion and stops either at a given max distance or once the whole image is covered.
370
+
371
+ Parameters
372
+ ----------
373
+ original_shape : ndarray of uint8
374
+ Input binary image to be processed.
375
+ max_distance : int, optional
376
+ Maximum distance for the expansion. If None, no limit is applied.
377
+ with_erosion : int, optional
378
+ Number of erosion iterations to apply before the transform. Default is 0.
379
+
380
+ Returns
381
+ -------
382
+ out : ndarray of uint32
383
+ Output image containing the rounded inverted distance transform.
384
+
385
+ Examples
386
+ --------
387
+ >>> segmentation = np.zeros((4, 4), dtype=np.uint8)
388
+ >>> segmentation[1:3, 1:3] = 1
389
+ >>> gravity = rounded_inverted_distance_transform(segmentation, max_distance=2)
390
+ >>> print(gravity)
391
+ [[1 2 2 1]
392
+ [2 0 0 2]
393
+ [2 0 0 2]
394
+ [1 2 2 1]]
395
+ """
396
+ if with_erosion > 0:
397
+ original_shape = cv2.erode(original_shape, cross_33, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
398
+ expand = deepcopy(original_shape)
399
+ if max_distance is not None:
400
+ if max_distance > np.max(original_shape.shape):
401
+ max_distance = np.max(original_shape.shape).astype(np.uint32)
402
+ gravity_field = np.zeros(original_shape.shape , np.uint32)
403
+ for gravi in np.arange(max_distance):
404
+ expand = cv2.dilate(expand, cross_33, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
405
+ gravity_field[np.logical_xor(expand, original_shape)] += 1
406
+ else:
407
+ gravity_field = np.zeros(original_shape.shape , np.uint32)
408
+ while np.any(np.equal(original_shape + expand, 0)):
409
+ expand = cv2.dilate(expand, cross_33, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
410
+ gravity_field[np.logical_xor(expand, original_shape)] += 1
411
+ return gravity_field
412
+
413
+
414
+ def inverted_distance_transform(original_shape: NDArray[np.uint8], max_distance: int=None, with_erosion: int=0) -> NDArray[np.float64]:
415
+ """
416
+ Calculate an inverted distance transform around the ones of a binary image, with optional erosion.
+
+ This function computes the Euclidean distance transform of the background
+ (the distance from each background pixel to the nearest foreground pixel),
+ then inverts it so that background pixels nearer the foreground get higher
+ values. Optionally, it erodes the input image beforehand and zeroes out
+ distances greater than or equal to `max_distance` before inverting.
422
+
423
+ Parameters
424
+ ----------
425
+ original_shape : ndarray of uint8
426
+ Input binary image where ones represent the foreground.
427
+ max_distance : int, optional
428
+ Maximum distance value to threshold. If None (default), no thresholding is applied.
429
+ with_erosion : int, optional
430
+ Number of iterations for erosion. If 0 (default), no erosion is applied.
431
+
432
+ Returns
433
+ -------
434
+ out : ndarray of float64
+ Inverted distance field: background pixels closer to the ones of the
+ input image receive higher values, while foreground pixels stay at 0.
437
+
438
+ See also
439
+ --------
440
+ rounded_inverted_distance_transform : integer-valued (less precise) and faster for small max_distance values.
441
+
442
+ Examples
443
+ --------
444
+ >>> segmentation = np.zeros((4, 4), dtype=np.uint8)
445
+ >>> segmentation[1:3, 1:3] = 1
446
+ >>> gravity = inverted_distance_transform(segmentation, max_distance=2)
447
+ >>> print(gravity)
448
+ [[1. 1.41421356 1.41421356 1. ]
449
+ [1.41421356 0. 0. 1.41421356]
450
+ [1.41421356 0. 0. 1.41421356]
451
+ [1. 1.41421356 1.41421356 1. ]]
452
+ """
453
+ if with_erosion:
454
+ original_shape = cv2.erode(original_shape, cross_33, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
455
+ gravity_field = distance_transform_edt(1 - original_shape)
456
+ if max_distance is not None:
457
+ if max_distance > np.min(original_shape.shape) / 2:
458
+ max_distance = (np.min(original_shape.shape) // 2).astype(np.uint32)
459
+ gravity_field[gravity_field >= max_distance] = 0
460
+ gravity_field[gravity_field > 0] = 1 + gravity_field.max() - gravity_field[gravity_field > 0]
461
+ return gravity_field
462
+
463
+
464
+ @njit()
465
+ def get_line_points(start, end) -> NDArray[int]:
466
+ """
467
+ Get line points between two endpoints using Bresenham's line algorithm.
468
+
469
+ This function calculates all the integer coordinate points that form a
470
+ line between two endpoints using Bresenham's line algorithm. It is
471
+ optimized for performance using Numba's just-in-time compilation.
472
+
473
+ Parameters
474
+ ----------
475
+ start : tuple of int
476
+ The starting point coordinates (x0, y0).
477
+ end : tuple of int
478
+ The ending point coordinates (x1, y1).
479
+
480
+ Returns
481
+ -------
482
+ out : ndarray of int
483
+ Array of points representing the line, with shape (N, 2), where N is
484
+ the number of points on the line.
485
+
486
+ Examples
487
+ --------
488
+ >>> start = (0, 0)
489
+ >>> end = (1, 2)
490
+ >>> points = get_line_points(start, end)
491
+ >>> print(points)
492
+ [[0 0]
493
+ [0 1]
494
+ [1 2]]
495
+ """
496
+ y0, x0 = start
497
+ y1, x1 = end
498
+
499
+ # Calculate differences
500
+ dx = np.abs(x1 - x0)
501
+ dy = np.abs(y1 - y0)
502
+
503
+ # Determine step direction
504
+ sx = 1 if x0 < x1 else -1
505
+ sy = 1 if y0 < y1 else -1
506
+
507
+ # Initialize
508
+ err = dx - dy
509
+ points = []
510
+ x, y = x0, y0
511
+
512
+ while True:
513
+ points.append([y, x])
514
+
515
+ # Check if we've reached the end
516
+ if x == x1 and y == y1:
517
+ break
518
+
519
+ # Calculate error for next step
520
+ e2 = 2 * err
521
+
522
+ if e2 > -dy:
523
+ err -= dy
524
+ x += sx
525
+
526
+ if e2 < dx:
527
+ err += dx
528
+ y += sy
529
+
530
+ return np.array(points)
531
+
532
+
533
+ def get_all_line_coordinates(start_point: NDArray[int], end_points: NDArray[int]) -> NDArray[int]:
534
+ """
535
+ Get all line coordinates between start point and end points.
536
+
537
+ This function computes the coordinates of the lines connecting a single
+ start point to each of several end points, returning one array of points per line.
540
+
541
+ Parameters
542
+ ----------
543
+ start_point : NDArray[int]
+ Starting coordinate (y, x) shared by all lines.
+ end_points : NDArray[int]
+ Array of end coordinates, one (y, x) pair per line.
549
+
550
+ Returns
551
+ -------
552
+ out : List[NDArray[int]]
553
+ A list of numpy arrays containing the coordinates of each line
554
+ as integer values.
555
+
556
+ Examples
557
+ --------
558
+ >>> start_point = np.array([0, 0])
559
+ >>> end_points = np.array([[1, 2], [3, 4]])
560
+ >>> get_all_line_coordinates(start_point, end_points)
561
+ [array([[0, 0],
562
+ [0, 1],
563
+ [1, 2]], dtype=uint64), array([[0, 0],
564
+ [1, 1],
565
+ [1, 2],
566
+ [2, 3],
567
+ [3, 4]], dtype=uint64)]
568
+ """
569
+ lines = []
570
+ for end_point in end_points:
571
+ line_coords = get_line_points(start_point, end_point)
572
+ lines.append(np.array(line_coords, dtype=np.uint64))
573
+ return lines
574
+
575
+
576
+ def draw_me_a_sun(main_shape: NDArray, ray_length_coef=4) -> Tuple[NDArray, NDArray]:
577
+ """
578
+ Draw a sun-shaped pattern on an image based on the main shape and ray length coefficient.
579
+
580
+ This function takes an input binary image (main_shape) and draws sun rays
581
+ from the perimeter of that shape. The length of the rays is controlled by a coefficient.
582
+ The function ensures that rays do not extend beyond the image borders.
583
+
584
+ Parameters
585
+ ----------
586
+ main_shape : ndarray of bool or int
587
+ Binary input image where the main shape is defined.
588
+ ray_length_coef : float, optional
589
+ Coefficient to control the length of sun rays. Defaults to 4.
590
+
591
+ Returns
592
+ -------
593
+ rays : ndarray
594
+ Indices of the rays drawn.
595
+ sun : ndarray
596
+ Image with sun rays drawn on it.
597
+
598
+ Examples
599
+ --------
600
+ >>> main_shape = np.zeros((10, 10), dtype=np.uint8)
601
+ >>> main_shape[4:7, 3:6] = 1
602
+ >>> rays, sun = draw_me_a_sun(main_shape)
603
+ >>> print(sun.shape)
+ (10, 10)
+
605
+ """
606
+ nb, shapes, stats, center = cv2.connectedComponentsWithStats(main_shape)
607
+ sun = np.zeros(main_shape.shape, np.uint32)
608
+ rays = []
609
+ r = 0
610
+ for i in range(1, nb):
611
+ shape_i = cv2.dilate((shapes == i).astype(np.uint8), kernel=cross_33)
612
+ contours = get_contours(shape_i)
613
+ first_ring_idx = np.nonzero(contours)
614
+ centroid = np.round((center[i, 1], center[i, 0])).astype(np.int64)
615
+ second_ring_y = centroid[0] + ((first_ring_idx[0] - centroid[0]) * ray_length_coef)
616
+ second_ring_x = centroid[1] + ((first_ring_idx[1] - centroid[1]) * ray_length_coef)
617
+
618
+ second_ring_y[second_ring_y < 0] = 0
619
+ second_ring_x[second_ring_x < 0] = 0
620
+
621
+ second_ring_y[second_ring_y > main_shape.shape[0] - 1] = main_shape.shape[0] - 1
622
+ second_ring_x[second_ring_x > main_shape.shape[1] - 1] = main_shape.shape[1] - 1
623
+ for j in range(len(second_ring_y)):
624
+ r += 1
625
+ fy, fx, sy, sx = first_ring_idx[0][j], first_ring_idx[1][j], second_ring_y[j], second_ring_x[j]
626
+ line = get_line_points((fy, fx), (sy, sx))
627
+ sun[line[:, 1], line[:, 0]] = r
628
+ rays.append(r)
629
+ return np.array(rays), sun
630
+
631
+
632
+ def find_median_shape(binary_3d_matrix: NDArray[np.uint8]) -> NDArray[np.uint8]:
633
+ """
634
+ Find the median shape from a binary 3D matrix.
635
+
636
+ This function computes the median 2D slice of a binary (0/1) 3D matrix
637
+ by finding which voxels appear in at least half of the slices.
638
+
639
+ Parameters
640
+ ----------
641
+ binary_3d_matrix : ndarray of uint8
642
+ Input 3D binary matrix where each slice is a 2D array.
643
+
644
+ Returns
645
+ -------
646
+ ndarray of uint8
647
+ Median shape as a 2D binary matrix where the same voxels
648
+ that appear in at least half of the input slices are set to 1.
649
+
650
+ Examples
651
+ --------
652
+ >>> binary_3d_matrix = np.random.randint(0, 2, (10, 5, 5), dtype=np.uint8)
653
+ >>> median_shape = find_median_shape(binary_3d_matrix)
654
+ >>> print(median_shape)
655
+ """
656
+ binary_2d_matrix = np.apply_along_axis(np.sum, 0, binary_3d_matrix)
657
+ median_shape = np.zeros(binary_2d_matrix.shape, dtype=np.uint8)
658
+ median_shape[np.greater_equal(binary_2d_matrix, binary_3d_matrix.shape[0] // 2)] = 1
659
+ return median_shape
660
+
661
+
662
+ @njit()
663
+ def reduce_image_size_for_speed(image_of_2_shapes: NDArray[np.uint8]) -> Tuple[Tuple, Tuple]:
664
+ """
665
+ Reduces the size of an image containing two shapes for faster processing.
666
+
667
+ The function iteratively divides the image into quadrants and keeps only
668
+ those that contain both shapes until a minimal size is reached.
669
+
670
+ Parameters
671
+ ----------
672
+ image_of_2_shapes : ndarray of uint8
673
+ The input image containing two shapes.
674
+
675
+ Returns
676
+ -------
677
+ out : tuple of tuples
678
+ The indices of the first and second shape in the reduced image.
679
+
680
+ Examples
681
+ --------
682
+ >>> main_shape = np.zeros((10, 10), dtype=np.uint8)
683
+ >>> main_shape[1:3, 1:3] = 1
684
+ >>> main_shape[1:3, 4:6] = 2
685
+ >>> shape1_idx, shape2_idx = reduce_image_size_for_speed(main_shape)
686
+ >>> print(shape1_idx)
687
+ (array([1, 1, 2, 2]), array([1, 2, 1, 2]))
688
+ """
689
+ sub_image = image_of_2_shapes.copy()
690
+ y_size, x_size = sub_image.shape
691
+ images_list = [sub_image]
692
+ good_images = [0]
693
+ sub_image = images_list[good_images[0]]
694
+ while (len(good_images) == 1 | len(good_images) == 2) & y_size > 3 & x_size > 3:
695
+ y_size, x_size = sub_image.shape
696
+ images_list = []
697
+ images_list.append(sub_image[:((y_size // 2) + 1), :((x_size // 2) + 1)])
698
+ images_list.append(sub_image[:((y_size // 2) + 1), (x_size // 2):])
699
+ images_list.append(sub_image[(y_size // 2):, :((x_size // 2) + 1)])
700
+ images_list.append(sub_image[(y_size // 2):, (x_size // 2):])
701
+ good_images = []
702
+ for idx, image in enumerate(images_list):
703
+ if np.any(image == 2):
704
+ if np.any(image == 1):
705
+ good_images.append(idx)
706
+ if len(good_images) == 2:
707
+ if good_images == [0, 1]:
708
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
709
+ elif good_images == [0, 2]:
710
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
711
+ elif good_images == [1, 3]:
712
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
713
+ elif good_images == [2, 3]:
714
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
715
+ else:
716
+ pass
717
+ else:
718
+ sub_image = images_list[good_images[0]]
719
+
720
+ shape1_idx = np.nonzero(sub_image == 1)
721
+ shape2_idx = np.nonzero(sub_image == 2)
722
+ return shape1_idx, shape2_idx
723
+
724
+
725
+ def get_minimal_distance_between_2_shapes(image_of_2_shapes: NDArray[np.uint8], increase_speed: bool=True) -> float:
726
+ """
727
+ Get the minimal distance between two shapes in an image.
728
+
729
+ This function calculates the minimal Euclidean distance between
730
+ two different shapes represented by binary values 1 and 2 in a given image.
731
+ It can optionally reduce the image size for faster processing.
732
+
733
+ Parameters
734
+ ----------
735
+ image_of_2_shapes : ndarray of uint8
736
+ Binary image containing two shapes to measure distance between.
737
+ increase_speed : bool, optional
738
+ Flag to reduce image size for faster computation. Default is True.
739
+
740
+ Returns
741
+ -------
742
+ min_distance : float64
743
+ The minimal Euclidean distance between the two shapes.
744
+
745
+ Examples
746
+ --------
747
+ >>> import numpy as np
748
+ >>> image = np.array([[1, 0], [0, 2]])
749
+ >>> distance = get_minimal_distance_between_2_shapes(image)
750
+ >>> print(distance)
751
+ 1.4142135623730951
752
+ """
753
+ if increase_speed:
754
+ shape1_idx, shape2_idx = reduce_image_size_for_speed(image_of_2_shapes)
755
+ else:
756
+ shape1_idx, shape2_idx = np.nonzero(image_of_2_shapes == 1), np.nonzero(image_of_2_shapes == 2)
757
+ t = KDTree(np.transpose(shape1_idx))
758
+ dists, nns = t.query(np.transpose(shape2_idx), 1)
759
+ return np.min(dists)
760
+
761
+
762
+ def find_major_incline(vector: NDArray, natural_noise: float) -> Tuple[int, int]:
763
+ """
764
+ Find the major incline section in a vector.
765
+
766
+ This function identifies the segment of a vector that exhibits
767
+ the most significant change in values, considering a specified
768
+ natural noise level. It returns the left and right indices that
769
+ define this segment.
770
+
771
+ Parameters
772
+ ----------
773
+ vector : ndarray of float64
774
+ Input data vector where the incline needs to be detected.
775
+ natural_noise : float
776
+ The acceptable noise level for determining the incline.
777
+
778
+ Returns
779
+ -------
780
+ Tuple[int, int]
781
+ A tuple containing two integers: the left and right indices
782
+ of the major incline section in the vector.
783
+
784
+ Examples
785
+ --------
786
+ >>> vector = np.array([3, 5, 7, 9, 10])
787
+ >>> natural_noise = 2.5
788
+ >>> left, right = find_major_incline(vector, natural_noise)
789
+ >>> (left, right)
790
+ (0, 1)
791
+ """
792
+ left = 0
793
+ right = 1
794
+ ref_length = np.max((5, 2 * natural_noise))
795
+ vector = moving_average(vector, 5)
796
+ ref_extent = np.ptp(vector)
797
+ extent = ref_extent
798
+ # Find the left limit:
799
+ while len(vector) > ref_length and extent > (ref_extent - (natural_noise / 4)):
800
+ vector = vector[1:]
801
+ extent = np.ptp(vector)
802
+ left += 1
803
+ # And the right one:
804
+ extent = ref_extent
805
+ while len(vector) > ref_length and extent > (ref_extent - natural_noise / 2):
806
+ vector = vector[:-1]
807
+ extent = np.ptp(vector)
808
+ right += 1
809
+ # And the left again, with stronger stringency:
810
+ extent = ref_extent
811
+ while len(vector) > ref_length and extent > (ref_extent - natural_noise):
812
+ vector = vector[1:]
813
+ extent = np.ptp(vector)
814
+ left += 1
815
+ # When there is no incline, put back left and right to 0
816
+ if len(vector) <= ref_length:
817
+ left = 0
818
+ right = 1
819
+ return left, right
820
+
821
+
822
+ def rank_from_top_to_bottom_from_left_to_right(binary_image: NDArray[np.uint8], y_boundaries: NDArray[int], get_ordered_image: bool=False) -> Tuple:
823
+ """
824
+ Rank components in a binary image from top to bottom and from left to right.
825
+
826
+ This function processes a binary image to rank its components based on
827
+ their centroids. It first sorts the components row by row and then orders them
828
+ within each row from left to right. If the ordering fails, it attempts an alternative
829
+ algorithm and returns the ordered statistics and centroids.
830
+
831
+ Parameters
832
+ ----------
833
+ binary_image : ndarray of uint8
834
+ The input binary image to process.
835
+ y_boundaries : ndarray of int
836
+ Boundary information for the y-coordinates.
837
+ get_ordered_image : bool, optional
838
+ If True, returns an ordered image in addition to the statistics and centroids.
839
+ Default is False.
840
+
841
+ Returns
842
+ -------
843
+ tuple
844
+ If `get_ordered_image` is True, returns a tuple containing:
845
+ - ordered_stats : ndarray of int
846
+ Statistics for the ordered components.
847
+ - ordered_centroids : ndarray of float64
848
+ Centroids for the ordered components.
849
+ - ordered_image : ndarray of uint8
850
+ The binary image with ordered component labels.
851
+
852
+ If `get_ordered_image` is False, returns a tuple containing:
853
+ - ordered_stats : ndarray of int
854
+ Statistics for the ordered components.
855
+ - ordered_centroids : ndarray of float64
856
+ Centroids for the ordered components.
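+
+ Examples
+ --------
+ Illustrative usage sketch (hypothetical toy data; `y_boundaries` is assumed to
+ mark row starts with -1 and row ends with 1; no output shown):
+
+ >>> img = np.zeros((20, 20), dtype=np.uint8)
+ >>> img[2:5, 2:5] = img[2:5, 10:13] = img[12:15, 2:5] = img[12:15, 10:13] = 1
+ >>> y_boundaries = np.zeros(20, dtype=np.int8)
+ >>> y_boundaries[2] = y_boundaries[12] = -1
+ >>> y_boundaries[5] = y_boundaries[15] = 1
+ >>> stats, centroids = rank_from_top_to_bottom_from_left_to_right(img, y_boundaries)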
857
+ """
858
+ nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
859
+ connectivity=8)
860
+
861
+ centroids = centroids[1:, :]
862
+ final_order = np.zeros(centroids.shape[0], dtype=np.uint8)
863
+ sorted_against_y = np.argsort(centroids[:, 1])
864
+ # row_nb = (y_boundaries == 1).sum()
865
+ row_nb = np.max(((y_boundaries == 1).sum(), (y_boundaries == - 1).sum()))
866
+ component_per_row = int(np.ceil((nb_components - 1) / row_nb))
867
+ for row_i in range(row_nb):
868
+ row_i_start = row_i * component_per_row
869
+ if row_i == (row_nb - 1):
870
+ sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:], 0])
871
+ final_order[row_i_start:] = sorted_against_y[row_i_start:][sorted_against_x]
872
+ else:
873
+ row_i_end = (row_i + 1) * component_per_row
874
+ sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:row_i_end], 0])
875
+ final_order[row_i_start:row_i_end] = sorted_against_y[row_i_start:row_i_end][sorted_against_x]
876
+ ordered_centroids = centroids[final_order, :]
877
+ ordered_stats = stats[1:, :]
878
+ ordered_stats = ordered_stats[final_order, :]
879
+
880
+ # If it fails, use another algo
881
+ if (final_order == 0).sum() > 1:
882
+ nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
883
+ connectivity=8)
884
+ # First order according to x: from left to right
885
+ # Remove the background and order centroids along x axis
886
+ centroids = centroids[1:, :]
887
+ x_order = np.argsort(centroids[:, 0])
888
+ centroids = centroids[x_order, :]
889
+
890
+
891
+ # Then use the boundaries of each Y peak to sort these shapes row by row
892
+ if y_boundaries is not None:
893
+ binary_image = deepcopy(output)
894
+ binary_image[np.nonzero(binary_image)] = 1
895
+ y_starts, y_ends = np.argwhere(y_boundaries == - 1), np.argwhere(y_boundaries == 1)
896
+
897
+ margins_ci = np.array((0.5, 0.4, 0.3, 0.2, 0.1))
898
+ for margin in margins_ci:
899
+ ranking_success: bool = True
900
+ y_order = np.zeros(centroids.shape[0], dtype=np.uint8)
901
+ count: np.uint8 = 0
902
+ y_margins = (y_ends - y_starts) * margin
903
+ # Loop and try to fill each row with all components, fail if the final number is wrong
904
+ for y_interval in np.arange(len(y_starts)):
905
+ for patch_i in np.arange(nb_components - 1):
906
+ # Compare the y coordinate of the centroid with the detected y intervals with
907
+ # an added margin in order to order coordinates
908
+ if np.logical_and(centroids[patch_i, 1] >= (y_starts[y_interval] - y_margins[y_interval]),
909
+ centroids[patch_i, 1] <= (y_ends[y_interval] + y_margins[y_interval])):
910
+ try:
911
+ y_order[count] = patch_i
912
+ count = count + 1
913
+ except IndexError as exc:
914
+ ranking_success = False
915
+
916
+ if ranking_success:
917
+ break
918
+ else:
919
+ ranking_success = False
920
+ # If all tested margins failed, do not reorder row by row, i.e. keep the automatic ranking
921
+ if not ranking_success:
922
+ y_order = np.arange(centroids.shape[0])
923
+
924
+
925
+ # Second order according to y: from top to bottom
926
+ ordered_centroids = centroids[y_order, :]
927
+ ordered_stats = stats[1:, :]
928
+ ordered_stats = ordered_stats[x_order, :]
929
+ ordered_stats = ordered_stats[y_order, :]
930
+
931
+ if get_ordered_image:
932
+ ordered_image = np.zeros(binary_image.shape, dtype=np.uint8)
933
+ for patch_j in np.arange(centroids.shape[0]):
934
+ sub_output = output[ordered_stats[patch_j, 1]: (ordered_stats[patch_j, 1] + ordered_stats[patch_j, 3]), ordered_stats[patch_j, 0]: (ordered_stats[patch_j, 0] + ordered_stats[patch_j, 2])]
935
+ sub_output = np.sort(np.unique(sub_output))
936
+ if len(sub_output) == 1:
937
+ ordered_image[output == sub_output[0]] = patch_j + 1
938
+ else:
939
+ ordered_image[output == sub_output[1]] = patch_j + 1
940
+
941
+
942
+ return ordered_stats, ordered_centroids, ordered_image
943
+ else:
944
+ return ordered_stats, ordered_centroids
945
+
946
+
947
+ def get_largest_connected_component(segmentation: NDArray[np.uint8]) -> Tuple[np.int64, NDArray[bool]]:
948
+ """
949
+ Find the largest connected component in a segmentation image.
950
+
951
+ This function labels all connected components in a binary
952
+ segmentation image, determines the size of each component,
953
+ and returns information about the largest connected component.
954
+
955
+ Parameters
956
+ ----------
957
+ segmentation : ndarray of uint8
958
+ Binary segmentation image where different integer values represent
959
+ different connected components.
960
+
961
+ Returns
962
+ -------
963
+ Tuple[int, ndarray of bool]
964
+ A tuple containing:
965
+ - The size of the largest connected component.
966
+ - A boolean mask representing the largest connected
967
+ component in the input segmentation image.
968
+
969
+ Examples
970
+ --------
971
+ >>> segmentation = np.zeros((10, 10), dtype=np.uint8)
972
+ >>> segmentation[2:6, 2:5] = 1
973
+ >>> segmentation[6:9, 6:9] = 1
974
+ >>> size, mask = get_largest_connected_component(segmentation)
975
+ >>> print(size)
976
+ 12
977
+ """
978
+ labels = label(segmentation)
979
+ assert(labels.max() != 0) # assume at least 1 CC
980
+ con_comp_sizes = np.bincount(labels.flat)[1:]
981
+ largest_idx = np.argmax(con_comp_sizes)
982
+ largest_connected_component = labels == largest_idx + 1
983
+ return con_comp_sizes[largest_idx], largest_connected_component
984
+
985
+
986
+ def expand_until_neighbor_center_gets_nearer_than_own(shape_to_expand: NDArray[np.uint8], without_shape_i: NDArray[np.uint8],
987
+ shape_original_centroid: NDArray,
988
+ ref_centroids: NDArray, kernel: NDArray) -> NDArray[np.uint8]:
989
+ """
990
+ Expand a shape until its neighbor's centroid is closer than its own.
991
+
992
+ This function takes in several numpy arrays representing shapes and their
993
+ centroids, and expands the input shape until the distance to the nearest
994
+ neighboring centroid is less than or equal to the distance between the shape's
995
+ contour and its own centroid.
996
+
997
+ Parameters
998
+ ----------
999
+ shape_to_expand : ndarray of uint8
1000
+ The binary shape to be expanded.
1001
+ without_shape_i : ndarray of uint8
1002
+ A binary array representing the area without the shape.
1003
+ shape_original_centroid : ndarray
1004
+ The centroid of the original shape.
1005
+ ref_centroids : ndarray
1006
+ Reference centroids to compare distances with.
1007
+ kernel : ndarray
1008
+ The kernel for dilation operation.
1009
+
1010
+ Returns
1011
+ -------
1012
+ ndarray of uint8
1013
+ The expanded shape.
1014
+ """
1015
+
1016
+ without_shape = deepcopy(without_shape_i)
1017
+ # Calculate the distance between the focal shape centroid and its 10% nearest neighbor centroids
1018
+ centroid_distances = np.sqrt(np.square(ref_centroids[1:, 0] - shape_original_centroid[0]) + np.square(
1019
+ ref_centroids[1:, 1] - shape_original_centroid[1]))
1020
+ nearest_shapes = np.where(np.greater(np.quantile(centroid_distances, 0.1), centroid_distances))[0]
1021
+
1022
+ # Use the nearest neighbor distance as a maximal reference to get the minimal distance between the border of the shape and the neighboring centroids
1023
+ neighbor_mindist = np.min(centroid_distances)
1024
+ idx = np.nonzero(shape_to_expand)
1025
+ for shape_j in nearest_shapes:
1026
+ neighbor_mindist = np.minimum(neighbor_mindist, np.min(
1027
+ np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
1028
+ neighbor_mindist *= 0.5
1029
+ # Get the maximal distance of the focal shape between its contour and its centroids
1030
+ itself_maxdist = np.max(
1031
+ np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
1032
+
1033
+ # Put 1 at the border of the reference image in order to be able to stop the while loop once border reached
1034
+ without_shape[0, :] = 1
1035
+ without_shape[:, 0] = 1
1036
+ without_shape[without_shape.shape[0] - 1, :] = 1
1037
+ without_shape[:, without_shape.shape[1] - 1] = 1
1038
+
1039
+ # Compare the distance between the shape's contour and its own centroid with the distance between that contour and the neighboring centroids
+ # Continue as long as the distance reached by the shape (from its centroid) stays smaller than its distance to the nearest neighboring centroid
1041
+ previous_shape_to_expand = deepcopy(shape_to_expand)
1042
+ while np.logical_and(np.any(np.less_equal(itself_maxdist, neighbor_mindist)),
1043
+ np.count_nonzero(shape_to_expand * without_shape) == 0):
1044
+ previous_shape_to_expand = deepcopy(shape_to_expand)
1045
+ # Dilate the shape by the kernel size
1046
+ shape_to_expand = cv2.dilate(shape_to_expand, kernel, iterations=1,
1047
+ borderType=cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED)
1048
+ # Extract the new connected component
1049
+ shape_nb, shape_to_expand = cv2.connectedComponents(shape_to_expand, ltype=cv2.CV_16U)
1050
+ shape_to_expand = shape_to_expand.astype(np.uint8)
1051
+ # Use the new shape coordinates to recompute the distances to its own centroid and to the neighboring centroids
1052
+ idx = np.nonzero(shape_to_expand)
1053
+ for shape_j in nearest_shapes:
1054
+ neighbor_mindist = np.minimum(neighbor_mindist, np.min(
1055
+ np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
1056
+ itself_maxdist = np.max(
1057
+ np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
1058
+ return previous_shape_to_expand
1059
+
1060
+
1061
+ def image_borders(dimensions: tuple, shape: str="rectangular") -> NDArray[np.uint8]:
1062
+ """
1063
+ Create an image with borders, either rectangular or circular.
1064
+
1065
+ Parameters
1066
+ ----------
1067
+ dimensions : tuple
1068
+ The dimensions of the image (height, width).
1069
+ shape : str, optional
1070
+ The shape of the borders. Options are "rectangular" or "circular".
1071
+ Defaults to "rectangular".
1072
+
1073
+ Returns
1074
+ -------
1075
+ out : ndarray of uint8
1076
+ The image with borders. If the shape is "circular", an ellipse border;
1077
+ if "rectangular", a rectangular border.
1078
+
1079
+ Examples
1080
+ --------
1081
+ >>> borders = image_borders((3, 3), "rectangular")
1082
+ >>> print(borders)
1083
+ [[0 0 0]
1084
+ [0 1 0]
1085
+ [0 0 0]]
1086
+ """
1087
+ if shape == "circular":
1088
+ borders = Ellipse(dimensions).create()
1089
+ img_contours = image_borders(dimensions)
1090
+ borders = borders * img_contours
1091
+ else:
1092
+ borders = np.ones(dimensions, dtype=np.uint8)
1093
+ borders[0, :] = 0
1094
+ borders[:, 0] = 0
1095
+ borders[- 1, :] = 0
1096
+ borders[:, - 1] = 0
1097
+ return borders
1098
+
1099
+
1100
+ def get_radius_distance_against_time(binary_video: NDArray[np.uint8], field) -> Tuple[NDArray[np.float32], int, int]:
1101
+ """
1102
+ Calculate the radius distance against time from a binary video and field.
1103
+
1104
+ This function computes the change in radius distances over time
1105
+ by analyzing a binary video and mapping it to corresponding field values.
1106
+
1107
+ Parameters
1108
+ ----------
1109
+ binary_video : ndarray of uint8
1110
+ Binary video data.
1111
+ field : ndarray
1112
+ Field values to analyze the radius distances against.
1113
+
1114
+ Returns
1115
+ -------
1116
+ distance_against_time : ndarray of float32
1117
+ Radius distances over time.
1118
+ time_start : int
1119
+ Starting time index where the radius distance measurement begins.
1120
+ time_end : int
1121
+ Ending time index where the radius distance measurement ends.
1122
+
1123
+ Examples
1124
+ --------
1125
+ >>> binary_video = np.zeros((3, 5, 5), dtype=np.uint8)
+ >>> binary_video[0, 2, 2] = 1
+ >>> binary_video[1, 1:4, 1:4] = 1
+ >>> binary_video[2, :, :] = 1
+ >>> field = rounded_inverted_distance_transform(binary_video[0, :, :], max_distance=2)
+ >>> distance_against_time, time_start, time_end = get_radius_distance_against_time(binary_video, field)
1128
+ """
1129
+ pixel_start = np.max(field[field > 0])
1130
+ pixel_end = np.min(field[field > 0])
1131
+ time_span = np.arange(binary_video.shape[0])
1132
+ time_start = 0
1133
+ time_end = time_span[-1]
1134
+ start_not_found: bool = True
1135
+ for t in time_span:
1136
+ if start_not_found:
1137
+ if np.any((field == pixel_start) * binary_video[t, :, :]):
1138
+ start_not_found = False
1139
+ time_start = t
1140
+ if np.any((field == pixel_end) * binary_video[t, :, :]):
1141
+ time_end = t
1142
+ break
1143
+ distance_against_time = np.linspace(pixel_start, pixel_end, (time_end - time_start + 1))
1144
+ distance_against_time = np.round(distance_against_time).astype(np.float32)
1145
+ return distance_against_time, time_start, time_end
1146
+
1147
+
1148
+ def close_holes(binary_img: NDArray[np.uint8]) -> NDArray[np.uint8]:
1149
+ """
1150
+ Close holes in a binary image using connected components analysis.
1151
+
1152
+ This function identifies and closes small holes within the foreground objects of a binary image. It uses connected component analysis to find and fill holes that are smaller than the main object.
1153
+
1154
+ Parameters
1155
+ ----------
1156
+ binary_img : ndarray of uint8
1157
+ Binary input image where holes need to be closed.
1158
+
1159
+ Returns
1160
+ -------
1161
+ out : ndarray of uint8
1162
+ Binary image with closed holes.
1163
+
1164
+ Examples
1165
+ --------
1166
+ >>> binary_img = np.zeros((10, 10), dtype=np.uint8)
1167
+ >>> binary_img[2:8, 2:8] = 1
1168
+ >>> binary_img[4:6, 4:6] = 0 # Creating a hole
1169
+ >>> result = close_holes(binary_img)
1170
+ >>> print(result)
1171
+ [[0 0 0 0 0 0 0 0 0 0]
1172
+ [0 0 0 0 0 0 0 0 0 0]
1173
+ [0 0 1 1 1 1 1 1 0 0]
1174
+ [0 0 1 1 1 1 1 1 0 0]
1175
+ [0 0 1 1 1 1 1 1 0 0]
1176
+ [0 0 1 1 1 1 1 1 0 0]
1177
+ [0 0 1 1 1 1 1 1 0 0]
1178
+ [0 0 1 1 1 1 1 1 0 0]
1179
+ [0 0 0 0 0 0 0 0 0 0]
1180
+ [0 0 0 0 0 0 0 0 0 0]]
1181
+ """
1182
+ # Label the connected components of the inverted image: label 1 is assumed to be the outer background, any higher label is a hole to fill
1183
+ nb, new_order = cv2.connectedComponents(1 - binary_img)
1184
+ if nb > 2:
1185
+ binary_img[new_order > 1] = 1
1186
+ return binary_img
1187
+
1188
+
1189
+ def dynamically_expand_to_fill_holes(binary_video: NDArray[np.uint8], holes: NDArray[np.uint8]) -> Tuple[NDArray[np.uint8], int, NDArray[np.float32]]:
1190
+ """
1191
+ Fill the holes in a binary video by progressively expanding the shape made of ones.
1192
+
1193
+ Parameters
1194
+ ----------
1195
+ binary_video : ndarray of uint8
1196
+ The binary video where holes need to be filled.
1197
+ holes : ndarray of uint8
1198
+ Array representing the holes in the binary video.
1199
+
1200
+ Returns
1201
+ -------
1202
+ out : tuple of ndarray of uint8, int, and ndarray of float32
1203
+ The modified binary video with filled holes,
1204
+ the end time when all holes are filled, and
1205
+ an array of distances against time used to fill the holes.
1206
+
1207
+ Examples
1208
+ --------
1209
+ >>> binary_video = np.zeros((10, 640, 480), dtype=np.uint8)
1210
+ >>> binary_video[:, 300:400, 220:240] = 1
1211
+ >>> holes = np.zeros((640, 480), dtype=np.uint8)
1212
+ >>> holes[340:360, 228:232] = 1
1213
+ >>> filled_video, end_time, distances = dynamically_expand_to_fill_holes(binary_video, holes)
1214
+ >>> print(filled_video.shape) # Should print (10, 640, 480)
1215
+ (10, 640, 480)
1216
+ """
1217
+ # The first fill should occur at the time at which the first hole pixel could have been covered,
+ # i.e. once the shape has had enough time to cross a distance large enough to reach every hole
1219
+ holes_contours = cv2.dilate(holes, cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1220
+ field = rounded_inverted_distance_transform(binary_video[0, :, :], (binary_video.shape[0] - 1))
1221
+ field2 = inverted_distance_transform(binary_video[0, :, :], (binary_video.shape[0] - 1))
1222
+ holes_contours = holes_contours * field * binary_video[- 1, :, :]
1223
+ holes[np.nonzero(holes)] = field[np.nonzero(holes)]
1224
+ if np.any(holes_contours):
1225
+ # Find the relationship between distance and time
1226
+ distance_against_time, holes_time_start, holes_time_end = get_radius_distance_against_time(binary_video, holes_contours)
1227
+ # Use that vector to progressively fill holes at the same speed as shape grows
1228
+ for t in np.arange(len(distance_against_time)):
1229
+ new_order, stats, centers = cc((holes >= distance_against_time[t]).astype(np.uint8))
1230
+ for comp_i in np.arange(1, stats.shape[0]):
1231
+ past_image = deepcopy(binary_video[holes_time_start + t, :, :])
1232
+ with_new_comp = new_order == comp_i
1233
+ past_image[with_new_comp] = 1
1234
+ nb_comp, image_garbage = cv2.connectedComponents(past_image)
1235
+ if nb_comp == 2:
1236
+ binary_video[holes_time_start + t, :, :][with_new_comp] = 1
1237
+ # Make sure that holes remain filled from holes_time_end to the end of the video
1238
+ for t in np.arange((holes_time_end + 1), binary_video.shape[0]):
1239
+ past_image = binary_video[t, :, :]
1240
+ past_image[holes >= distance_against_time[-1]] = 1
1241
+ binary_video[t, :, :] = past_image
1242
+ else:
1243
+ holes_time_end = None
1244
+ distance_against_time = np.array([1, 2], dtype=np.float32)
1245
+
1246
+ return binary_video, holes_time_end, distance_against_time
1247
+
1248
+
1249
+ class Ellipse:
1250
+ """
1251
+ Create an ellipse with given vertical and horizontal sizes.
1252
+
1253
+ This class represents an ellipse defined by its vertical and horizontal
1254
+ dimensions. It provides methods to check if a point lies within the ellipse
1255
+ and to generate a 2D array representing the ellipse shape.
1256
+ """
1257
+ def __init__(self, sizes):
1258
+ """
1259
+ Initialize the object with given vertical and horizontal sizes.
1260
+
1261
+ Parameters
1262
+ ----------
1263
+ sizes : list or tuple of int, length 2
1264
+ List containing two integers representing vertical and horizontal sizes.
1265
+
1266
+ Attributes
1267
+ ----------
1268
+ vsize : int
1269
+ Vertical size of the object.
1270
+ hsize : int
1271
+ Horizontal size of the object.
1272
+ vr : int
1273
+ Half of the horizontal size.
1274
+ hr : int
1275
+ Half of the vertical size.
1276
+ """
1277
+ self.vsize = sizes[0]
1278
+ self.hsize = sizes[1]
1279
+ self.vr = self.hsize // 2
1280
+ self.hr = self.vsize // 2
1281
+
1282
+ def ellipse_fun(self, x, y):
1283
+ """
1284
+ Check if a point (x,y) lies within or on the ellipse.
1285
+
1286
+ This function checks whether a given point lies inside or on the boundary
+ of an ellipse centred at (`self.hr`, `self.vr`), with semi-axis `self.hr`
+ along the first (row) axis and `self.vr` along the second (column) axis.
1289
+
1290
+ Parameters
1291
+ ----------
1292
+ x : float
+ Coordinate of the point along the first (row) axis.
+ y : float
+ Coordinate of the point along the second (column) axis.
1296
+
1297
+ Returns
1298
+ -------
1299
+ bool
1300
+ True if the point (x, y) lies within or on the ellipse; False otherwise.
1301
+
1302
+ """
1303
+ return (((x - self.hr) ** 2) / (self.hr ** 2)) + (((y - self.vr) ** 2) / (self.vr ** 2)) <= 1
1304
+
1305
+ def create(self) -> NDArray:
1306
+ """
1307
+ Create a 2D array representing an ellipse.
1308
+
1309
+ This method generates a NumPy array where each element is determined by
1310
+ the `ellipse_fun` function, which computes values based on the horizontal
1311
+ and vertical sizes of the ellipse.
1312
+
1313
+ Returns
1314
+ -------
1315
+ ndarray
1316
+ A 2D NumPy array representing the ellipse shape.
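+
+ Examples
+ --------
+ Illustrative sketch (output computed from the formula above for a 5x5
+ ellipse, cast to uint8 for readability):
+
+ >>> print(Ellipse((5, 5)).create().astype(np.uint8))
+ [[0 0 1 0 0]
+ [0 1 1 1 0]
+ [1 1 1 1 1]
+ [0 1 1 1 0]
+ [0 0 1 0 0]]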
1317
+ """
1318
+ return np.fromfunction(self.ellipse_fun, (self.vsize, self.hsize))
1319
+
1320
+
1321
+ rhombus_55 = Ellipse((5, 5)).create().astype(np.uint8)
1322
+
1323
+
1324
+ def get_contours(binary_image: NDArray[np.uint8]) -> NDArray[np.uint8]:
1325
+ """
1326
+ Find and return the contours of a binary image.
1327
+
1328
+ This function erodes the input binary image using a 3x3 cross-shaped
1329
+ structuring element and subtracts the eroded image from the original to obtain the contours. Fully filled and empty images are handled as special cases.
1330
+
1331
+ Parameters
1332
+ ----------
1333
+ binary_image : ndarray of uint8
1334
+ Input binary image from which to extract contours.
1335
+
1336
+ Returns
1337
+ -------
1338
+ out : ndarray of uint8
1339
+ Image containing only the contours extracted from `binary_image`.
1340
+
1341
+ Examples
1342
+ --------
1343
+ >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1344
+ >>> binary_image[2:8, 2:8] = 1
1345
+ >>> result = get_contours(binary_image)
1346
+ >>> print(result)
1347
+ [[0 0 0 0 0 0 0 0 0 0]
1348
+ [0 0 0 0 0 0 0 0 0 0]
1349
+ [0 0 1 1 1 1 1 1 0 0]
1350
+ [0 0 1 0 0 0 0 1 0 0]
1351
+ [0 0 1 0 0 0 0 1 0 0]
1352
+ [0 0 1 0 0 0 0 1 0 0]
1353
+ [0 0 1 0 0 0 0 1 0 0]
1354
+ [0 0 1 1 1 1 1 1 0 0]
1355
+ [0 0 0 0 0 0 0 0 0 0]
1356
+ [0 0 0 0 0 0 0 0 0 0]]
1357
+ """
1358
+ if np.all(binary_image):
1359
+ contours = 1 - image_borders(binary_image.shape)
1360
+ elif np.any(binary_image):
1361
+ eroded_binary = cv2.erode(binary_image, cross_33)
1362
+ contours = binary_image - eroded_binary
1363
+ else:
1364
+ contours = binary_image
1365
+ return contours
1366
+
1367
+
1368
+ def prepare_box_counting(binary_image: NDArray[np.uint8], min_im_side: int=128, min_mesh_side: int=8, zoom_step: int=0, contours: bool=True)-> Tuple[NDArray[np.uint8], NDArray[np.uint8]]:
1369
+ """Prepare box counting parameters for image analysis.
1370
+
1371
+ Prepares parameters for box counting method based on binary
1372
+ image input. Adjusts image size, computes side lengths, and applies
1373
+ contour extraction if specified.
1374
+
1375
+ Parameters
1376
+ ----------
1377
+ binary_image : ndarray of uint8
1378
+ Binary image for analysis.
1379
+ min_im_side : int, optional
1380
+ Minimum side length the cropped image must have for side lengths to be computed. Default is 128.
1381
+ min_mesh_side : int, optional
1382
+ Minimum mesh side length. Default is 8.
1383
+ zoom_step : int, optional
1384
+ Step between consecutive box side lengths; 0 means descending powers of two are used instead. Default is 0.
1385
+ contours : bool, optional
1386
+ Whether to apply contour extraction. Default is True.
1387
+
1388
+ Returns
1389
+ -------
1390
+ out : tuple of ndarray of uint8, ndarray (or None)
1391
+ Cropped (and, if requested, contour-extracted) binary image, and the array of box side lengths (None when the image is empty or its shortest side is below `min_im_side`).
1392
+
1393
+ Examples
1394
+ --------
1395
+ >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1396
+ >>> binary_image[2:4, 2:6] = 1
1397
+ >>> binary_image[7:9, 4:7] = 1
1398
+ >>> binary_image[4:7, 5] = 1
1399
+ >>> cropped_img, side_lengths = prepare_box_counting(binary_image, min_im_side=2, min_mesh_side=2)
1400
+ >>> print(cropped_img), print(side_lengths)
1401
+ [[0 0 0 0 0 0 0]
1402
+ [0 1 1 1 1 0 0]
1403
+ [0 1 1 1 1 0 0]
1404
+ [0 0 0 0 1 0 0]
1405
+ [0 0 0 0 1 0 0]
1406
+ [0 0 0 0 1 0 0]
1407
+ [0 0 0 1 0 1 0]
1408
+ [0 0 0 1 1 1 0]
1409
+ [0 0 0 0 0 0 0]]
1410
+ [4 2]
1411
+ """
1412
+ side_lengths = None
1413
+ zoomed_binary = binary_image
1414
+ binary_idx = np.nonzero(binary_image)
1415
+ if binary_idx[0].size:
1416
+ min_y = np.min(binary_idx[0])
1417
+ min_y = np.max((min_y - 1, 0))
1418
+
1419
+ min_x = np.min(binary_idx[1])
1420
+ min_x = np.max((min_x - 1, 0))
1421
+
1422
+ max_y = np.max(binary_idx[0])
1423
+ max_y = np.min((max_y + 1, binary_image.shape[0] - 1))
1424
+
1425
+ max_x = np.max(binary_idx[1])
1426
+ max_x = np.min((max_x + 1, binary_image.shape[1] - 1))
1427
+
1428
+ zoomed_binary = deepcopy(binary_image[min_y:(max_y + 1), min_x: (max_x + 1)])
1429
+ min_side = np.min(zoomed_binary.shape)
1430
+ if min_side >= min_im_side:
1431
+ if contours:
1432
+ eroded_zoomed_binary = cv2.erode(zoomed_binary, cross_33)
1433
+ zoomed_binary = zoomed_binary - eroded_zoomed_binary
1434
+ if zoom_step == 0:
1435
+ max_power = int(np.floor(np.log2(min_side))) # Largest integer power of 2
1436
+ side_lengths = 2 ** np.arange(max_power, int(np.log2(min_mesh_side // 2)), -1)
1437
+ else:
1438
+ side_lengths = np.arange(min_mesh_side, min_side, zoom_step)
1439
+ return zoomed_binary, side_lengths
1440
+
1441
+
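The two `zoom_step` branches in `prepare_box_counting` produce quite different grids of box sizes. A small sketch of both, assuming a cropped image whose shortest side is 7 pixels (as in the docstring example above):

import numpy as np

min_side, min_mesh_side = 7, 2

# zoom_step == 0: descending powers of two, from the largest power of two
# not exceeding min_side down to min_mesh_side.
max_power = int(np.floor(np.log2(min_side)))
print(2 ** np.arange(max_power, int(np.log2(min_mesh_side // 2)), -1))  # [4 2]

# zoom_step > 0 (here 2): a regular arithmetic grid of side lengths instead.
print(np.arange(min_mesh_side, min_side, 2))  # [2 4 6]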
1442
+ def box_counting_dimension(zoomed_binary: NDArray[np.uint8], side_lengths: NDArray, display: bool=False) -> Tuple[float, float, float]:
1443
+ """
1444
+ Box counting dimension calculation.
1445
+
1446
+ This function calculates the box-counting dimension of a binary image by analyzing the number
1447
+ of boxes (of varying sizes) that contain at least one pixel of the image. The function also
1448
+ provides the R-squared value from linear regression and the number of boxes used.
1449
+
1450
+ Parameters
1451
+ ----------
1452
+ zoomed_binary : NDArray[np.uint8]
1453
+ Binary image (non-zero pixels are foreground), typically the cropped output of `prepare_box_counting`, for which the box-counting dimension is calculated.
1454
+ side_lengths : NDArray
1455
+ Array of side lengths for the boxes used in the box-counting calculation.
1456
+ display : bool, optional
1457
+ If True, displays a scatter plot of the log-transformed box counts and diameters,
1458
+ along with the linear regression fit. Default is False.
1459
+
1460
+ Returns
1461
+ -------
1462
+ out : Tuple[float, float, float]
1463
+ A tuple containing the calculated box-counting dimension (`dimension`), the correlation coefficient of the log-log regression (`r_value`),
1464
+ and the number of box sizes used (`box_nb`).
1465
+
1466
+ Examples
1467
+ --------
1468
+ >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1469
+ >>> binary_image[2:4, 2:6] = 1
1470
+ >>> binary_image[7:9, 4:7] = 1
1471
+ >>> binary_image[4:7, 5] = 1
1472
+ >>> zoomed_binary, side_lengths = prepare_box_counting(binary_image, min_im_side=2, min_mesh_side=2)
1473
+ >>> dimension, r_value, box_nb = box_counting_dimension(zoomed_binary, side_lengths)
1474
+ >>> print(dimension, r_value, box_nb)
1475
+ 1.1699250014423126 0.9999999999999998 2
1476
+ """
1477
+ dimension:float = 0.
1478
+ r_value:float = 0.
1479
+ box_nb:float = 0.
1480
+ if side_lengths is not None:
1481
+ box_counts = np.zeros(len(side_lengths), dtype=np.uint64)
1482
+ # Loop through side_lengths and compute block counts
1483
+ for idx, side_length in enumerate(side_lengths):
1484
+ S = np.add.reduceat(
1485
+ np.add.reduceat(zoomed_binary, np.arange(0, zoomed_binary.shape[0], side_length), axis=0),
1486
+ np.arange(0, zoomed_binary.shape[1], side_length),
1487
+ axis=1
1488
+ )
1489
+ box_counts[idx] = len(np.where(S > 0)[0])
1490
+
1491
+ valid_indices = box_counts > 0
1492
+ if valid_indices.sum() >= 2:
1493
+ log_box_counts = np.log(box_counts)
1494
+ log_reciprocal_lengths = np.log(1 / side_lengths)
1495
+ slope, intercept, r_value, p_value, stderr = linregress(log_reciprocal_lengths, log_box_counts)
1496
+ # coefficients = np.polyfit(log_reciprocal_lengths, log_box_counts, 1)
1497
+ dimension = slope
1498
+ box_nb = len(side_lengths)
1499
+ if display:
1500
+ plt.scatter(log_reciprocal_lengths, log_box_counts, label="Box counting")
1501
+ plt.plot([0, log_reciprocal_lengths.min()], [intercept, intercept + slope * log_reciprocal_lengths.min()], label="Linear regression")
1502
+ plt.plot([], [], ' ', label=f"D = {slope:.2f}")
1503
+ plt.plot([], [], ' ', label=f"R2 = {r_value:.6f}")
1504
+ plt.plot([], [], ' ', label=f"p-value = {p_value:.2e}")
1505
+ plt.legend(loc='best')
1506
+ plt.xlabel(f"log(1/Diameter) | Diameter ⊆ [{side_lengths[0]}:{side_lengths[-1]}] (n={box_nb})")
1507
+ plt.ylabel(f"log(Box number) | Box number ⊆ [{box_counts[0]}:{box_counts[-1]}]")
1508
+ plt.show()
1509
+ # plt.close()
1510
+
1511
+ return dimension, r_value, box_nb
1512
+
1513
+
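The core of `box_counting_dimension` is the double `np.add.reduceat`, which tiles the image with side_length x side_length boxes so that `S[i, j]` holds the number of foreground pixels in box (i, j); the entries with `S > 0` are then the occupied boxes at that scale. A minimal sketch on a hypothetical 4x4 image with 2x2 boxes:

import numpy as np

img = np.array([[1, 0, 0, 0],
                [0, 0, 0, 0],
                [0, 0, 1, 1],
                [0, 0, 1, 0]], dtype=np.uint8)
side = 2
# Sum over each 2x2 box: rows first, then columns.
S = np.add.reduceat(np.add.reduceat(img, np.arange(0, img.shape[0], side), axis=0),
                    np.arange(0, img.shape[1], side), axis=1)
print(S)                    # [[1 0]
                            #  [0 3]]
print(np.count_nonzero(S))  # 2 occupied boxes at this scale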
1514
+ def keep_shape_connected_with_ref(all_shapes: NDArray[np.uint8], reference_shape: NDArray[np.uint8]) -> NDArray[np.uint8]:
1515
+ """
1516
+ Keep shape connected with reference.
1517
+
1518
+ This function analyzes the connected components of a binary image represented by `all_shapes`
1519
+ and returns the first component that intersects with the `reference_shape`.
1520
+ If no component intersects the reference it returns None; if `all_shapes` has no component at all, the reference shape itself is returned.
1521
+
1522
+ Parameters
1523
+ ----------
1524
+ all_shapes : ndarray of uint8
1525
+ Binary image containing all shapes to analyze.
1526
+ reference_shape : ndarray of uint8
1527
+ Binary reference shape used for intersection check.
1528
+
1529
+ Returns
1530
+ -------
1531
+ out : ndarray of uint8 or None
1532
+ The first connected component that intersects with the reference shape,
1533
+ or None if no such component is found.
1534
+
1535
+ Examples
1536
+ -------
1537
+ >>> all_shapes = np.zeros((5, 5), dtype=np.uint8)
1538
+ >>> reference_shape = np.zeros((5, 5), dtype=np.uint8)
1539
+ >>> reference_shape[3, 3] = 1
1540
+ >>> all_shapes[0:2, 0:2] = 1
1541
+ >>> all_shapes[3:4, 3:4] = 1
1542
+ >>> res = keep_shape_connected_with_ref(all_shapes, reference_shape)
1543
+ >>> print(res)
1544
+ [[0 0 0 0 0]
1545
+ [0 0 0 0 0]
1546
+ [0 0 0 0 0]
1547
+ [0 0 0 1 0]
1548
+ [0 0 0 0 0]]
1549
+ """
1550
+ number, order = cv2.connectedComponents(all_shapes, ltype=cv2.CV_16U)
1551
+ expanded_shape = None
1552
+ if number > 1:
1553
+ for i in np.arange(1, number):
1554
+ expanded_shape_test = np.zeros(order.shape, np.uint8)
1555
+ expanded_shape_test[order == i] = 1
1556
+ if np.any(expanded_shape_test * reference_shape):
1557
+ break
1558
+ if np.any(expanded_shape_test * reference_shape):
1559
+ expanded_shape = expanded_shape_test
1560
+ else:
1561
+ expanded_shape = reference_shape
1562
+ return expanded_shape
1563
+
1564
+
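Two edge cases of `keep_shape_connected_with_ref` are easy to miss: if no component of `all_shapes` touches the reference the function returns None, whereas if `all_shapes` contains no component at all it falls back to the reference shape itself. A quick illustrative check using the function above:

import numpy as np

ref = np.zeros((5, 5), dtype=np.uint8)
ref[3, 3] = 1

disconnected = np.zeros((5, 5), dtype=np.uint8)
disconnected[0:2, 0:2] = 1  # never touches the reference pixel
print(keep_shape_connected_with_ref(disconnected, ref))  # None

empty = np.zeros((5, 5), dtype=np.uint8)
print(np.array_equal(keep_shape_connected_with_ref(empty, ref), ref))  # True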
1565
+ @njit()
1566
+ def keep_largest_shape(indexed_shapes: NDArray[np.int32]) -> NDArray[np.uint8]:
1567
+ """
1568
+ Keep the largest shape from an array of indexed shapes.
1569
+
1570
+ This function identifies the most frequent non-zero shape in the input
1571
+ array and returns a binary mask where elements matching this shape are set to 1,
1572
+ and others are set to 0. The function uses NumPy's bincount to count the pixels
1573
+ of each label and treats label 0 as background, ignoring it when selecting the
1574
+ largest shape.
1575
+
1576
+ Parameters
1577
+ ----------
1578
+ indexed_shapes : ndarray of int32
1579
+ Input array containing indexed shapes.
1580
+
1581
+ Returns
1582
+ -------
1583
+ out : ndarray of uint8
1584
+ Binary mask where the largest shape is marked as 1.
1585
+
1586
+ Examples
1587
+ --------
1588
+ >>> indexed_shapes = np.array([0, 2, 2, 3, 1], dtype=np.int32)
1589
+ >>> keep_largest_shape(indexed_shapes)
1590
+ array([0, 1, 1, 0, 0], dtype=uint8)
1591
+ """
1592
+ label_counts = np.bincount(indexed_shapes.flatten())
1593
+ largest_label = 1 + np.argmax(label_counts[1:])
1594
+ return (indexed_shapes == largest_label).astype(np.uint8)
1595
+
1596
+
1597
+ def keep_one_connected_component(binary_image: NDArray[np.uint8])-> NDArray[np.uint8]:
1598
+ """
1599
+ Keep only one connected component in a binary image.
1600
+
1601
+ This function filters out all but the largest connected component in
1602
+ a binary image, effectively isolating it from other noise or objects.
1603
+ The function ensures the input is in uint8 format before processing.
1604
+
1605
+ Parameters
1606
+ ----------
1607
+ binary_image : ndarray of uint8
1608
+ Binary image containing one or more connected components.
1609
+
1610
+ Returns
1611
+ -------
1612
+ ndarray of uint8
1613
+ Image with only the largest connected component retained.
1614
+
1615
+ Examples
1616
+ --------
1617
+ >>> all_shapes = np.zeros((5, 5), dtype=np.uint8)
1618
+ >>> all_shapes[0:2, 0:2] = 1
1619
+ >>> all_shapes[3:4, 3:4] = 1
1620
+ >>> res = keep_one_connected_component(all_shapes)
1621
+ >>> print(res)
1622
+ [[1 1 0 0 0]
1623
+ [1 1 0 0 0]
1624
+ [0 0 0 0 0]
1625
+ [0 0 0 0 0]
1626
+ [0 0 0 0 0]]
1627
+ """
1628
+ if binary_image.dtype != np.uint8:
1629
+ binary_image = binary_image.astype(np.uint8)
1630
+ num_labels, sh = cv2.connectedComponents(binary_image)
1631
+ if num_labels <= 1:
1632
+ return binary_image.astype(np.uint8)
1633
+ else:
1634
+ return keep_largest_shape(sh)
1635
+
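Note that `keep_largest_shape` expects the integer label image produced by `cv2.connectedComponents` (with label 0 as background), not a raw binary mask; `keep_one_connected_component` is the convenience wrapper that chains the two steps. A short usage sketch making the chaining explicit:

import numpy as np
import cv2

mask = np.zeros((5, 5), dtype=np.uint8)
mask[0:2, 0:2] = 1  # 4-pixel component
mask[3, 3] = 1      # 1-pixel component

num_labels, labels = cv2.connectedComponents(mask)  # labels is int32, 0 = background
largest = keep_largest_shape(labels) if num_labels > 1 else mask
print(np.array_equal(largest, keep_one_connected_component(mask)))  # True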