cellects-0.1.3-py3-none-any.whl → cellects-0.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +365 -326
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +4 -3
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +841 -450
  14. cellects/gui/required_output.py +100 -56
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +317 -135
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -105
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.7.dist-info/LICENSE +675 -0
  29. cellects-0.2.7.dist-info/METADATA +829 -0
  30. cellects-0.2.7.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.3.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.3.dist-info/METADATA +0 -176
  35. cellects-0.1.3.dist-info/RECORD +0 -44
  36. {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.3.dist-info → cellects-0.2.7.dist-info}/top_level.txt +0 -0
@@ -1,1635 +1,2166 @@
1
- #!/usr/bin/env python3
2
- """
3
- This module provides methods to analyze and modify shapes in binary images.
4
- It includes functions for comparing neighboring pixels, generating shape descriptors,
5
- and performing morphological operations like expanding shapes and filling holes.
6
-
7
- Classes
8
- ---------
9
- CompareNeighborsWithValue : Class to compare neighboring pixels to a specified value
10
-
11
- Functions
12
- ---------------
13
- cc : Sort connected components according to size
14
- make_gravity_field : Create a gradient field around shapes
15
- find_median_shape : Generate median shape from multiple inputs
16
- make_numbered_rays : Create numbered rays for analysis
17
- CompareNeighborsWithFocal : Compare neighboring pixels to focal values
18
- ShapeDescriptors : Generate shape descriptors using provided functions
19
- get_radius_distance_against_time : Calculate radius distances over time
20
- expand_until_one : Expand shapes until a single connected component remains
21
- expand_and_rate_until_one : Expand and rate shapes until one remains
22
- expand_until_overlap : Expand shapes until overlap occurs
23
- dynamically_expand_to_fill_holes : Dynamically expand to fill holes in shapes
24
- expand_smalls_toward_biggest : Expand smaller shapes toward largest component
25
- change_thresh_until_one : Change threshold until one connected component remains
26
- Ellipse : Generate ellipse shape descriptors
27
- get_rolling_window_coordinates_list : Get coordinates for rolling window operations
28
-
29
- """
30
- import logging
31
- from copy import deepcopy
32
- import cv2
33
- import numpy as np
34
- from numpy.typing import NDArray
35
- from typing import Tuple
36
- from scipy.spatial import KDTree
37
- from cellects.utils.decorators import njit
38
- from cellects.image_analysis.shape_descriptors import ShapeDescriptors
39
- from cellects.utils.formulas import moving_average
40
- from skimage.filters import threshold_otsu
41
- from skimage.measure import label
42
- from scipy.stats import linregress
43
- from scipy.ndimage import distance_transform_edt
44
- import matplotlib.pyplot as plt
45
-
46
-
47
- cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
48
- square_33 = np.ones((3, 3), np.uint8)
49
-
50
-
51
- class CompareNeighborsWithValue:
52
- """
53
- CompareNeighborsWithValue class to summarize each pixel by comparing its neighbors to a value.
54
-
55
- This class analyzes pixels in a 2D array, comparing each pixel's neighbors
56
- to a specified value. The comparison can be equality, superiority,
57
- or inferiority, and neighbors can be the 4 or 8 nearest pixels based on
58
- the connectivity parameter.
59
- """
60
- def __init__(self, array: np.ndarray, connectivity: int=None, data_type: np.dtype=np.int8):
61
- """
62
- Initialize a class for array connectivity processing.
63
-
64
- This class processes arrays based on given connectivities, creating
65
- windows around the original data for both 1D and 2D arrays. Depending on
66
- the connectivity value (4 or 8), it creates different windows with borders.
67
-
68
- Parameters
69
- ----------
70
- array : ndarray
71
- Input array to process, can be 1D or 2D.
72
- connectivity : int, optional
73
- Connectivity type for processing (4 or 8), by default None.
74
- data_type : dtype, optional
75
- Data type for the array elements, by default np.int8.
76
-
77
- Attributes
78
- ----------
79
- array : ndarray
80
- The processed array based on the given data type.
81
- connectivity : int
82
- Connectivity value used for processing.
83
- on_the_right : ndarray
84
- Array with shifted elements to the right.
85
- on_the_left : ndarray
86
- Array with shifted elements to the left.
87
- on_the_bot : ndarray, optional
88
- Array with shifted elements to the bottom (for 2D arrays).
89
- on_the_top : ndarray, optional
90
- Array with shifted elements to the top (for 2D arrays).
91
- on_the_topleft : ndarray, optional
92
- Array with shifted elements to the top left (for 2D arrays).
93
- on_the_topright : ndarray, optional
94
- Array with shifted elements to the top right (for 2D arrays).
95
- on_the_botleft : ndarray, optional
96
- Array with shifted elements to the bottom left (for 2D arrays).
97
- on_the_botright : ndarray, optional
98
- Array with shifted elements to the bottom right (for 2D arrays).
99
- """
100
- array = array.astype(data_type)
101
- self.array = array
102
- self.connectivity = connectivity
103
- if len(self.array.shape) == 1:
104
- self.on_the_right = np.append(array[1:], array[-1])
105
- self.on_the_left = np.append(array[0], array[:-1])
106
- else:
107
- # Build 4 windows of the original array, each missing one of the four borders
108
- # Grow each window back to full size by duplicating its edge row/column on the side opposite to the removed border
109
- if self.connectivity == 4 or self.connectivity == 8:
110
- self.on_the_right = np.column_stack((array[:, 1:], array[:, -1]))
111
- self.on_the_left = np.column_stack((array[:, 0], array[:, :-1]))
112
- self.on_the_bot = np.vstack((array[1:, :], array[-1, :]))
113
- self.on_the_top = np.vstack((array[0, :], array[:-1, :]))
114
- if self.connectivity != 4:
115
- self.on_the_topleft = array[:-1, :-1]
116
- self.on_the_topright = array[:-1, 1:]
117
- self.on_the_botleft = array[1:, :-1]
118
- self.on_the_botright = array[1:, 1:]
119
-
120
- self.on_the_topleft = np.vstack((self.on_the_topleft[0, :], self.on_the_topleft))
121
- self.on_the_topleft = np.column_stack((self.on_the_topleft[:, 0], self.on_the_topleft))
122
-
123
- self.on_the_topright = np.vstack((self.on_the_topright[0, :], self.on_the_topright))
124
- self.on_the_topright = np.column_stack((self.on_the_topright, self.on_the_topright[:, -1]))
125
-
126
- self.on_the_botleft = np.vstack((self.on_the_botleft, self.on_the_botleft[-1, :]))
127
- self.on_the_botleft = np.column_stack((self.on_the_botleft[:, 0], self.on_the_botleft))
128
-
129
- self.on_the_botright = np.vstack((self.on_the_botright, self.on_the_botright[-1, :]))
130
- self.on_the_botright = np.column_stack((self.on_the_botright, self.on_the_botright[:, -1]))
131
-
132
- def is_equal(self, value, and_itself: bool=False):
133
- """
134
- Check equality of neighboring values in an array.
135
-
136
- This method compares the neighbors of each element in `self.array` to a given value.
137
- Depending on the dimensionality and connectivity settings, it checks different neighboring
138
- elements.
139
-
140
- Parameters
141
- ----------
142
- value : int or float
143
- The value to check equality with neighboring elements.
144
- and_itself : bool, optional
145
- If True, also check equality with the element itself. Defaults to False.
146
-
147
- Returns
148
- -------
149
- None
150
-
151
- Attributes
153
- ----------
153
- equal_neighbor_nb : ndarray of uint8
154
- Array that holds the number of equal neighbors for each element.
155
-
156
- Examples
157
- --------
158
- >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
159
- >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
160
- >>> compare.is_equal(1)
161
- >>> print(compare.equal_neighbor_nb)
162
- [[0 0 1 0]
163
- [0 1 1 1]
164
- [0 1 1 1]
165
- [0 0 1 0]]
166
- """
167
-
168
- if len(self.array.shape) == 1:
169
- self.equal_neighbor_nb = np.sum((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value)), axis=0)
170
- else:
171
- if self.connectivity == 4:
172
- self.equal_neighbor_nb = np.dstack((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
173
- np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value)))
174
- elif self.connectivity == 8:
175
- self.equal_neighbor_nb = np.dstack(
176
- (np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
177
- np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value),
178
- np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
179
- np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
180
- else:
181
- self.equal_neighbor_nb = np.dstack(
182
- (np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
183
- np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
184
- self.equal_neighbor_nb = np.sum(self.equal_neighbor_nb, 2, dtype=np.uint8)
185
-
186
- if and_itself:
187
- self.equal_neighbor_nb[np.not_equal(self.array, value)] = 0
188
-
189
- def is_sup(self, value, and_itself=False):
190
- """
191
- Count, for each pixel, the number of neighbors whose values exceed a given threshold.
192
-
193
- This method computes the number of neighboring pixels that have values greater
194
- than a specified `value` for each pixel in the array. Optionally, it can exclude
195
- the pixel itself if its value is less than or equal to `value`.
196
-
197
- Parameters
198
- ----------
199
- value : int
200
- The threshold value used to determine if a neighboring pixel's value is greater.
201
- and_itself : bool, optional
202
- If True, exclude the pixel itself if its value is less than or equal to `value`.
203
- Defaults to False.
204
-
205
- Examples
206
- --------
207
- >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
208
- >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
209
- >>> compare.is_sup(1)
210
- >>> print(compare.sup_neighbor_nb)
211
- [[3 3 2 4]
212
- [4 2 3 3]
213
- [4 2 3 3]
214
- [3 3 2 4]]
215
- """
216
- if len(self.array.shape) == 1:
217
- self.sup_neighbor_nb = (self.on_the_right > value).astype(self.array.dtype) + (self.on_the_left > value).astype(self.array.dtype)
218
- else:
219
- if self.connectivity == 4:
220
- self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
221
- self.on_the_bot > value, self.on_the_top > value))
222
- elif self.connectivity == 8:
223
- self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
224
- self.on_the_bot > value, self.on_the_top > value,
225
- self.on_the_topleft > value, self.on_the_topright > value,
226
- self.on_the_botleft > value, self.on_the_botright > value))
227
- else:
228
- self.sup_neighbor_nb = np.dstack((self.on_the_topleft > value, self.on_the_topright > value,
229
- self.on_the_botleft > value, self.on_the_botright > value))
230
-
231
- self.sup_neighbor_nb = np.sum(self.sup_neighbor_nb, 2, dtype=np.uint8)
232
- if and_itself:
233
- self.sup_neighbor_nb[np.less_equal(self.array, value)] = 0
234
-
235
- def is_inf(self, value, and_itself=False):
236
- """
237
- is_inf(value, and_itself=False)
238
-
239
- Count, for each element, the number of neighbors whose values are smaller than a given value,
240
- considering the chosen connectivity and, with `and_itself`, requiring the element itself to also be smaller.
241
-
242
- Parameters
243
- ----------
244
- value : numeric
245
- The value to compare neighbor elements against.
246
- and_itself : bool, optional
247
- If True, excludes the element itself from being counted. Default is False.
248
-
249
- Examples
250
- --------
251
- >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
252
- >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
253
- >>> compare.is_inf(1)
254
- >>> print(compare.inf_neighbor_nb)
255
- [[1 1 1 0]
256
- [0 1 0 0]
257
- [0 1 0 0]
258
- [1 1 1 0]]
259
- """
260
- if len(self.array.shape) == 1:
261
- self.inf_neighbor_nb = (self.on_the_right < value).astype(self.array.dtype) + (self.on_the_left < value).astype(self.array.dtype)
262
- else:
263
- if self.connectivity == 4:
264
- self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
265
- self.on_the_bot < value, self.on_the_top < value))
266
- elif self.connectivity == 8:
267
- self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
268
- self.on_the_bot < value, self.on_the_top < value,
269
- self.on_the_topleft < value, self.on_the_topright < value,
270
- self.on_the_botleft < value, self.on_the_botright < value))
271
- else:
272
- self.inf_neighbor_nb = np.dstack((self.on_the_topleft < value, self.on_the_topright < value,
273
- self.on_the_botleft < value, self.on_the_botright < value))
274
-
275
- self.inf_neighbor_nb = np.sum(self.inf_neighbor_nb, 2, dtype=np.uint8)
276
- if and_itself:
277
- self.inf_neighbor_nb[np.greater_equal(self.array, value)] = 0
278
-
279
-
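For readers skimming this removed class: the neighbor counts it produces for 2D inputs can also be obtained with a single scipy convolution. The sketch below is illustrative only (it is not part of either package version) and assumes replicated borders, matching the is_equal example above; the helper name count_equal_neighbors is hypothetical.

import numpy as np
from scipy.ndimage import convolve

def count_equal_neighbors(array, value, connectivity=4):
    # Count, for each pixel, the 4- or 8-connected neighbors equal to `value`,
    # with edge rows/columns replicated as in CompareNeighborsWithValue.
    if connectivity == 4:
        kernel = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.uint8)
    else:
        kernel = np.ones((3, 3), dtype=np.uint8)
        kernel[1, 1] = 0
    mask = (array == value).astype(np.uint8)
    return convolve(mask, kernel, mode='nearest')

matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
print(count_equal_neighbors(matrix, 1))   # same grid as compare.is_equal(1) above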
280
- def cc(binary_img: NDArray[np.uint8]) -> Tuple[NDArray, NDArray, NDArray]:
281
- """
282
- Processes a binary image to reorder and label connected components.
283
-
284
- This function takes a binary image, analyses the connected components,
285
- reorders them by size, ensures background is correctly labeled as 0,
286
- and returns the new ordered labels along with their statistics and centers.
287
-
288
- Parameters
289
- ----------
290
- binary_img : ndarray of uint8
291
- Input binary image with connected components.
292
-
293
- Returns
294
- -------
295
- new_order : ndarray of uint8, uint16 or uint32
296
- Image with reordered labels for connected components.
297
- stats : ndarray of ints
298
- Statistics for each component (x_start, y_start, x_end, y_end, area).
299
- centers : ndarray of floats
300
- Centers for each component (x, y).
301
-
302
- Examples
303
- --------
304
- >>> binary_img = np.array([[0, 1, 0], [0, 1, 0]], dtype=np.uint8)
305
- >>> new_order, stats, centers = cc(binary_img)
306
- >>> print(stats)
307
- array([[0, 0, 3, 2, 4],
308
- [1, 0, 2, 2, 2]], dtype=int32)
309
- """
310
- number, img, stats, centers = cv2.connectedComponentsWithStats(binary_img, ltype=cv2.CV_16U)
311
- if number > 255:
312
- img_dtype = np.uint16
313
- if number > 65535:
314
- img_dtype = np.uint32
315
- else:
316
- img_dtype = np.uint8
317
- stats[:, 2] = stats[:, 0] + stats[:, 2]
318
- stats[:, 3] = stats[:, 1] + stats[:, 3]
319
- sorted_idx = np.argsort(stats[:, 4])[::-1]
320
-
321
- # Make sure that the first connected component (labelled 0) is the background and not the main shape
322
- size_ranked_stats = stats[sorted_idx, :]
323
- background = (size_ranked_stats[:, 0] == 0).astype(np.uint8) + (size_ranked_stats[:, 1] == 0).astype(np.uint8) + (
324
- size_ranked_stats[:, 2] == img.shape[1]).astype(np.uint8) + (
325
- size_ranked_stats[:, 3] == img.shape[0]).astype(np.uint8)
326
-
327
- # background = ((size_ranked_stats[:, 0] == 0) & (size_ranked_stats[:, 1] == 0) & (size_ranked_stats[:, 2] == img.shape[1]) & (size_ranked_stats[:, 3] == img.shape[0]))
328
-
329
- touch_borders = np.nonzero(background > 2)[0]
330
- # if not isinstance(touch_borders, np.int64):
331
- # touch_borders = touch_borders[0]
332
- # Most of the time, the background should be the largest shape and therefore has the index 0,
333
- # Then, if there is at least one shape touching more than 2 borders and having not the index 0, solve:
334
- if np.any(touch_borders != 0):
335
- # If there is only one shape touching borders, it means that background is not at its right position (i.e. 0)
336
- if len(touch_borders) == 1:
337
- # Then exchange that shape position with background position
338
- shape = sorted_idx[0] # Store shape position in the first place
339
- back = sorted_idx[touch_borders[0]] # Store back position in the first place
340
- sorted_idx[touch_borders[0]] = shape # Put shape position at the previous place of back and conversely
341
- sorted_idx[0] = back
342
- # If there are two shapes, it means that the main shape grew sufficiently to reach at least 3 borders
343
- # We assume that it grew larger than background
344
- else:
345
- shape = sorted_idx[0]
346
- back = sorted_idx[1]
347
- sorted_idx[1] = shape
348
- sorted_idx[0] = back
349
- # Put shape position at the previous place of back and conversely
350
-
351
-
352
- stats = stats[sorted_idx, :]
353
- centers = centers[sorted_idx, :]
354
-
355
- new_order = np.zeros_like(binary_img, dtype=img_dtype)
356
-
357
- for i, val in enumerate(sorted_idx):
358
- new_order[img == val] = i
359
- return new_order, stats, centers
360
-
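Because cc() converts the stats columns to (x_start, y_start, x_end, y_end, area), they can be used directly as crop bounds. A minimal sketch, assuming the 0.1.3 module shown here is importable; the toy array is illustrative.

import numpy as np
from cellects.image_analysis.morphological_operations import cc

binary_img = np.zeros((6, 6), dtype=np.uint8)
binary_img[1:4, 1:3] = 1         # largest foreground component (area 6)
binary_img[4, 5] = 1             # a single isolated pixel
labels, stats, centers = cc(binary_img)
x0, y0, x1, y1, area = stats[1]  # index 0 is the background
print(area)                      # 6
print((labels[y0:y1, x0:x1] == 1).astype(np.uint8))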
361
-
362
- def rounded_inverted_distance_transform(original_shape: NDArray[np.uint8], max_distance: int=None, with_erosion: int=0) -> NDArray[np.uint32]:
363
- """
364
- Perform rounded inverted distance transform on a binary image.
365
-
366
- This function computes an inverted, rounded distance transform: background pixels
367
- adjacent to the shape receive the highest value, and values decrease with the
368
- distance to the nearest non-zero pixel. The operation can include erosion and stops
369
- either at a given max distance or once the whole image has been covered.
370
-
371
- Parameters
372
- ----------
373
- original_shape : ndarray of uint8
374
- Input binary image to be processed.
375
- max_distance : int, optional
376
- Maximum distance for the expansion. If None, no limit is applied.
377
- with_erosion : int, optional
378
- Number of erosion iterations to apply before the transform. Default is 0.
379
-
380
- Returns
381
- -------
382
- out : ndarray of uint32
383
- Output image containing the rounded inverted distance transform.
384
-
385
- Examples
386
- --------
387
- >>> segmentation = np.zeros((4, 4), dtype=np.uint8)
388
- >>> segmentation[1:3, 1:3] = 1
389
- >>> gravity = rounded_inverted_distance_transform(segmentation, max_distance=2)
390
- >>> print(gravity)
391
- [[1 2 2 1]
392
- [2 0 0 2]
393
- [2 0 0 2]
394
- [1 2 2 1]]
395
- """
396
- if with_erosion > 0:
397
- original_shape = cv2.erode(original_shape, cross_33, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
398
- expand = deepcopy(original_shape)
399
- if max_distance is not None:
400
- if max_distance > np.max(original_shape.shape):
401
- max_distance = np.max(original_shape.shape).astype(np.uint32)
402
- gravity_field = np.zeros(original_shape.shape , np.uint32)
403
- for gravi in np.arange(max_distance):
404
- expand = cv2.dilate(expand, cross_33, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
405
- gravity_field[np.logical_xor(expand, original_shape)] += 1
406
- else:
407
- gravity_field = np.zeros(original_shape.shape , np.uint32)
408
- while np.any(np.equal(original_shape + expand, 0)):
409
- expand = cv2.dilate(expand, cross_33, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
410
- gravity_field[np.logical_xor(expand, original_shape)] += 1
411
- return gravity_field
412
-
413
-
414
- def inverted_distance_transform(original_shape: NDArray[np.uint8], max_distance: int=None, with_erosion: int=0) -> NDArray[np.uint32]:
415
- """
416
- Calculate the distance transform around ones in a binary image, with optional erosion.
417
-
418
- This function computes the Euclidean distance transform where zero values
419
- represent the background and ones represent the foreground. Optionally,
420
- it erodes the input image before computing the distance transform, and
421
- limits distances based on a maximum value.
422
-
423
- Parameters
424
- ----------
425
- original_shape : ndarray of uint8
426
- Input binary image where ones represent the foreground.
427
- max_distance : int, optional
428
- Maximum distance value to threshold. If None (default), no thresholding is applied.
429
- with_erosion : int, optional
430
- Number of iterations for erosion. If 0 (default), no erosion is applied.
431
-
432
- Returns
433
- -------
434
- out : ndarray of float64
435
- Inverted distance transform in which each non-zero element decreases with the
436
- distance to the nearest foreground (non-zero) pixel of the input image.
437
-
438
- See also
439
- --------
440
- rounded_inverted_distance_transform : less precise (outputs ints) and faster for small max_distance values.
441
-
442
- Examples
443
- --------
444
- >>> segmentation = np.zeros((4, 4), dtype=np.uint8)
445
- >>> segmentation[1:3, 1:3] = 1
446
- >>> gravity = inverted_distance_transform(segmentation, max_distance=2)
447
- >>> print(gravity)
448
- [[1. 1.41421356 1.41421356 1. ]
449
- [1.41421356 0. 0. 1.41421356]
450
- [1.41421356 0. 0. 1.41421356]
451
- [1. 1.41421356 1.41421356 1. ]]
452
- """
453
- if with_erosion:
454
- original_shape = cv2.erode(original_shape, cross_33, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
455
- gravity_field = distance_transform_edt(1 - original_shape)
456
- if max_distance is not None:
457
- if max_distance > np.min(original_shape.shape) / 2:
458
- max_distance = (np.min(original_shape.shape) // 2).astype(np.uint32)
459
- gravity_field[gravity_field >= max_distance] = 0
460
- gravity_field[gravity_field > 0] = 1 + gravity_field.max() - gravity_field[gravity_field > 0]
461
- return gravity_field
462
-
463
-
464
- @njit()
465
- def get_line_points(start, end) -> NDArray[int]:
466
- """
467
- Get line points between two endpoints using Bresenham's line algorithm.
468
-
469
- This function calculates all the integer coordinate points that form a
470
- line between two endpoints using Bresenham's line algorithm. It is
471
- optimized for performance using Numba's just-in-time compilation.
472
-
473
- Parameters
474
- ----------
475
- start : tuple of int
476
- The starting point coordinates (y0, x0).
477
- end : tuple of int
478
- The ending point coordinates (y1, x1).
479
-
480
- Returns
481
- -------
482
- out : ndarray of int
483
- Array of points representing the line, with shape (N, 2), where N is
484
- the number of points on the line.
485
-
486
- Examples
487
- --------
488
- >>> start = (0, 0)
489
- >>> end = (1, 2)
490
- >>> points = get_line_points(start, end)
491
- >>> print(points)
492
- [[0 0]
493
- [0 1]
494
- [1 2]]
495
- """
496
- y0, x0 = start
497
- y1, x1 = end
498
-
499
- # Calculate differences
500
- dx = np.abs(x1 - x0)
501
- dy = np.abs(y1 - y0)
502
-
503
- # Determine step direction
504
- sx = 1 if x0 < x1 else -1
505
- sy = 1 if y0 < y1 else -1
506
-
507
- # Initialize
508
- err = dx - dy
509
- points = []
510
- x, y = x0, y0
511
-
512
- while True:
513
- points.append([y, x])
514
-
515
- # Check if we've reached the end
516
- if x == x1 and y == y1:
517
- break
518
-
519
- # Calculate error for next step
520
- e2 = 2 * err
521
-
522
- if e2 > -dy:
523
- err -= dy
524
- x += sx
525
-
526
- if e2 < dx:
527
- err += dx
528
- y += sy
529
-
530
- return np.array(points)
531
-
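Because get_line_points works in (row, column) order, its output can index a 2D mask directly. A short usage sketch, assuming the 0.1.3 module shown here is importable:

import numpy as np
from cellects.image_analysis.morphological_operations import get_line_points

mask = np.zeros((5, 6), dtype=np.uint8)
line = get_line_points((0, 0), (4, 5))   # start and end given as (y, x)
mask[line[:, 0], line[:, 1]] = 1         # column 0 holds y, column 1 holds x
print(mask)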
532
-
533
- def get_all_line_coordinates(start_point: NDArray[int], end_points: NDArray[int]) -> NDArray[int]:
534
- """
535
- Get all line coordinates between start point and end points.
536
-
537
- This function computes the coordinates of the lines connecting a
539
- start point to multiple end points, applying Bresenham's line algorithm
540
- (get_line_points) to each end point.
540
-
541
- Parameters
542
- ----------
543
- start_point : NDArray[float]
544
- Starting coordinate point for the lines. Can be of any numeric type,
545
- will be converted to float if needed.
546
- end_points : NDArray[float]
547
- Array of end coordinate points for the lines. Can be of any
548
- numeric type, will be converted to float if needed.
549
-
550
- Returns
551
- -------
552
- out : List[NDArray[int]]
553
- A list of numpy arrays containing the coordinates of each line
554
- as integer values.
555
-
556
- Examples
557
- --------
558
- >>> start_point = np.array([0, 0])
559
- >>> end_points = np.array([[1, 2], [3, 4]])
560
- >>> get_all_line_coordinates(start_point, end_points)
561
- [array([[0, 0],
562
- [0, 1],
563
- [1, 2]], dtype=uint64), array([[0, 0],
564
- [1, 1],
565
- [1, 2],
566
- [2, 3],
567
- [3, 4]], dtype=uint64)]
568
- """
569
- lines = []
570
- for end_point in end_points:
571
- line_coords = get_line_points(start_point, end_point)
572
- lines.append(np.array(line_coords, dtype=np.uint64))
573
- return lines
574
-
575
-
576
- def draw_me_a_sun(main_shape: NDArray, ray_length_coef=4) -> Tuple[NDArray, NDArray]:
577
- """
578
- Draw a sun-shaped pattern on an image based on the main shape and ray length coefficient.
579
-
580
- This function takes an input binary image (main_shape) and draws sun rays
581
- from the perimeter of that shape. The length of the rays is controlled by a coefficient.
582
- The function ensures that rays do not extend beyond the image borders.
583
-
584
- Parameters
585
- ----------
586
- main_shape : ndarray of bool or int
587
- Binary input image where the main shape is defined.
588
- ray_length_coef : float, optional
589
- Coefficient to control the length of sun rays. Defaults to 4.
590
-
591
- Returns
592
- -------
593
- rays : ndarray
594
- Indices of the rays drawn.
595
- sun : ndarray
596
- Image with sun rays drawn on it.
597
-
598
- Examples
599
- --------
600
- >>> main_shape = np.zeros((10, 10), dtype=np.uint8)
601
- >>> main_shape[4:7, 3:6] = 1
602
- >>> rays, sun = draw_me_a_sun(main_shape)
603
- >>> print(sun)
604
-
605
- """
606
- nb, shapes, stats, center = cv2.connectedComponentsWithStats(main_shape)
607
- sun = np.zeros(main_shape.shape, np.uint32)
608
- rays = []
609
- r = 0
610
- for i in range(1, nb):
611
- shape_i = cv2.dilate((shapes == i).astype(np.uint8), kernel=cross_33)
612
- contours = get_contours(shape_i)
613
- first_ring_idx = np.nonzero(contours)
614
- centroid = np.round((center[i, 1], center[i, 0])).astype(np.int64)
615
- second_ring_y = centroid[0] + ((first_ring_idx[0] - centroid[0]) * ray_length_coef)
616
- second_ring_x = centroid[1] + ((first_ring_idx[1] - centroid[1]) * ray_length_coef)
617
-
618
- second_ring_y[second_ring_y < 0] = 0
619
- second_ring_x[second_ring_x < 0] = 0
620
-
621
- second_ring_y[second_ring_y > main_shape.shape[0] - 1] = main_shape.shape[0] - 1
622
- second_ring_x[second_ring_x > main_shape.shape[1] - 1] = main_shape.shape[1] - 1
623
- for j in range(len(second_ring_y)):
624
- r += 1
625
- fy, fx, sy, sx = first_ring_idx[0][j], first_ring_idx[1][j], second_ring_y[j], second_ring_x[j]
626
- line = get_line_points((fy, fx), (sy, sx))
627
- sun[line[:, 1], line[:, 0]] = r
628
- rays.append(r)
629
- return np.array(rays), sun
630
-
631
-
632
- def find_median_shape(binary_3d_matrix: NDArray[np.uint8]) -> NDArray[np.uint8]:
633
- """
634
- Find the median shape from a binary 3D matrix.
635
-
636
- This function computes the median 2D slice of a binary (0/1) 3D matrix
637
- by finding which voxels appear in at least half of the slices.
638
-
639
- Parameters
640
- ----------
641
- binary_3d_matrix : ndarray of uint8
642
- Input 3D binary matrix where each slice is a 2D array.
643
-
644
- Returns
645
- -------
646
- ndarray of uint8
647
- Median shape as a 2D binary matrix where the same voxels
648
- that appear in at least half of the input slices are set to 1.
649
-
650
- Examples
651
- --------
652
- >>> binary_3d_matrix = np.random.randint(0, 2, (10, 5, 5), dtype=np.uint8)
653
- >>> median_shape = find_median_shape(binary_3d_matrix)
654
- >>> print(median_shape)
655
- """
656
- binary_2d_matrix = np.apply_along_axis(np.sum, 0, binary_3d_matrix)
657
- median_shape = np.zeros(binary_2d_matrix.shape, dtype=np.uint8)
658
- median_shape[np.greater_equal(binary_2d_matrix, binary_3d_matrix.shape[0] // 2)] = 1
659
- return median_shape
660
-
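The docstring example above uses random input; the deterministic sketch below may be easier to follow. A pixel is kept when it is set in at least n_frames // 2 of the stacked frames. This snippet is illustrative only and assumes the 0.1.3 module shown here is importable.

import numpy as np
from cellects.image_analysis.morphological_operations import find_median_shape

stack = np.array([[[1, 1], [0, 0]],
                  [[1, 0], [0, 0]],
                  [[0, 0], [0, 1]],
                  [[1, 0], [1, 0]]], dtype=np.uint8)
print(find_median_shape(stack))
# Only the top-left pixel is set in at least 4 // 2 = 2 frames:
# [[1 0]
#  [0 0]]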
661
-
662
- @njit()
663
- def reduce_image_size_for_speed(image_of_2_shapes: NDArray[np.uint8]) -> Tuple[Tuple, Tuple]:
664
- """
665
- Reduces the size of an image containing two shapes for faster processing.
666
-
667
- The function iteratively divides the image into quadrants and keeps only
668
- those that contain both shapes until a minimal size is reached.
669
-
670
- Parameters
671
- ----------
672
- image_of_2_shapes : ndarray of uint8
673
- The input image containing two shapes.
674
-
675
- Returns
676
- -------
677
- out : tuple of tuples
678
- The indices of the first and second shape in the reduced image.
679
-
680
- Examples
681
- --------
682
- >>> main_shape = np.zeros((10, 10), dtype=np.uint8)
683
- >>> main_shape[1:3, 1:3] = 1
684
- >>> main_shape[1:3, 4:6] = 2
685
- >>> shape1_idx, shape2_idx = reduce_image_size_for_speed(main_shape)
686
- >>> print(shape1_idx)
687
- (array([1, 1, 2, 2]), array([1, 2, 1, 2]))
688
- """
689
- sub_image = image_of_2_shapes.copy()
690
- y_size, x_size = sub_image.shape
691
- images_list = [sub_image]
692
- good_images = [0]
693
- sub_image = images_list[good_images[0]]
694
- while (len(good_images) == 1 or len(good_images) == 2) and y_size > 3 and x_size > 3:
695
- y_size, x_size = sub_image.shape
696
- images_list = []
697
- images_list.append(sub_image[:((y_size // 2) + 1), :((x_size // 2) + 1)])
698
- images_list.append(sub_image[:((y_size // 2) + 1), (x_size // 2):])
699
- images_list.append(sub_image[(y_size // 2):, :((x_size // 2) + 1)])
700
- images_list.append(sub_image[(y_size // 2):, (x_size // 2):])
701
- good_images = []
702
- for idx, image in enumerate(images_list):
703
- if np.any(image == 2):
704
- if np.any(image == 1):
705
- good_images.append(idx)
706
- if len(good_images) == 2:
707
- if good_images == [0, 1]:
708
- sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
709
- elif good_images == [0, 2]:
710
- sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
711
- elif good_images == [1, 3]:
712
- sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
713
- elif good_images == [2, 3]:
714
- sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
715
- else:
716
- pass
717
- else:
718
- sub_image = images_list[good_images[0]]
719
-
720
- shape1_idx = np.nonzero(sub_image == 1)
721
- shape2_idx = np.nonzero(sub_image == 2)
722
- return shape1_idx, shape2_idx
723
-
724
-
725
- def get_minimal_distance_between_2_shapes(image_of_2_shapes: NDArray[np.uint8], increase_speed: bool=True) -> float:
726
- """
727
- Get the minimal distance between two shapes in an image.
728
-
729
- This function calculates the minimal Euclidean distance between
730
- two different shapes represented by binary values 1 and 2 in a given image.
731
- It can optionally reduce the image size for faster processing.
732
-
733
- Parameters
734
- ----------
735
- image_of_2_shapes : ndarray of int8
736
- Binary image containing two shapes to measure distance between.
737
- increase_speed : bool, optional
738
- Flag to reduce image size for faster computation. Default is True.
739
-
740
- Returns
741
- -------
742
- min_distance : float64
743
- The minimal Euclidean distance between the two shapes.
744
-
745
- Examples
746
- --------
747
- >>> import numpy as np
748
- >>> image = np.array([[1, 0], [0, 2]])
749
- >>> distance = get_minimal_distance_between_2_shapes(image)
750
- >>> print(distance)
751
- 1.4142135623730951
752
- """
753
- if increase_speed:
754
- shape1_idx, shape2_idx = reduce_image_size_for_speed(image_of_2_shapes)
755
- else:
756
- shape1_idx, shape2_idx = np.nonzero(image_of_2_shapes == 1), np.nonzero(image_of_2_shapes == 2)
757
- t = KDTree(np.transpose(shape1_idx))
758
- dists, nns = t.query(np.transpose(shape2_idx), 1)
759
- return np.min(dists)
760
-
761
-
762
- def find_major_incline(vector: NDArray, natural_noise: float) -> Tuple[int, int]:
763
- """
764
- Find the major incline section in a vector.
765
-
766
- This function identifies the segment of a vector that exhibits
767
- the most significant change in values, considering a specified
768
- natural noise level. It returns the left and right indices that
769
- define this segment.
770
-
771
- Parameters
772
- ----------
773
- vector : ndarray of float64
774
- Input data vector where the incline needs to be detected.
775
- natural_noise : float
776
- The acceptable noise level for determining the incline.
777
-
778
- Returns
779
- -------
780
- Tuple[int, int]
781
- A tuple containing two integers: the left and right indices
782
- of the major incline section in the vector.
783
-
784
- Examples
785
- --------
786
- >>> vector = np.array([3, 5, 7, 9, 10])
787
- >>> natural_noise = 2.5
788
- >>> left, right = find_major_incline(vector, natural_noise)
789
- >>> (left, right)
790
- (0, 1)
791
- """
792
- left = 0
793
- right = 1
794
- ref_length = np.max((5, 2 * natural_noise))
795
- vector = moving_average(vector, 5)
796
- ref_extent = np.ptp(vector)
797
- extent = ref_extent
798
- # Find the left limit:
799
- while len(vector) > ref_length and extent > (ref_extent - (natural_noise / 4)):
800
- vector = vector[1:]
801
- extent = np.ptp(vector)
802
- left += 1
803
- # And the right one:
804
- extent = ref_extent
805
- while len(vector) > ref_length and extent > (ref_extent - natural_noise / 2):
806
- vector = vector[:-1]
807
- extent = np.ptp(vector)
808
- right += 1
809
- # And the left again, with stronger stringency:
810
- extent = ref_extent
811
- while len(vector) > ref_length and extent > (ref_extent - natural_noise):
812
- vector = vector[1:]
813
- extent = np.ptp(vector)
814
- left += 1
815
- # When there is no incline, put back left and right to 0
816
- if len(vector) <= ref_length:
817
- left = 0
818
- right = 1
819
- return left, right
820
-
821
-
822
- def rank_from_top_to_bottom_from_left_to_right(binary_image: NDArray[np.uint8], y_boundaries: NDArray[int], get_ordered_image: bool=False) -> Tuple:
823
- """
824
- Rank components in a binary image from top to bottom and from left to right.
825
-
826
- This function processes a binary image to rank its components based on
827
- their centroids. It first sorts the components row by row and then orders them
828
- within each row from left to right. If the ordering fails, it attempts an alternative
829
- algorithm and returns the ordered statistics and centroids.
830
-
831
- Parameters
832
- ----------
833
- binary_image : ndarray of uint8
834
- The input binary image to process.
835
- y_boundaries : ndarray of int
836
- Boundary information for the y-coordinates.
837
- get_ordered_image : bool, optional
838
- If True, returns an ordered image in addition to the statistics and centroids.
839
- Default is False.
840
-
841
- Returns
842
- -------
843
- tuple
844
- If `get_ordered_image` is True, returns a tuple containing:
845
- - ordered_stats : ndarray of int
846
- Statistics for the ordered components.
847
- - ordered_centroids : ndarray of float64
848
- Centroids for the ordered components.
849
- - ordered_image : ndarray of uint8
850
- The binary image with ordered component labels.
851
-
852
- If `get_ordered_image` is False, returns a tuple containing:
853
- - ordered_stats : ndarray of int
854
- Statistics for the ordered components.
855
- - ordered_centroids : ndarray of float64
856
- Centroids for the ordered components.
857
- """
858
- nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
859
- connectivity=8)
860
-
861
- centroids = centroids[1:, :]
862
- final_order = np.zeros(centroids.shape[0], dtype=np.uint8)
863
- sorted_against_y = np.argsort(centroids[:, 1])
864
- # row_nb = (y_boundaries == 1).sum()
865
- row_nb = np.max(((y_boundaries == 1).sum(), (y_boundaries == - 1).sum()))
866
- component_per_row = int(np.ceil((nb_components - 1) / row_nb))
867
- for row_i in range(row_nb):
868
- row_i_start = row_i * component_per_row
869
- if row_i == (row_nb - 1):
870
- sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:], 0])
871
- final_order[row_i_start:] = sorted_against_y[row_i_start:][sorted_against_x]
872
- else:
873
- row_i_end = (row_i + 1) * component_per_row
874
- sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:row_i_end], 0])
875
- final_order[row_i_start:row_i_end] = sorted_against_y[row_i_start:row_i_end][sorted_against_x]
876
- ordered_centroids = centroids[final_order, :]
877
- ordered_stats = stats[1:, :]
878
- ordered_stats = ordered_stats[final_order, :]
879
-
880
- # If it fails, use another algo
881
- if (final_order == 0).sum() > 1:
882
- nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
883
- connectivity=8)
884
- # First order according to x: from left to right
885
- # Remove the background and order centroids along x axis
886
- centroids = centroids[1:, :]
887
- x_order = np.argsort(centroids[:, 0])
888
- centroids = centroids[x_order, :]
889
-
890
-
891
- # Then use the boundaries of each Y peak to sort these shapes row by row
892
- if y_boundaries is not None:
893
- binary_image = deepcopy(output)
894
- binary_image[np.nonzero(binary_image)] = 1
895
- y_starts, y_ends = np.argwhere(y_boundaries == - 1), np.argwhere(y_boundaries == 1)
896
-
897
- margins_ci = np.array((0.5, 0.4, 0.3, 0.2, 0.1))
898
- for margin in margins_ci:
899
- ranking_success: bool = True
900
- y_order = np.zeros(centroids.shape[0], dtype=np.uint8)
901
- count: np.uint8 = 0
902
- y_margins = (y_ends - y_starts) * margin# 0.3
903
- # Loop and try to fill each row with all components, fail if the final number is wrong
904
- for y_interval in np.arange(len(y_starts)):
905
- for patch_i in np.arange(nb_components - 1):
906
- # Compare the y coordinate of the centroid with the detected y intervals with
907
- # an added margin in order to order coordinates
908
- if np.logical_and(centroids[patch_i, 1] >= (y_starts[y_interval] - y_margins[y_interval]),
909
- centroids[patch_i, 1] <= (y_ends[y_interval] + y_margins[y_interval])):
910
- try:
911
- y_order[count] = patch_i
912
- count = count + 1
913
- except IndexError as exc:
914
- ranking_success = False
915
-
916
- if ranking_success:
917
- break
918
- else:
919
- ranking_success = False
920
- # If all tested margins failed, do not rank from top to bottom / left to right, i.e. keep the automatic ranking
921
- if not ranking_success:
922
- y_order = np.arange(centroids.shape[0])
923
-
924
-
925
- # Second order according to y: from top to bottom
926
- ordered_centroids = centroids[y_order, :]
927
- ordered_stats = stats[1:, :]
928
- ordered_stats = ordered_stats[x_order, :]
929
- ordered_stats = ordered_stats[y_order, :]
930
-
931
- if get_ordered_image:
932
- ordered_image = np.zeros(binary_image.shape, dtype=np.uint8)
933
- for patch_j in np.arange(centroids.shape[0]):
934
- sub_output = output[ordered_stats[patch_j, 1]: (ordered_stats[patch_j, 1] + ordered_stats[patch_j, 3]), ordered_stats[patch_j, 0]: (ordered_stats[patch_j, 0] + ordered_stats[patch_j, 2])]
935
- sub_output = np.sort(np.unique(sub_output))
936
- if len(sub_output) == 1:
937
- ordered_image[output == sub_output[0]] = patch_j + 1
938
- else:
939
- ordered_image[output == sub_output[1]] = patch_j + 1
940
-
941
-
942
- return ordered_stats, ordered_centroids, ordered_image
943
- else:
944
- return ordered_stats, ordered_centroids
945
-
946
-
947
- def get_largest_connected_component(segmentation: NDArray[np.uint8]) -> Tuple[np.int64, NDArray[bool]]:
948
- """
949
- Find the largest connected component in a segmentation image.
950
-
951
- This function labels all connected components in a binary
952
- segmentation image, determines the size of each component,
953
- and returns information about the largest connected component.
954
-
955
- Parameters
956
- ----------
957
- segmentation : ndarray of uint8
958
- Binary segmentation image where different integer values represent
959
- different connected components.
960
-
961
- Returns
962
- -------
963
- Tuple[int, ndarray of bool]
964
- A tuple containing:
965
- - The size of the largest connected component.
966
- - A boolean mask representing the largest connected
967
- component in the input segmentation image.
968
-
969
- Examples
970
- --------
971
- >>> segmentation = np.zeros((10, 10), dtype=np.uint8)
972
- >>> segmentation[2:6, 2:5] = 1
973
- >>> segmentation[6:9, 6:9] = 1
974
- >>> size, mask = get_largest_connected_component(segmentation)
975
- >>> print(size)
976
- 12
977
- """
978
- labels = label(segmentation)
979
- assert(labels.max() != 0) # assume at least 1 CC
980
- con_comp_sizes = np.bincount(labels.flat)[1:]
981
- largest_idx = np.argmax(con_comp_sizes)
982
- largest_connected_component = labels == largest_idx + 1
983
- return con_comp_sizes[largest_idx], largest_connected_component
984
-
985
-
986
- def expand_until_neighbor_center_gets_nearer_than_own(shape_to_expand: NDArray[np.uint8], without_shape_i: NDArray[np.uint8],
987
- shape_original_centroid: NDArray,
988
- ref_centroids: NDArray, kernel: NDArray) -> NDArray[np.uint8]:
989
- """
990
- Expand a shape until its neighbor's centroid is closer than its own.
991
-
992
- This function takes in several numpy arrays representing shapes and their
993
- centroids, and expands the input shape until the distance to the nearest
994
- neighboring centroid is less than or equal to the distance between the shape's
995
- contour and its own centroid.
996
-
997
- Parameters
998
- ----------
999
- shape_to_expand : ndarray of uint8
1000
- The binary shape to be expanded.
1001
- without_shape_i : ndarray of uint8
1002
- A binary array representing the area without the shape.
1003
- shape_original_centroid : ndarray
1004
- The centroid of the original shape.
1005
- ref_centroids : ndarray
1006
- Reference centroids to compare distances with.
1007
- kernel : ndarray
1008
- The kernel for dilation operation.
1009
-
1010
- Returns
1011
- -------
1012
- ndarray of uint8
1013
- The expanded shape.
1014
- """
1015
-
1016
- without_shape = deepcopy(without_shape_i)
1017
- # Calculate the distance between the focal shape centroid and its 10% nearest neighbor centroids
1018
- centroid_distances = np.sqrt(np.square(ref_centroids[1:, 0] - shape_original_centroid[0]) + np.square(
1019
- ref_centroids[1:, 1] - shape_original_centroid[1]))
1020
- nearest_shapes = np.where(np.greater(np.quantile(centroid_distances, 0.1), centroid_distances))[0]
1021
-
1022
- # Use the nearest neighbor distance as a maximal reference to get the minimal distance between the border of the shape and the neighboring centroids
1023
- neighbor_mindist = np.min(centroid_distances)
1024
- idx = np.nonzero(shape_to_expand)
1025
- for shape_j in nearest_shapes:
1026
- neighbor_mindist = np.minimum(neighbor_mindist, np.min(
1027
- np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
1028
- neighbor_mindist *= 0.5
1029
- # Get the maximal distance of the focal shape between its contour and its centroids
1030
- itself_maxdist = np.max(
1031
- np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
1032
-
1033
- # Put 1 at the border of the reference image in order to be able to stop the while loop once border reached
1034
- without_shape[0, :] = 1
1035
- without_shape[:, 0] = 1
1036
- without_shape[without_shape.shape[0] - 1, :] = 1
1037
- without_shape[:, without_shape.shape[1] - 1] = 1
1038
-
1039
- # Compare the distance between the shape's contour and its own centroid with the distance between that contour and the neighboring centroids
1040
- # Continue as long as the distance reached by the shape (from its centroid) remains smaller than its distance to the nearest neighboring centroid.
1041
- previous_shape_to_expand = deepcopy(shape_to_expand)
1042
- while np.logical_and(np.any(np.less_equal(itself_maxdist, neighbor_mindist)),
1043
- np.count_nonzero(shape_to_expand * without_shape) == 0):
1044
- previous_shape_to_expand = deepcopy(shape_to_expand)
1045
- # Dilate the shape by the kernel size
1046
- shape_to_expand = cv2.dilate(shape_to_expand, kernel, iterations=1,
1047
- borderType=cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED)
1048
- # Extract the new connected component
1049
- shape_nb, shape_to_expand = cv2.connectedComponents(shape_to_expand, ltype=cv2.CV_16U)
1050
- shape_to_expand = shape_to_expand.astype(np.uint8)
1051
- # Use the new shape coordinates to recompute the distances between the shape and its own centroid and between the shape and the neighboring centroids
1052
- idx = np.nonzero(shape_to_expand)
1053
- for shape_j in nearest_shapes:
1054
- neighbor_mindist = np.minimum(neighbor_mindist, np.min(
1055
- np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
1056
- itself_maxdist = np.max(
1057
- np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
1058
- return previous_shape_to_expand
1059
-
1060
-
1061
- def image_borders(dimensions: tuple, shape: str="rectangular") -> NDArray[np.uint8]:
1062
- """
1063
- Create an image with borders, either rectangular or circular.
1064
-
1065
- Parameters
1066
- ----------
1067
- dimensions : tuple
1068
- The dimensions of the image (height, width).
1069
- shape : str, optional
1070
- The shape of the borders. Options are "rectangular" or "circular".
1071
- Defaults to "rectangular".
1072
-
1073
- Returns
1074
- -------
1075
- out : ndarray of uint8
1076
- The image with borders. If the shape is "circular", an ellipse border;
1077
- if "rectangular", a rectangular border.
1078
-
1079
- Examples
1080
- --------
1081
- >>> borders = image_borders((3, 3), "rectangular")
1082
- >>> print(borders)
1083
- [[0 0 0]
1084
- [0 1 0]
1085
- [0 0 0]]
1086
- """
1087
- if shape == "circular":
1088
- borders = Ellipse(dimensions).create()
1089
- img_contours = image_borders(dimensions)
1090
- borders = borders * img_contours
1091
- else:
1092
- borders = np.ones(dimensions, dtype=np.uint8)
1093
- borders[0, :] = 0
1094
- borders[:, 0] = 0
1095
- borders[- 1, :] = 0
1096
- borders[:, - 1] = 0
1097
- return borders
1098
-
1099
-
1100
- def get_radius_distance_against_time(binary_video: NDArray[np.uint8], field) -> Tuple[NDArray[np.float32], int, int]:
1101
- """
1102
- Calculate the radius distance against time from a binary video and field.
1103
-
1104
- This function computes the change in radius distances over time
1105
- by analyzing a binary video and mapping it to corresponding field values.
1106
-
1107
- Parameters
1108
- ----------
1109
- binary_video : ndarray of uint8
1110
- Binary video data.
1111
- field : ndarray
1112
- Field values to analyze the radius distances against.
1113
-
1114
- Returns
1115
- -------
1116
- distance_against_time : ndarray of float32
1117
- Radius distances over time.
1118
- time_start : int
1119
- Starting time index where the radius distance measurement begins.
1120
- time_end : int
1121
- Ending time index where the radius distance measurement ends.
1122
-
1123
- Examples
1124
- --------
1125
- >>> binary_video = np.ones((10, 5, 5), dtype=np.uint8)
1126
-
1127
- >>> distance_against_time, time_start, time_end = get_radius_distance_against_time(binary_video, field)
1128
- """
1129
- pixel_start = np.max(field[field > 0])
1130
- pixel_end = np.min(field[field > 0])
1131
- time_span = np.arange(binary_video.shape[0])
1132
- time_start = 0
1133
- time_end = time_span[-1]
1134
- start_not_found: bool = True
1135
- for t in time_span:
1136
- if start_not_found:
1137
- if np.any((field == pixel_start) * binary_video[t, :, :]):
1138
- start_not_found = False
1139
- time_start = t
1140
- if np.any((field == pixel_end) * binary_video[t, :, :]):
1141
- time_end = t
1142
- break
1143
- distance_against_time = np.linspace(pixel_start, pixel_end, (time_end - time_start + 1))
1144
- distance_against_time = np.round(distance_against_time).astype(np.float32)
1145
- return distance_against_time, time_start, time_end
1146
-
1147
-
1148
- def close_holes(binary_img: NDArray[np.uint8]) -> NDArray[np.uint8]:
1149
- """
1150
- Close holes in a binary image using connected components analysis.
1151
-
1152
- This function identifies and closes small holes within the foreground objects of a binary image. It uses connected component analysis to find and fill holes that are smaller than the main object.
1153
-
1154
- Parameters
1155
- ----------
1156
- binary_img : ndarray of uint8
1157
- Binary input image where holes need to be closed.
1158
-
1159
- Returns
1160
- -------
1161
- out : ndarray of uint8
1162
- Binary image with closed holes.
1163
-
1164
- Examples
1165
- --------
1166
- >>> binary_img = np.zeros((10, 10), dtype=np.uint8)
1167
- >>> binary_img[2:8, 2:8] = 1
1168
- >>> binary_img[4:6, 4:6] = 0 # Creating a hole
1169
- >>> result = close_holes(binary_img)
1170
- >>> print(result)
1171
- [[0 0 0 0 0 0 0 0 0 0]
1172
- [0 0 0 0 0 0 0 0 0 0]
1173
- [0 0 1 1 1 1 1 1 0 0]
1174
- [0 0 1 1 1 1 1 1 0 0]
1175
- [0 0 1 1 1 1 1 1 0 0]
1176
- [0 0 1 1 1 1 1 1 0 0]
1177
- [0 0 1 1 1 1 1 1 0 0]
1178
- [0 0 1 1 1 1 1 1 0 0]
1179
- [0 0 0 0 0 0 0 0 0 0]
1180
- [0 0 0 0 0 0 0 0 0 0]]
1181
- """
1182
- #### Third version ####
1183
- nb, new_order = cv2.connectedComponents(1 - binary_img)
1184
- if nb > 2:
1185
- binary_img[new_order > 1] = 1
1186
- return binary_img
1187
-
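For context, on simply nested holes the effect of close_holes() matches scipy's generic hole filling; the comparison below is illustrative only and not part of either package version.

import numpy as np
from scipy.ndimage import binary_fill_holes

binary_img = np.zeros((10, 10), dtype=np.uint8)
binary_img[2:8, 2:8] = 1
binary_img[4:6, 4:6] = 0                       # carve a hole
filled = binary_fill_holes(binary_img).astype(np.uint8)
print(filled[2:8, 2:8].all())                  # True: the hole is filled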
1188
-
1189
- def dynamically_expand_to_fill_holes(binary_video: NDArray[np.uint8], holes: NDArray[np.uint8]) -> Tuple[NDArray[np.uint8], int, NDArray[np.float32]]:
1190
- """
1191
- Fill the holes in a binary video by progressively expanding the shape made of ones.
1192
-
1193
- Parameters
1194
- ----------
1195
- binary_video : ndarray of uint8
1196
- The binary video where holes need to be filled.
1197
- holes : ndarray of uint8
1198
- Array representing the holes in the binary video.
1199
-
1200
- Returns
1201
- -------
1202
- out : tuple of ndarray of uint8, int, and ndarray of float32
1203
- The modified binary video with filled holes,
1204
- the end time when all holes are filled, and
1205
- an array of distances against time used to fill the holes.
1206
-
1207
- Examples
1208
- --------
1209
- >>> binary_video = np.zeros((10, 640, 480), dtype=np.uint8)
1210
- >>> binary_video[:, 300:400, 220:240] = 1
1211
- >>> holes = np.zeros((640, 480), dtype=np.uint8)
1212
- >>> holes[340:360, 228:232] = 1
1213
- >>> filled_video, end_time, distances = dynamically_expand_to_fill_holes(binary_video, holes)
1214
- >>> print(filled_video.shape) # Should print (10, 640, 480)
1215
- (10, 640, 480)
1216
- """
1217
- # The first move should happen at the time when the first hole pixel could have been covered:
1218
- # i.e. how much time the shape took to cross a distance long enough to cover all the holes
1219
- holes_contours = cv2.dilate(holes, cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1220
- field = rounded_inverted_distance_transform(binary_video[0, :, :], (binary_video.shape[0] - 1))
1221
- field2 = inverted_distance_transform(binary_video[0, :, :], (binary_video.shape[0] - 1))
1222
- holes_contours = holes_contours * field * binary_video[- 1, :, :]
1223
- holes[np.nonzero(holes)] = field[np.nonzero(holes)]
1224
- if np.any(holes_contours):
1225
- # Find the relationship between distance and time
1226
- distance_against_time, holes_time_start, holes_time_end = get_radius_distance_against_time(binary_video, holes_contours)
1227
- # Use that vector to progressively fill holes at the same speed as shape grows
1228
- for t in np.arange(len(distance_against_time)):
1229
- new_order, stats, centers = cc((holes >= distance_against_time[t]).astype(np.uint8))
1230
- for comp_i in np.arange(1, stats.shape[0]):
1231
- past_image = deepcopy(binary_video[holes_time_start + t, :, :])
1232
- with_new_comp = new_order == comp_i
1233
- past_image[with_new_comp] = 1
1234
- nb_comp, image_garbage = cv2.connectedComponents(past_image)
1235
- if nb_comp == 2:
1236
- binary_video[holes_time_start + t, :, :][with_new_comp] = 1
1237
- # Make sure that holes remain filled from holes_time_end to the end of the video
1238
- for t in np.arange((holes_time_end + 1), binary_video.shape[0]):
1239
- past_image = binary_video[t, :, :]
1240
- past_image[holes >= distance_against_time[-1]] = 1
1241
- binary_video[t, :, :] = past_image
1242
- else:
1243
- holes_time_end = None
1244
- distance_against_time = np.array([1, 2], dtype=np.float32)
1245
-
1246
- return binary_video, holes_time_end, distance_against_time
1247
-
1248
-
1249
- class Ellipse:
1250
- """
1251
- Create an ellipse with given vertical and horizontal sizes.
1252
-
1253
- This class represents an ellipse defined by its vertical and horizontal
1254
- dimensions. It provides methods to check if a point lies within the ellipse
1255
- and to generate a 2D array representing the ellipse shape.
1256
- """
1257
- def __init__(self, sizes):
1258
- """
1259
- Initialize the object with given vertical and horizontal sizes.
1260
-
1261
- Parameters
1262
- ----------
1263
- sizes : list or tuple of int, length 2
1264
- List containing two integers representing vertical and horizontal sizes.
1265
-
1266
- Attributes
1267
- ----------
1268
- vsize : int
1269
- Vertical size of the object.
1270
- hsize : int
1271
- Horizontal size of the object.
1272
- vr : int
1273
- Half of the horizontal size.
1274
- hr : int
1275
- Half of the vertical size.
1276
- """
1277
- self.vsize = sizes[0]
1278
- self.hsize = sizes[1]
1279
- self.vr = self.hsize // 2
1280
- self.hr = self.vsize // 2
1281
-
1282
- def ellipse_fun(self, x, y):
1283
- """
1284
- Check if a point (x,y) lies within or on the ellipse.
1285
-
1286
- This function checks if a given point lies inside or on the boundary
1287
- of an ellipse defined by its horizontal radius (`self.hr`) and vertical
1288
- radius (`self.vr`). The center of the ellipse is at (self.hr, self.vr).
1289
-
1290
- Parameters
1291
- ----------
1292
- x : float
1293
- The x-coordinate of the point to be checked.
1294
- y : float
1295
- The y-coordinate of the point to be checked.
1296
-
1297
- Returns
1298
- -------
1299
- bool
1300
- True if the point (x, y) lies within or on the ellipse; False otherwise.
1301
-
1302
- """
1303
- return (((x - self.hr) ** 2) / (self.hr ** 2)) + (((y - self.vr) ** 2) / (self.vr ** 2)) <= 1
1304
-
1305
- def create(self) -> NDArray:
1306
- """
1307
- Create a 2D array representing an ellipse.
1308
-
1309
- This method generates a NumPy array where each element is determined by
1310
- the `ellipse_fun` function, which computes values based on the horizontal
1311
- and vertical sizes of the ellipse.
1312
-
1313
- Returns
1314
- -------
1315
- ndarray
1316
- A 2D NumPy array representing the ellipse shape.
1317
- """
1318
- return np.fromfunction(self.ellipse_fun, (self.vsize, self.hsize))
1319
-
1320
-
1321
- rhombus_55 = Ellipse((5, 5)).create().astype(np.uint8)
1322
-
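The module-level rhombus_55 kernel defined just above evaluates to a 5x5 disc of radius 2; printing it makes the structuring element explicit. Sketch below, assuming the 0.1.3 module shown here is importable.

from cellects.image_analysis.morphological_operations import rhombus_55

print(rhombus_55)
# [[0 0 1 0 0]
#  [0 1 1 1 0]
#  [1 1 1 1 1]
#  [0 1 1 1 0]
#  [0 0 1 0 0]]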
1323
-
1324
- def get_contours(binary_image: NDArray[np.uint8]) -> NDArray[np.uint8]:
1325
- """
1326
- Find and return the contours of a binary image.
1327
-
1328
- This function erodes the input binary image using a 3x3 cross-shaped
1329
- structuring element and then subtracts the eroded image from the original to obtain the contours.
1330
-
1331
- Parameters
1332
- ----------
1333
- binary_image : ndarray of uint8
1334
- Input binary image from which to extract contours.
1335
-
1336
- Returns
1337
- -------
1338
- out : ndarray of uint8
1339
- Image containing only the contours extracted from `binary_image`.
1340
-
1341
- Examples
1342
- --------
1343
- >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1344
- >>> binary_image[2:8, 2:8] = 1
1345
- >>> result = get_contours(binary_image)
1346
- >>> print(result)
1347
- [[0 0 0 0 0 0 0 0 0 0]
1348
- [0 0 0 0 0 0 0 0 0 0]
1349
- [0 0 1 1 1 1 1 1 0 0]
1350
- [0 0 1 0 0 0 0 1 0 0]
1351
- [0 0 1 0 0 0 0 1 0 0]
1352
- [0 0 1 0 0 0 0 1 0 0]
1353
- [0 0 1 0 0 0 0 1 0 0]
1354
- [0 0 1 1 1 1 1 1 0 0]
1355
- [0 0 0 0 0 0 0 0 0 0]
1356
- [0 0 0 0 0 0 0 0 0 0]]
1357
- """
1358
- if np.all(binary_image):
1359
- contours = 1 - image_borders(binary_image.shape)
1360
- elif np.any(binary_image):
1361
- eroded_binary = cv2.erode(binary_image, cross_33)
1362
- contours = binary_image - eroded_binary
1363
- else:
1364
- contours = binary_image
1365
- return contours
1366
-
1367
-
1368
- def prepare_box_counting(binary_image: NDArray[np.uint8], min_im_side: int=128, min_mesh_side: int=8, zoom_step: int=0, contours: bool=True)-> Tuple[NDArray[np.uint8], NDArray[np.uint8]]:
1369
- """Prepare box counting parameters for image analysis.
1370
-
1371
- Prepares parameters for box counting method based on binary
1372
- image input. Adjusts image size, computes side lengths, and applies
1373
- contour extraction if specified.
1374
-
1375
- Parameters
1376
- ----------
1377
- binary_image : ndarray of uint8
1378
- Binary image for analysis.
1379
- min_im_side : int, optional
1380
- Minimum side length threshold. Default is 128.
1381
- min_mesh_side : int, optional
1382
- Minimum mesh side length. Default is 8.
1383
- zoom_step : int, optional
1384
- Zoom step for side lengths computation. Default is 0.
1385
- contours : bool, optional
1386
- Whether to apply contour extraction. Default is True.
1387
-
1388
- Returns
1389
- -------
1390
- out : tuple of ndarray of uint8, ndarray (or None)
1391
- Cropped binary image and computed side lengths.
1392
-
1393
- Examples
1394
- --------
1395
- >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1396
- >>> binary_image[2:4, 2:6] = 1
1397
- >>> binary_image[7:9, 4:7] = 1
1398
- >>> binary_image[4:7, 5] = 1
1399
- >>> cropped_img, side_lengths = prepare_box_counting(binary_image, min_im_side=2, min_mesh_side=2)
1400
- >>> print(cropped_img), print(side_lengths)
1401
- [[0 0 0 0 0 0 0]
1402
- [0 1 1 1 1 0 0]
1403
- [0 1 1 1 1 0 0]
1404
- [0 0 0 0 1 0 0]
1405
- [0 0 0 0 1 0 0]
1406
- [0 0 0 0 1 0 0]
1407
- [0 0 0 1 0 1 0]
1408
- [0 0 0 1 1 1 0]
1409
- [0 0 0 0 0 0 0]]
1410
- [4 2]
1411
- """
1412
- side_lengths = None
1413
- zoomed_binary = binary_image
1414
- binary_idx = np.nonzero(binary_image)
1415
- if binary_idx[0].size:
1416
- min_y = np.min(binary_idx[0])
1417
- min_y = np.max((min_y - 1, 0))
1418
-
1419
- min_x = np.min(binary_idx[1])
1420
- min_x = np.max((min_x - 1, 0))
1421
-
1422
- max_y = np.max(binary_idx[0])
1423
- max_y = np.min((max_y + 1, binary_image.shape[0] - 1))
1424
-
1425
- max_x = np.max(binary_idx[1])
1426
- max_x = np.min((max_x + 1, binary_image.shape[1] - 1))
1427
-
1428
- zoomed_binary = deepcopy(binary_image[min_y:(max_y + 1), min_x: (max_x + 1)])
1429
- min_side = np.min(zoomed_binary.shape)
1430
- if min_side >= min_im_side:
1431
- if contours:
1432
- eroded_zoomed_binary = cv2.erode(zoomed_binary, cross_33)
1433
- zoomed_binary = zoomed_binary - eroded_zoomed_binary
1434
- if zoom_step == 0:
1435
- max_power = int(np.floor(np.log2(min_side))) # Largest integer power of 2
1436
- side_lengths = 2 ** np.arange(max_power, int(np.log2(min_mesh_side // 2)), -1)
1437
- else:
1438
- side_lengths = np.arange(min_mesh_side, min_side, zoom_step)
1439
- return zoomed_binary, side_lengths
1440
-
1441
-
1442
- def box_counting_dimension(zoomed_binary: NDArray[np.uint8], side_lengths: NDArray, display: bool=False) -> Tuple[float, float, float]:
1443
- """
1444
- Box counting dimension calculation.
1445
-
1446
- This function calculates the box-counting dimension of a binary image by analyzing the number
1447
- of boxes (of varying sizes) that contain at least one pixel of the image. The function also
1448
- provides the R-squared value from linear regression and the number of boxes used.
1449
-
1450
- Parameters
1451
- ----------
1452
- zoomed_binary : NDArray[np.uint8]
1453
- Binary image (0 or 255 values) for which the box-counting dimension is calculated.
1454
- side_lengths : NDArray
1455
- Array of side lengths for the boxes used in the box-counting calculation.
1456
- display : bool, optional
1457
- If True, displays a scatter plot of the log-transformed box counts and diameters,
1458
- along with the linear regression fit. Default is False.
1459
-
1460
- Returns
1461
- -------
1462
- out : Tuple[float, float, float]
1463
- A tuple containing the calculated box-counting dimension (`d`), R-squared value (`r_value`),
1464
- and the number of boxes used (`box_nb`).
1465
-
1466
- Examples
1467
- --------
1468
- >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1469
- >>> binary_image[2:4, 2:6] = 1
1470
- >>> binary_image[7:9, 4:7] = 1
1471
- >>> binary_image[4:7, 5] = 1
1472
- >>> zoomed_binary, side_lengths = prepare_box_counting(binary_image, min_im_side=2, min_mesh_side=2)
1473
- >>> dimension, r_value, box_nb = box_counting_dimension(zoomed_binary, side_lengths)
1474
- >>> print(dimension, r_value, box_nb)
1475
- (np.float64(1.1699250014423126), np.float64(0.9999999999999998), 2)
1476
- """
1477
- dimension:float = 0.
1478
- r_value:float = 0.
1479
- box_nb:float = 0.
1480
- if side_lengths is not None:
1481
- box_counts = np.zeros(len(side_lengths), dtype=np.uint64)
1482
- # Loop through side_lengths and compute block counts
1483
- for idx, side_length in enumerate(side_lengths):
1484
- S = np.add.reduceat(
1485
- np.add.reduceat(zoomed_binary, np.arange(0, zoomed_binary.shape[0], side_length), axis=0),
1486
- np.arange(0, zoomed_binary.shape[1], side_length),
1487
- axis=1
1488
- )
1489
- box_counts[idx] = len(np.where(S > 0)[0])
1490
-
1491
- valid_indices = box_counts > 0
1492
- if valid_indices.sum() >= 2:
1493
- log_box_counts = np.log(box_counts)
1494
- log_reciprocal_lengths = np.log(1 / side_lengths)
1495
- slope, intercept, r_value, p_value, stderr = linregress(log_reciprocal_lengths, log_box_counts)
1496
- # coefficients = np.polyfit(log_reciprocal_lengths, log_box_counts, 1)
1497
- dimension = slope
1498
- box_nb = len(side_lengths)
1499
- if display:
1500
- plt.scatter(log_reciprocal_lengths, log_box_counts, label="Box counting")
1501
- plt.plot([0, log_reciprocal_lengths.min()], [intercept, intercept + slope * log_reciprocal_lengths.min()], label="Linear regression")
1502
- plt.plot([], [], ' ', label=f"D = {slope:.2f}")
1503
- plt.plot([], [], ' ', label=f"R2 = {r_value:.6f}")
1504
- plt.plot([], [], ' ', label=f"p-value = {p_value:.2e}")
1505
- plt.legend(loc='best')
1506
- plt.xlabel(f"log(1/Diameter) | Diameter ⊆ [{side_lengths[0]}:{side_lengths[-1]}] (n={box_nb})")
1507
- plt.ylabel(f"log(Box number) | Box number ⊆ [{box_counts[0]}:{box_counts[-1]}]")
1508
- plt.show()
1509
- # plt.close()
1510
-
1511
- return dimension, r_value, box_nb
1512
-
1513
-
1514
- def keep_shape_connected_with_ref(all_shapes: NDArray[np.uint8], reference_shape: NDArray[np.uint8]) -> NDArray[np.uint8]:
1515
- """
1516
- Keep shape connected with reference.
1517
-
1518
- This function analyzes the connected components of a binary image represented by `all_shapes`
1519
- and returns the first component that intersects with the `reference_shape`.
1520
- If no such component is found, it returns None.
1521
-
1522
- Parameters
1523
- ----------
1524
- all_shapes : ndarray of uint8
1525
- Binary image containing all shapes to analyze.
1526
- reference_shape : ndarray of uint8
1527
- Binary reference shape used for intersection check.
1528
-
1529
- Returns
1530
- -------
1531
- out : ndarray of uint8 or None
1532
- The first connected component that intersects with the reference shape,
1533
- or None if no such component is found.
1534
-
1535
- Examples
1536
- -------
1537
- >>> all_shapes = np.zeros((5, 5), dtype=np.uint8)
1538
- >>> reference_shape = np.zeros((5, 5), dtype=np.uint8)
1539
- >>> reference_shape[3, 3] = 1
1540
- >>> all_shapes[0:2, 0:2] = 1
1541
- >>> all_shapes[3:4, 3:4] = 1
1542
- >>> res = keep_shape_connected_with_ref(all_shapes, reference_shape)
1543
- >>> print(res)
1544
- [[0 0 0 0 0]
1545
- [0 0 0 0 0]
1546
- [0 0 0 0 0]
1547
- [0 0 0 1 0]
1548
- [0 0 0 0 0]]
1549
- """
1550
- number, order = cv2.connectedComponents(all_shapes, ltype=cv2.CV_16U)
1551
- expanded_shape = None
1552
- if number > 1:
1553
- for i in np.arange(1, number):
1554
- expanded_shape_test = np.zeros(order.shape, np.uint8)
1555
- expanded_shape_test[order == i] = 1
1556
- if np.any(expanded_shape_test * reference_shape):
1557
- break
1558
- if np.any(expanded_shape_test * reference_shape):
1559
- expanded_shape = expanded_shape_test
1560
- else:
1561
- expanded_shape = reference_shape
1562
- return expanded_shape
1563
-
1564
-
1565
- @njit()
1566
- def keep_largest_shape(indexed_shapes: NDArray[np.int32]) -> NDArray[np.uint8]:
1567
- """
1568
- Keep the largest shape from an array of indexed shapes.
1569
-
1570
- This function identifies the most frequent non-zero shape in the input
1571
- array and returns a binary mask where elements matching this shape are set to 1,
1572
- and others are set to 0. The function uses NumPy's bincount to count occurrences
1573
- of each shape and assumes that the first element (index 0) is not part of any
1574
- shape classification.
1575
-
1576
- Parameters
1577
- ----------
1578
- indexed_shapes : ndarray of int32
1579
- Input array containing indexed shapes.
1580
-
1581
- Returns
1582
- -------
1583
- out : ndarray of uint8
1584
- Binary mask where the largest shape is marked as 1.
1585
-
1586
- Examples
1587
- --------
1588
- >>> indexed_shapes = np.array([0, 2, 2, 3, 1], dtype=np.int32)
1589
- >>> keep_largest_shape(indexed_shapes)
1590
- array([0, 1, 1, 0, 0], dtype=uint8)
1591
- """
1592
- label_counts = np.bincount(indexed_shapes.flatten())
1593
- largest_label = 1 + np.argmax(label_counts[1:])
1594
- return (indexed_shapes == largest_label).astype(np.uint8)
1595
-
1596
-
1597
- def keep_one_connected_component(binary_image: NDArray[np.uint8])-> NDArray[np.uint8]:
1598
- """
1599
- Keep only one connected component in a binary image.
1600
-
1601
- This function filters out all but the largest connected component in
1602
- a binary image, effectively isolating it from other noise or objects.
1603
- The function ensures the input is in uint8 format before processing.
1604
-
1605
- Parameters
1606
- ----------
1607
- binary_image : ndarray of uint8
1608
- Binary image containing one or more connected components.
1609
-
1610
- Returns
1611
- -------
1612
- ndarray of uint8
1613
- Image with only the largest connected component retained.
1614
-
1615
- Examples
1616
- -------
1617
- >>> all_shapes = np.zeros((5, 5), dtype=np.uint8)
1618
- >>> all_shapes[0:2, 0:2] = 1
1619
- >>> all_shapes[3:4, 3:4] = 1
1620
- >>> res = keep_one_connected_component(all_shapes)
1621
- >>> print(res)
1622
- [[1 1 0 0 0]
1623
- [1 1 0 0 0]
1624
- [0 0 0 0 0]
1625
- [0 0 0 0 0]
1626
- [0 0 0 0 0]]
1627
- """
1628
- if binary_image.dtype != np.uint8:
1629
- binary_image = binary_image.astype(np.uint8)
1630
- num_labels, sh = cv2.connectedComponents(binary_image)
1631
- if num_labels <= 1:
1632
- return binary_image.astype(np.uint8)
1633
- else:
1634
- return keep_largest_shape(sh)
1635
-
1
+ #!/usr/bin/env python3
2
+ """
3
+ This module provides methods to analyze and modify shapes in binary images.
4
+ It includes functions for comparing neighboring pixels, generating shape descriptors,
5
+ and performing morphological operations like expanding shapes and filling holes.
6
+
7
+ Classes
8
+ ---------
9
+ CompareNeighborsWithValue : Class to compare neighboring pixels to a specified value
10
+
11
+ Functions
12
+ ---------------
13
+ cc : Sort connected components according to size
14
+ make_gravity_field : Create a gradient field around shapes
15
+ find_median_shape : Generate median shape from multiple inputs
16
+ make_numbered_rays : Create numbered rays for analysis
17
+ CompareNeighborsWithFocal : Compare neighboring pixels to focal values
18
+ ShapeDescriptors : Generate shape descriptors using provided functions
19
+ get_radius_distance_against_time : Calculate radius distances over time
20
+ expand_until_one : Expand shapes until a single connected component remains
21
+ expand_and_rate_until_one : Expand and rate shapes until one remains
22
+ expand_until_overlap : Expand shapes until overlap occurs
23
+ dynamically_expand_to_fill_holes : Dynamically expand to fill holes in shapes
24
+ expand_smalls_toward_biggest : Expand smaller shapes toward largest component
25
+ change_thresh_until_one : Change threshold until one connected component remains
26
+ create_ellipse : Generate ellipse shape descriptors
27
+ get_rolling_window_coordinates_list : Get coordinates for rolling window operations
28
+
29
+ """
30
+ import logging
31
+ from copy import deepcopy
32
+ import cv2
33
+ import numpy as np
34
+ from numpy.typing import NDArray
35
+ from typing import Tuple
36
+ from scipy.spatial import KDTree
37
+ from scipy.spatial.distance import pdist
38
+ from cellects.utils.decorators import njit
39
+ from cellects.image_analysis.shape_descriptors import ShapeDescriptors
40
+ from cellects.utils.formulas import moving_average, bracket_to_uint8_image_contrast
41
+ from skimage.filters import threshold_otsu
42
+ from skimage.measure import label
43
+ from scipy.stats import linregress
44
+ from scipy.ndimage import distance_transform_edt
45
+ import matplotlib.pyplot as plt
46
+
47
+
48
+ cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
49
+ square_33 = np.ones((3, 3), np.uint8)
50
+
51
+
52
+ class CompareNeighborsWithValue:
53
+ """
54
+ CompareNeighborsWithValue class to summarize each pixel by comparing its neighbors to a value.
55
+
56
+ This class analyzes pixels in a 2D array, comparing each pixel's neighbors
57
+ to a specified value. The comparison can be equality, superiority,
58
+ or inferiority, and neighbors can be the 4 or 8 nearest pixels based on
59
+ the connectivity parameter.
60
+ """
61
+ def __init__(self, array: np.ndarray, connectivity: int=None, data_type: np.dtype=np.int8):
62
+ """
63
+ Initialize a class for array connectivity processing.
64
+
65
+ This class processes arrays based on given connectivities, creating
66
+ windows around the original data for both 1D and 2D arrays. Depending on
67
+ the connectivity value (4 or 8), it creates different windows with borders.
68
+
69
+ Parameters
70
+ ----------
71
+ array : ndarray
72
+ Input array to process, can be 1D or 2D.
73
+ connectivity : int, optional
74
+ Connectivity type for processing (4 or 8), by default None.
75
+ data_type : dtype, optional
76
+ Data type for the array elements, by default np.int8.
77
+
78
+ Attributes
79
+ ----------
80
+ array : ndarray
81
+ The processed array based on the given data type.
82
+ connectivity : int
83
+ Connectivity value used for processing.
84
+ on_the_right : ndarray
85
+ Array with shifted elements to the right.
86
+ on_the_left : ndarray
87
+ Array with shifted elements to the left.
88
+ on_the_bot : ndarray, optional
89
+ Array with shifted elements to the bottom (for 2D arrays).
90
+ on_the_top : ndarray, optional
91
+ Array with shifted elements to the top (for 2D arrays).
92
+ on_the_topleft : ndarray, optional
93
+ Array with shifted elements to the top left (for 2D arrays).
94
+ on_the_topright : ndarray, optional
95
+ Array with shifted elements to the top right (for 2D arrays).
96
+ on_the_botleft : ndarray, optional
97
+ Array with shifted elements to the bottom left (for 2D arrays).
98
+ on_the_botright : ndarray, optional
99
+ Array with shifted elements to the bottom right (for 2D arrays).
100
+ """
101
+ array = array.astype(data_type)
102
+ self.array = array
103
+ self.connectivity = connectivity
104
+ if len(self.array.shape) == 1:
105
+ self.on_the_right = np.append(array[1:], array[-1])
106
+ self.on_the_left = np.append(array[0], array[:-1])
107
+ else:
108
+ # Build 4 windows of the original array, each missing one of the four borders
109
+ # Grow each window with a copy of its last row/column on the side opposite to the removed border
110
+ if self.connectivity == 4 or self.connectivity == 8:
111
+ self.on_the_right = np.column_stack((array[:, 1:], array[:, -1]))
112
+ self.on_the_left = np.column_stack((array[:, 0], array[:, :-1]))
113
+ self.on_the_bot = np.vstack((array[1:, :], array[-1, :]))
114
+ self.on_the_top = np.vstack((array[0, :], array[:-1, :]))
115
+ if self.connectivity != 4:
116
+ self.on_the_topleft = array[:-1, :-1]
117
+ self.on_the_topright = array[:-1, 1:]
118
+ self.on_the_botleft = array[1:, :-1]
119
+ self.on_the_botright = array[1:, 1:]
120
+
121
+ self.on_the_topleft = np.vstack((self.on_the_topleft[0, :], self.on_the_topleft))
122
+ self.on_the_topleft = np.column_stack((self.on_the_topleft[:, 0], self.on_the_topleft))
123
+
124
+ self.on_the_topright = np.vstack((self.on_the_topright[0, :], self.on_the_topright))
125
+ self.on_the_topright = np.column_stack((self.on_the_topright, self.on_the_topright[:, -1]))
126
+
127
+ self.on_the_botleft = np.vstack((self.on_the_botleft, self.on_the_botleft[-1, :]))
128
+ self.on_the_botleft = np.column_stack((self.on_the_botleft[:, 0], self.on_the_botleft))
129
+
130
+ self.on_the_botright = np.vstack((self.on_the_botright, self.on_the_botright[-1, :]))
131
+ self.on_the_botright = np.column_stack((self.on_the_botright, self.on_the_botright[:, -1]))
132
+
133
+ def is_equal(self, value, and_itself: bool=False):
134
+ """
135
+ Check equality of neighboring values in an array.
136
+
137
+ This method compares the neighbors of each element in `self.array` to a given value.
138
+ Depending on the dimensionality and connectivity settings, it checks different neighboring
139
+ elements.
140
+
141
+ Parameters
142
+ ----------
143
+ value : int or float
144
+ The value to check equality with neighboring elements.
145
+ and_itself : bool, optional
146
+ If True, also check equality with the element itself. Defaults to False.
147
+
148
+ Returns
149
+ -------
150
+ None
151
+
152
+ Attributes
153
+ ----------
154
+ equal_neighbor_nb : ndarray of uint8
155
+ Array that holds the number of equal neighbors for each element.
156
+
157
+ Examples
158
+ --------
159
+ >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
160
+ >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
161
+ >>> compare.is_equal(1)
162
+ >>> print(compare.equal_neighbor_nb)
163
+ [[0 0 1 0]
164
+ [0 1 1 1]
165
+ [0 1 1 1]
166
+ [0 0 1 0]]
167
+ """
168
+
169
+ if len(self.array.shape) == 1:
170
+ self.equal_neighbor_nb = np.sum((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value)), axis=0)
171
+ else:
172
+ if self.connectivity == 4:
173
+ self.equal_neighbor_nb = np.dstack((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
174
+ np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value)))
175
+ elif self.connectivity == 8:
176
+ self.equal_neighbor_nb = np.dstack(
177
+ (np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
178
+ np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value),
179
+ np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
180
+ np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
181
+ else:
182
+ self.equal_neighbor_nb = np.dstack(
183
+ (np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
184
+ np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
185
+ self.equal_neighbor_nb = np.sum(self.equal_neighbor_nb, 2, dtype=np.uint8)
186
+
187
+ if and_itself:
188
+ self.equal_neighbor_nb[np.not_equal(self.array, value)] = 0
189
+
190
+ def is_sup(self, value, and_itself=False):
191
+ """
192
+ Count, for each pixel, the neighbors whose values are greater than a given threshold.
193
+
194
+ This method computes the number of neighboring pixels that have values greater
195
+ than a specified `value` for each pixel in the array. Optionally, it can exclude
196
+ the pixel itself if its value is less than or equal to `value`.
197
+
198
+ Parameters
199
+ ----------
200
+ value : int
201
+ The threshold value used to determine if a neighboring pixel's value is greater.
202
+ and_itself : bool, optional
203
+ If True, exclude the pixel itself if its value is less than or equal to `value`.
204
+ Defaults to False.
205
+
206
+ Examples
207
+ --------
208
+ >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
209
+ >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
210
+ >>> compare.is_sup(1)
211
+ >>> print(compare.sup_neighbor_nb)
212
+ [[3 3 2 4]
213
+ [4 2 3 3]
214
+ [4 2 3 3]
215
+ [3 3 2 4]]
216
+ """
217
+ if len(self.array.shape) == 1:
218
+ self.sup_neighbor_nb = (self.on_the_right > value).astype(self.array.dtype) + (self.on_the_left > value).astype(self.array.dtype)
219
+ else:
220
+ if self.connectivity == 4:
221
+ self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
222
+ self.on_the_bot > value, self.on_the_top > value))
223
+ elif self.connectivity == 8:
224
+ self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
225
+ self.on_the_bot > value, self.on_the_top > value,
226
+ self.on_the_topleft > value, self.on_the_topright > value,
227
+ self.on_the_botleft > value, self.on_the_botright > value))
228
+ else:
229
+ self.sup_neighbor_nb = np.dstack((self.on_the_topleft > value, self.on_the_topright > value,
230
+ self.on_the_botleft > value, self.on_the_botright > value))
231
+
232
+ self.sup_neighbor_nb = np.sum(self.sup_neighbor_nb, 2, dtype=np.uint8)
233
+ if and_itself:
234
+ self.sup_neighbor_nb[np.less_equal(self.array, value)] = 0
235
+
236
+ def is_inf(self, value, and_itself=False):
237
+ """
238
+ is_inf(value, and_itself=False)
239
+
240
+ Count, for each element, the number of neighbors that are strictly smaller than a given value,
241
+ using the chosen connectivity.
242
+
243
+ Parameters
244
+ ----------
245
+ value : numeric
246
+ The value to compare neighbor elements against.
247
+ and_itself : bool, optional
248
+ If True, the count is kept only for elements that are themselves smaller than `value`. Default is False.
249
+
250
+ Examples
251
+ --------
252
+ >>> matrix = np.array([[9, 0, 4, 6], [4, 9, 1, 3], [7, 2, 1, 4], [9, 0, 8, 5]], dtype=np.int8)
253
+ >>> compare = CompareNeighborsWithValue(matrix, connectivity=4)
254
+ >>> compare.is_inf(1)
255
+ >>> print(compare.inf_neighbor_nb)
256
+ [[1 1 1 0]
257
+ [0 1 0 0]
258
+ [0 1 0 0]
259
+ [1 1 1 0]]
260
+ """
261
+ if len(self.array.shape) == 1:
262
+ self.inf_neighbor_nb = (self.on_the_right < value).astype(self.array.dtype) + (self.on_the_left < value).astype(self.array.dtype)
263
+ else:
264
+ if self.connectivity == 4:
265
+ self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
266
+ self.on_the_bot < value, self.on_the_top < value))
267
+ elif self.connectivity == 8:
268
+ self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
269
+ self.on_the_bot < value, self.on_the_top < value,
270
+ self.on_the_topleft < value, self.on_the_topright < value,
271
+ self.on_the_botleft < value, self.on_the_botright < value))
272
+ else:
273
+ self.inf_neighbor_nb = np.dstack((self.on_the_topleft < value, self.on_the_topright < value,
274
+ self.on_the_botleft < value, self.on_the_botright < value))
275
+
276
+ self.inf_neighbor_nb = np.sum(self.inf_neighbor_nb, 2, dtype=np.uint8)
277
+ if and_itself:
278
+ self.inf_neighbor_nb[np.greater_equal(self.array, value)] = 0
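A minimal usage sketch of CompareNeighborsWithValue (illustrative toy array, assuming the class is importable from this module): flag the contour pixels of a binary mask as the foreground pixels that have at least one background 4-neighbour.
import numpy as np

mask = np.zeros((5, 5), np.uint8)
mask[1:4, 1:4] = 1
cmp_neigh = CompareNeighborsWithValue(mask, connectivity=4)
cmp_neigh.is_equal(0)  # count, for every pixel, its 4-neighbours equal to 0
contour = np.logical_and(mask == 1, cmp_neigh.equal_neighbor_nb > 0)
# The interior pixel (2, 2) has no background neighbour, so only the one-pixel ring remains
print(contour.astype(np.uint8))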
279
+
280
+
281
+ def cc(binary_img: NDArray[np.uint8]) -> Tuple[NDArray, NDArray, NDArray]:
282
+ """
283
+ Processes a binary image to reorder and label connected components.
284
+
285
+ This function takes a binary image, analyses the connected components,
286
+ reorders them by size, ensures background is correctly labeled as 0,
287
+ and returns the new ordered labels along with their statistics and centers.
288
+
289
+ Parameters
290
+ ----------
291
+ binary_img : ndarray of uint8
292
+ Input binary image with connected components.
293
+
294
+ Returns
295
+ -------
296
+ new_order : ndarray of uint8, uint16 or uint32
297
+ Image with reordered labels for connected components.
298
+ stats : ndarray of ints
299
+ Statistics for each component (x_start, y_start, x_end, y_end, area); the OpenCV width and height columns are converted into end coordinates.
300
+ centers : ndarray of floats
301
+ Centers for each component (x, y).
302
+
303
+ Examples
304
+ --------
305
+ >>> binary_img = np.array([[0, 1, 0], [0, 1, 0]], dtype=np.uint8)
306
+ >>> new_order, stats, centers = cc(binary_img)
307
+ >>> stats
308
+ array([[0, 0, 3, 2, 4],
309
+ [1, 0, 2, 2, 2]], dtype=int32)
310
+ """
311
+ number, img, stats, centers = cv2.connectedComponentsWithStats(binary_img, ltype=cv2.CV_16U)
312
+ if number > 255:
313
+ img_dtype = np.uint16
314
+ if number > 65535:
315
+ img_dtype = np.uint32
316
+ else:
317
+ img_dtype = np.uint8
318
+ stats[:, 2] = stats[:, 0] + stats[:, 2]
319
+ stats[:, 3] = stats[:, 1] + stats[:, 3]
320
+ sorted_idx = np.argsort(stats[:, 4])[::-1]
321
+
322
+ # Make sure that the first connected component (labelled 0) is the background and not the main shape
323
+ size_ranked_stats = stats[sorted_idx, :]
324
+ background = (size_ranked_stats[:, 0] == 0).astype(np.uint8) + (size_ranked_stats[:, 1] == 0).astype(np.uint8) + (
325
+ size_ranked_stats[:, 2] == img.shape[1]).astype(np.uint8) + (
326
+ size_ranked_stats[:, 3] == img.shape[0]).astype(np.uint8)
327
+
328
+ # background = ((size_ranked_stats[:, 0] == 0) & (size_ranked_stats[:, 1] == 0) & (size_ranked_stats[:, 2] == img.shape[1]) & (size_ranked_stats[:, 3] == img.shape[0]))
329
+
330
+ touch_borders = np.nonzero(background > 2)[0]
331
+ # if not isinstance(touch_borders, np.int64):
332
+ # touch_borders = touch_borders[0]
333
+ # Most of the time, the background should be the largest shape and therefore has the index 0,
334
+ # Then, if at least one shape touches more than 2 borders but does not have index 0, fix the ordering:
335
+ if np.any(touch_borders != 0):
336
+ # If there is only one shape touching borders, it means that background is not at its right position (i.e. 0)
337
+ if len(touch_borders) == 1:
338
+ # Then exchange that shape position with background position
339
+ shape = sorted_idx[0] # Store shape position in the first place
340
+ back = sorted_idx[touch_borders[0]] # Store back position in the first place
341
+ sorted_idx[touch_borders[0]] = shape # Put shape position at the previous place of back and conversely
342
+ sorted_idx[0] = back
343
+ # If there are two shapes, it means that the main shape grew sufficiently to reach at least 3 borders
344
+ # We assume that it grew larger than background
345
+ else:
346
+ shape = sorted_idx[0]
347
+ back = sorted_idx[1]
348
+ sorted_idx[1] = shape
349
+ sorted_idx[0] = back
350
+ # Put shape position at the previous place of back and conversely
351
+
352
+
353
+ stats = stats[sorted_idx, :]
354
+ centers = centers[sorted_idx, :]
355
+
356
+ new_order = np.zeros_like(binary_img, dtype=img_dtype)
357
+
358
+ for i, val in enumerate(sorted_idx):
359
+ new_order[img == val] = i
360
+ return new_order, stats, centers
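A small illustrative check of the ordering contract of cc on a toy image (the commented values are what this sketch expects): label 0 stays the background and the remaining labels are sorted by decreasing area.
import numpy as np

img = np.zeros((8, 8), np.uint8)
img[1:5, 1:5] = 1   # 16-pixel component
img[6, 6] = 1       # 1-pixel component
labels, stats, centers = cc(img)
print(np.unique(labels))   # [0 1 2]
print(stats[:, 4])         # areas: background first, then decreasing -> [47 16  1]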
361
+
362
+
363
+ spot_size_coefficients = np.arange(0.75, 0.00, - 0.05)
364
+ spot_shapes = np.tile(["circle", "rectangle"], len(spot_size_coefficients))
365
+ spot_sizes = np.repeat(spot_size_coefficients, 2)
366
+
367
+
368
+ def shape_selection(binary_image:NDArray, several_blob_per_arena: bool, true_shape_number: int=None,
369
+ horizontal_size: int=None, spot_shape: str=None, bio_mask:NDArray=None, back_mask:NDArray=None):
370
+ """
371
+ Select and validate the shapes (stains) detected in a binary image.
372
+
373
+ The image is split into connected components. Components overlapping `back_mask`
374
+ are discarded and components overlapping `bio_mask` are protected from deletion.
375
+ When a single blob per arena is expected, components are then filtered iteratively:
376
+ their bounding-box width must fall within a confidence interval around `horizontal_size`,
377
+ and their area must match the area expected for the reference `spot_shape`.
378
+ The interval is narrowed until the number of remaining components equals `true_shape_number`
379
+ or the candidate coefficients are exhausted; if the count still does not match, components
380
+ that are far too small, far too large, or height outliers are also removed.
381
+
382
+ Parameters
383
+ ----------
384
+ binary_image : NDArray
385
+ Binary image containing the candidate shapes.
386
+ several_blob_per_arena : bool
387
+ If True, several blobs are allowed per arena and the reference-size filtering is skipped.
388
+ true_shape_number : int, optional
389
+ Expected number of shapes to keep.
390
+ horizontal_size : int, optional
391
+ Expected horizontal size (in pixels) of one shape, used as the size reference.
392
+ spot_shape : str, optional
393
+ Reference shape of the stains, 'circle' or 'rectangle'. If None, both are tried.
394
+ bio_mask : NDArray, optional
395
+ Mask of pixels that must be kept as biological material.
396
+ back_mask : NDArray, optional
397
+ Mask of pixels that must be treated as background.
398
+
399
+ """
400
+
401
+ shape_number, shapes, stats, centroids = cv2.connectedComponentsWithStats(binary_image, connectivity=8)
402
+ do_not_delete = None
403
+ if bio_mask is not None or back_mask is not None:
404
+ if back_mask is not None:
405
+ if np.any(shapes[back_mask]):
406
+ shapes[np.isin(shapes, np.unique(shapes[back_mask]))] = 0
407
+ shape_number, shapes, stats, centroids = cv2.connectedComponentsWithStats(
408
+ (shapes > 0).astype(np.uint8), connectivity=8)
409
+ if bio_mask is not None:
410
+ if np.any(shapes[bio_mask]):
411
+ do_not_delete = np.unique(shapes[bio_mask])
412
+ do_not_delete = do_not_delete[do_not_delete != 0]
413
+ shape_number -= 1
414
+
415
+ if not several_blob_per_arena and horizontal_size is not None:
416
+ ordered_shapes = shapes.copy()
417
+ if spot_shape is None:
418
+ c_spot_shapes = spot_shapes
419
+ c_spot_sizes = spot_sizes
420
+ else:
421
+ if spot_shape == 'circle':
422
+ c_spot_shapes = spot_shapes[::2]
423
+ else:
424
+ c_spot_shapes = spot_shapes[::-2]
425
+ c_spot_sizes = spot_sizes[::2]
426
+
427
+ # shape_number = stats.shape[0]
428
+ counter = 0
429
+ while shape_number != true_shape_number and counter < len(spot_size_coefficients):
430
+ shape = c_spot_shapes[counter]
431
+ confint = c_spot_sizes[counter]
432
+ # counter+=1;horizontal_size = self.spot_size; shape = self.parent.spot_shapes[counter];confint = self.parent.spot_size_confints[::-1][counter]
433
+ # stats columns contain in that order:
434
+ # - x leftmost coordinate of boundingbox
435
+ # - y topmost coordinate of boundingbox
436
+ # - The horizontal size of the bounding box.
437
+ # - The vertical size of the bounding box.
438
+ # - The total area (in pixels) of the connected component.
439
+
440
+ # First, remove each stain which horizontal size varies too much from reference
441
+ size_interval = [horizontal_size * (1 - confint), horizontal_size * (1 + confint)]
442
+ cc_to_remove = np.argwhere(np.logical_or(stats[:, 2] < size_interval[0], stats[:, 2] > size_interval[1]))
443
+
444
+ if do_not_delete is None:
445
+ ordered_shapes[np.isin(ordered_shapes, cc_to_remove)] = 0
446
+ else:
447
+ ordered_shapes[np.logical_and(np.isin(ordered_shapes, cc_to_remove),
448
+ np.logical_not(np.isin(ordered_shapes, do_not_delete)))] = 0
449
+
450
+ # Second, determine the shape of each stain to only keep the ones corresponding to the reference shape
451
+ validated_shapes = np.zeros(ordered_shapes.shape, dtype=np.uint8)
452
+ validated_shapes[ordered_shapes > 0] = 1
453
+ nb_components, ordered_shapes, stats, centroids = cv2.connectedComponentsWithStats(validated_shapes,
454
+ connectivity=8)
455
+ if nb_components > 1:
456
+ if shape == 'circle':
457
+ surf_interval = [np.pi * np.square(horizontal_size // 2) * (1 - confint),
458
+ np.pi * np.square(horizontal_size // 2) * (1 + confint)]
459
+ cc_to_remove = np.argwhere(
460
+ np.logical_or(stats[:, 4] < surf_interval[0], stats[:, 4] > surf_interval[1]))
461
+ elif shape == 'rectangle':
462
+ # If the smaller side is the horizontal one, use the user provided horizontal side
463
+ if np.argmin((np.mean(stats[1:, 2]), np.mean(stats[1:, 3]))) == 0:
464
+ surf_interval = [np.square(horizontal_size) * (1 - confint),
465
+ np.square(horizontal_size) * (1 + confint)]
466
+ cc_to_remove = np.argwhere(
467
+ np.logical_or(stats[:, 4] < surf_interval[0], stats[:, 4] > surf_interval[1]))
468
+ # If the smaller side is the vertical one, use the median vertical length shape
469
+ else:
470
+ surf_interval = [np.square(np.median(stats[1:, 3])) * (1 - confint),
471
+ np.square(np.median(stats[1:, 3])) * (1 + confint)]
472
+ cc_to_remove = np.argwhere(
473
+ np.logical_or(stats[:, 4] < surf_interval[0], stats[:, 4] > surf_interval[1]))
474
+ else:
475
+ logging.info("Original blob shape not well written")
476
+
477
+ if do_not_delete is None:
478
+ ordered_shapes[np.isin(ordered_shapes, cc_to_remove)] = 0
479
+ else:
480
+ ordered_shapes[np.logical_and(np.isin(ordered_shapes, cc_to_remove),
481
+ np.logical_not(np.isin(ordered_shapes, do_not_delete)))] = 0
482
+ # There was only that before:
483
+ validated_shapes = np.zeros(ordered_shapes.shape, dtype=np.uint8)
484
+ validated_shapes[np.nonzero(ordered_shapes)] = 1
485
+ nb_components, ordered_shapes, stats, centroids = cv2.connectedComponentsWithStats(validated_shapes,
486
+ connectivity=8)
487
+
488
+ shape_number = nb_components - 1
489
+ counter += 1
490
+
491
+ if shape_number == true_shape_number:
492
+ shapes = ordered_shapes
493
+ if true_shape_number is None or shape_number == true_shape_number:
494
+ validated_shapes = np.zeros(shapes.shape, dtype=np.uint8)
495
+ validated_shapes[shapes > 0] = 1
496
+ else:
497
+ max_size = binary_image.size * 0.75
498
+ min_size = 10
499
+ cc_to_remove = np.argwhere(np.logical_or(stats[1:, 4] < min_size, stats[1:, 4] > max_size)) + 1
500
+ shapes[np.isin(shapes, cc_to_remove)] = 0
501
+ validated_shapes = np.zeros(shapes.shape, dtype=np.uint8)
502
+ validated_shapes[shapes > 0] = 1
503
+ shape_number, shapes, stats, centroids = cv2.connectedComponentsWithStats(validated_shapes, connectivity=8)
504
+ if not several_blob_per_arena and true_shape_number is not None and shape_number > true_shape_number:
505
+ # Sort shapes by size and compare the largest with the second largest
506
+ # If the difference is too large, remove that largest shape.
507
+ cc_to_remove = np.array([], dtype=np.uint8)
508
+ to_remove = np.array([], dtype=np.uint8)
509
+ stats = stats[1:, :]
510
+ while stats.shape[0] > true_shape_number and to_remove is not None:
511
+ # 1) rank by height
512
+ sorted_height = np.argsort(stats[:, 2])
513
+ # and only consider the number of shapes we want to detect
514
+ standard_error = np.std(stats[sorted_height, 2][-true_shape_number:])
515
+ differences = np.diff(stats[sorted_height, 2])
516
+ # Look for very big changes from one height to the next
517
+ if differences.any() and np.max(differences) > 2 * standard_error:
518
+ # Within these, remove shapes that are too large
519
+ to_remove = sorted_height[np.argmax(differences)]
520
+ cc_to_remove = np.append(cc_to_remove, to_remove + 1)
521
+ stats = np.delete(stats, to_remove, 0)
522
+
523
+ else:
524
+ to_remove = None
525
+ shapes[np.isin(shapes, cc_to_remove)] = 0
526
+ validated_shapes = np.zeros(shapes.shape, dtype=np.uint8)
527
+ validated_shapes[shapes > 0] = 1
528
+ shape_number, shapes, stats, centroids = cv2.connectedComponentsWithStats(validated_shapes, connectivity=8)
529
+
530
+ shape_number -= 1
531
+ return validated_shapes, shape_number, stats, centroids
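A rough usage sketch of shape_selection under assumed toy conditions (two similar square spots plus one elongated outlier); the exact counts depend on which filtering branch is taken, so no output is asserted here.
import numpy as np

toy = np.zeros((40, 40), np.uint8)
toy[5:10, 5:10] = 1       # 5x5 spot
toy[5:10, 20:25] = 1      # 5x5 spot
toy[30:32, 30:38] = 1     # elongated outlier, expected to be filtered out
validated, n, stats, centroids = shape_selection(
    toy, several_blob_per_arena=False, true_shape_number=2,
    horizontal_size=5, spot_shape='rectangle')
# `validated` is a binary image of the kept spots, `n` the number of kept shapes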
532
+
533
+
534
+
535
+ def rounded_inverted_distance_transform(original_shape: NDArray[np.uint8], max_distance: int=None, with_erosion: int=0) -> NDArray[np.uint32]:
536
+ """
537
+ Perform rounded inverted distance transform on a binary image.
538
+
539
+ This function computes the inverse of the Euclidean distance transform,
540
+ where each pixel value represents its distance to the nearest zero
541
+ pixel. The operation can include erosion and will stop either at a given
542
+ max distance or until no further expansion is needed.
543
+
544
+ Parameters
545
+ ----------
546
+ original_shape : ndarray of uint8
547
+ Input binary image to be processed.
548
+ max_distance : int, optional
549
+ Maximum distance for the expansion. If None, no limit is applied.
550
+ with_erosion : int, optional
551
+ Number of erosion iterations to apply before the transform. Default is 0.
552
+
553
+ Returns
554
+ -------
555
+ out : ndarray of uint32
556
+ Output image containing the rounded inverted distance transform.
557
+
558
+ Examples
559
+ --------
560
+ >>> segmentation = np.zeros((4, 4), dtype=np.uint8)
561
+ >>> segmentation[1:3, 1:3] = 1
562
+ >>> gravity = rounded_inverted_distance_transform(segmentation, max_distance=2)
563
+ >>> print(gravity)
564
+ [[1 2 2 1]
565
+ [2 0 0 2]
566
+ [2 0 0 2]
567
+ [1 2 2 1]]
568
+ """
569
+ if with_erosion > 0:
570
+ original_shape = cv2.erode(original_shape, cross_33, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
571
+ expand = deepcopy(original_shape)
572
+ if max_distance is not None:
573
+ if max_distance > np.max(original_shape.shape):
574
+ max_distance = np.max(original_shape.shape).astype(np.uint32)
575
+ gravity_field = np.zeros(original_shape.shape , np.uint32)
576
+ for gravi in np.arange(max_distance):
577
+ expand = cv2.dilate(expand, cross_33, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
578
+ gravity_field[np.logical_xor(expand, original_shape)] += 1
579
+ else:
580
+ gravity_field = np.zeros(original_shape.shape , np.uint32)
581
+ while np.any(np.equal(original_shape + expand, 0)):
582
+ expand = cv2.dilate(expand, cross_33, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
583
+ gravity_field[np.logical_xor(expand, original_shape)] += 1
584
+ return gravity_field
585
+
586
+
587
+ def inverted_distance_transform(original_shape: NDArray[np.uint8], max_distance: int=None, with_erosion: int=0) -> NDArray[np.uint32]:
588
+ """
589
+ Calculate the distance transform around ones in a binary image, with optional erosion.
590
+
591
+ This function computes the Euclidean distance transform where zero values
592
+ represent the background and ones represent the foreground. Optionally,
593
+ it erodes the input image before computing the distance transform, and
594
+ limits distances based on a maximum value.
595
+
596
+ Parameters
597
+ ----------
598
+ original_shape : ndarray of uint8
599
+ Input binary image where ones represent the foreground.
600
+ max_distance : int, optional
601
+ Maximum distance value to threshold. If None (default), no thresholding is applied.
602
+ with_erosion : int, optional
603
+ Number of iterations for erosion. If 0 (default), no erosion is applied.
604
+
605
+ Returns
606
+ -------
607
+ out : ndarray of uint32
608
+ Distance transform array where each element represents the distance
609
+ to the nearest zero value in the input image.
610
+
611
+ See also
612
+ --------
613
+ rounded_inverted_distance_transform : less precise (integer output) and faster for small max_distance values.
614
+
615
+ Examples
616
+ --------
617
+ >>> segmentation = np.zeros((4, 4), dtype=np.uint8)
618
+ >>> segmentation[1:3, 1:3] = 1
619
+ >>> gravity = inverted_distance_transform(segmentation, max_distance=2)
620
+ >>> print(gravity)
621
+ [[1. 1.41421356 1.41421356 1. ]
622
+ [1.41421356 0. 0. 1.41421356]
623
+ [1.41421356 0. 0. 1.41421356]
624
+ [1. 1.41421356 1.41421356 1. ]]
625
+ """
626
+ if with_erosion:
627
+ original_shape = cv2.erode(original_shape, cross_33, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
628
+ gravity_field = distance_transform_edt(1 - original_shape)
629
+ if max_distance is not None:
630
+ if max_distance > np.min(original_shape.shape) / 2:
631
+ max_distance = (np.min(original_shape.shape) // 2).astype(np.uint32)
632
+ gravity_field[gravity_field >= max_distance] = 0
633
+ gravity_field[gravity_field > 0] = 1 + gravity_field.max() - gravity_field[gravity_field > 0]
634
+ return gravity_field
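A short comparison sketch of the two inverted distance transforms above on a single-pixel shape (toy values; the rounded profile in the comment was worked out from the ring-by-ring dilation).
import numpy as np

mask = np.zeros((7, 7), np.uint8)
mask[3, 3] = 1
rough = rounded_inverted_distance_transform(mask, max_distance=3)
exact = inverted_distance_transform(mask, max_distance=3)
print(rough[3, :])   # [1 2 3 0 3 2 1]: highest next to the shape, decreasing ring by ring
print(exact[3, :])   # same idea, but graded with true (float) Euclidean distances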
635
+
636
+
637
+ @njit()
638
+ def get_line_points(start, end) -> NDArray[int]:
639
+ """
640
+ Get line points between two endpoints using Bresenham's line algorithm.
641
+
642
+ This function calculates all the integer coordinate points that form a
643
+ line between two endpoints using Bresenham's line algorithm. It is
644
+ optimized for performance using Numba's just-in-time compilation.
645
+
646
+ Parameters
647
+ ----------
648
+ start : tuple of int
649
+ The starting point coordinates (y0, x0), i.e. (row, col).
650
+ end : tuple of int
651
+ The ending point coordinates (y1, x1), i.e. (row, col).
652
+
653
+ Returns
654
+ -------
655
+ out : ndarray of int
656
+ Array of points representing the line, with shape (N, 2), where N is
657
+ the number of points on the line.
658
+
659
+ Examples
660
+ --------
661
+ >>> start = (0, 0)
662
+ >>> end = (1, 2)
663
+ >>> points = get_line_points(start, end)
664
+ >>> print(points)
665
+ [[0 0]
666
+ [0 1]
667
+ [1 2]]
668
+ """
669
+ y0, x0 = start
670
+ y1, x1 = end
671
+
672
+ # Calculate differences
673
+ dx = np.abs(x1 - x0)
674
+ dy = np.abs(y1 - y0)
675
+
676
+ # Determine step direction
677
+ sx = 1 if x0 < x1 else -1
678
+ sy = 1 if y0 < y1 else -1
679
+
680
+ # Initialize
681
+ err = dx - dy
682
+ points = []
683
+ x, y = x0, y0
684
+
685
+ while True:
686
+ points.append([y, x])
687
+
688
+ # Check if we've reached the end
689
+ if x == x1 and y == y1:
690
+ break
691
+
692
+ # Calculate error for next step
693
+ e2 = 2 * err
694
+
695
+ if e2 > -dy:
696
+ err -= dy
697
+ x += sx
698
+
699
+ if e2 < dx:
700
+ err += dx
701
+ y += sy
702
+
703
+ return np.array(points)
704
+
705
+
706
+ def get_all_line_coordinates(start_point: NDArray[int], end_points: NDArray[int]) -> NDArray[int]:
707
+ """
708
+ Get all line coordinates between start point and end points.
709
+
710
+ This function computes the integer coordinates of the lines connecting a
711
+ single start point to each of several end points, using Bresenham's algorithm
712
+ via `get_line_points`.
713
+
714
+ Parameters
715
+ ----------
716
+ start_point : NDArray[int]
717
+ Starting coordinate point (row, col) shared by all the lines; integer
718
+ coordinates are expected, as in `get_line_points`.
719
+ end_points : NDArray[int]
720
+ Array of end coordinate points, one (row, col) pair per line; integer
721
+ coordinates are expected as well.
722
+
723
+ Returns
724
+ -------
725
+ out : List[NDArray[int]]
726
+ A list of numpy arrays containing the coordinates of each line
727
+ as integer values.
728
+
729
+ Examples
730
+ --------
731
+ >>> start_point = np.array([0, 0])
732
+ >>> end_points = np.array([[1, 2], [3, 4]])
733
+ >>> get_all_line_coordinates(start_point, end_points)
734
+ [array([[0, 0],
735
+ [0, 1],
736
+ [1, 2]], dtype=uint64), array([[0, 0],
737
+ [1, 1],
738
+ [1, 2],
739
+ [2, 3],
740
+ [3, 4]], dtype=uint64)]
741
+ """
742
+ lines = []
743
+ for end_point in end_points:
744
+ line_coords = get_line_points(start_point, end_point)
745
+ lines.append(np.array(line_coords, dtype=np.uint64))
746
+ return lines
747
+
748
+
749
+ def draw_me_a_sun(main_shape: NDArray, ray_length_coef: int=4) -> Tuple[NDArray, NDArray]:
750
+ """
751
+ Draw a sun-shaped pattern on an image based on the main shape and ray length coefficient.
752
+
753
+ This function takes an input binary image (main_shape) and draws sun rays
754
+ from the perimeter of that shape. The length of the rays is controlled by a coefficient.
755
+ The function ensures that rays do not extend beyond the image borders.
756
+
757
+ Parameters
758
+ ----------
759
+ main_shape : ndarray of bool or int
760
+ Binary input image where the main shape is defined.
761
+ ray_length_coef : int, optional
762
+ Coefficient controlling the length of the sun rays. Defaults to 4.
763
+
764
+ Returns
765
+ -------
766
+ rays : ndarray
767
+ Indices of the rays drawn.
768
+ sun : ndarray
769
+ Image with sun rays drawn on it.
770
+
771
+ Examples
772
+ --------
773
+ >>> main_shape = np.zeros((10, 10), dtype=np.uint8)
774
+ >>> main_shape[4:7, 3:6] = 1
775
+ >>> rays, sun = draw_me_a_sun(main_shape)
776
+ >>> print(sun.shape)
777
+ (10, 10)
778
+ """
779
+ nb, shapes, stats, center = cv2.connectedComponentsWithStats(main_shape)
780
+ sun = np.zeros(main_shape.shape, np.uint32)
781
+ rays = []
782
+ r = 0
783
+ for i in range(1, nb):
784
+ shape_i = cv2.dilate((shapes == i).astype(np.uint8), kernel=cross_33)
785
+ # shape_i = (shapes == i).astype(np.uint8)
786
+ contours = get_contours(shape_i)
787
+ first_ring_idx = np.nonzero(contours)
788
+ centroid = np.round((center[i, 1], center[i, 0])).astype(np.int64)
789
+ second_ring_y = centroid[0] + ((first_ring_idx[0] - centroid[0]) * ray_length_coef)
790
+ second_ring_x = centroid[1] + ((first_ring_idx[1] - centroid[1]) * ray_length_coef)
791
+
792
+ second_ring_y[second_ring_y < 0] = 0
793
+ second_ring_x[second_ring_x < 0] = 0
794
+
795
+ second_ring_y[second_ring_y > main_shape.shape[0] - 1] = main_shape.shape[0] - 1
796
+ second_ring_x[second_ring_x > main_shape.shape[1] - 1] = main_shape.shape[1] - 1
797
+ for j in range(len(second_ring_y)):
798
+ r += 1
799
+ fy, fx, sy, sx = first_ring_idx[0][j], first_ring_idx[1][j], second_ring_y[j], second_ring_x[j]
800
+ line = get_line_points((fy, fx), (sy, sx))
801
+ sun[line[:, 0], line[:, 1]] = r  # line points are (row, col) pairs
802
+ rays.append(r)
803
+ return np.array(rays), sun
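An illustrative call of draw_me_a_sun on a toy blob (no exact ray count is asserted, since it depends on the contour of the dilated shape).
import numpy as np

blob = np.zeros((20, 20), np.uint8)
blob[8:12, 8:12] = 1
ray_ids, sun = draw_me_a_sun(blob, ray_length_coef=3)
# One label per contour pixel: the largest label drawn in `sun` should equal the number of rays
print(len(ray_ids), int(sun.max()))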
804
+
805
+
806
+ def find_median_shape(binary_3d_matrix: NDArray[np.uint8]) -> NDArray[np.uint8]:
807
+ """
808
+ Find the median shape from a binary 3D matrix.
809
+
810
+ This function computes the median 2D slice of a binary (0/1) 3D matrix
811
+ by finding which voxels appear in at least half of the slices.
812
+
813
+ Parameters
814
+ ----------
815
+ binary_3d_matrix : ndarray of uint8
816
+ Input 3D binary matrix where each slice is a 2D array.
817
+
818
+ Returns
819
+ -------
820
+ ndarray of uint8
821
+ Median shape as a 2D binary matrix where the same voxels
822
+ that appear in at least half of the input slices are set to 1.
823
+
824
+ Examples
825
+ --------
826
+ >>> binary_3d_matrix = np.random.randint(0, 2, (10, 5, 5), dtype=np.uint8)
827
+ >>> median_shape = find_median_shape(binary_3d_matrix)
828
+ >>> print(median_shape)
829
+ """
830
+ binary_2d_matrix = np.apply_along_axis(np.sum, 0, binary_3d_matrix)
831
+ median_shape = np.zeros(binary_2d_matrix.shape, dtype=np.uint8)
832
+ median_shape[np.greater_equal(binary_2d_matrix, binary_3d_matrix.shape[0] // 2)] = 1
833
+ return median_shape
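A small deterministic sketch of find_median_shape: a voxel is kept when it is present in at least half of the frames.
import numpy as np

stack = np.zeros((4, 3, 3), np.uint8)
stack[:, 1, 1] = 1    # present in 4/4 frames -> kept
stack[0, 0, 0] = 1    # present in 1/4 frames -> dropped
print(find_median_shape(stack))
# [[0 0 0]
#  [0 1 0]
#  [0 0 0]]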
834
+
835
+
836
+ @njit()
837
+ def reduce_image_size_for_speed(image_of_2_shapes: NDArray[np.uint8]) -> Tuple[Tuple, Tuple]:
838
+ """
839
+ Reduces the size of an image containing two shapes for faster processing.
840
+
841
+ The function iteratively divides the image into quadrants and keeps only
842
+ those that contain both shapes until a minimal size is reached.
843
+
844
+ Parameters
845
+ ----------
846
+ image_of_2_shapes : ndarray of uint8
847
+ The input image containing two shapes.
848
+
849
+ Returns
850
+ -------
851
+ out : tuple of tuples
852
+ The indices of the first and second shape in the reduced image.
853
+
854
+ Examples
855
+ --------
856
+ >>> image_of_2_shapes = np.zeros((10, 10), dtype=np.uint8)
857
+ >>> image_of_2_shapes[1:3, 1:3] = 1
858
+ >>> image_of_2_shapes[1:3, 4:6] = 2
859
+ >>> shape1_idx, shape2_idx = reduce_image_size_for_speed(image_of_2_shapes)
860
+ >>> print(shape1_idx)
861
+ (array([1, 1, 2, 2]), array([1, 2, 1, 2]))
862
+ """
863
+ sub_image = image_of_2_shapes.copy()
864
+ y_size, x_size = sub_image.shape
865
+ images_list = [sub_image]
866
+ good_images = [0]
867
+ sub_image = images_list[good_images[0]]
868
+ while (len(good_images) == 1 or len(good_images) == 2) and y_size > 3 and x_size > 3:
869
+ y_size, x_size = sub_image.shape
870
+ images_list = []
871
+ images_list.append(sub_image[:((y_size // 2) + 1), :((x_size // 2) + 1)])
872
+ images_list.append(sub_image[:((y_size // 2) + 1), (x_size // 2):])
873
+ images_list.append(sub_image[(y_size // 2):, :((x_size // 2) + 1)])
874
+ images_list.append(sub_image[(y_size // 2):, (x_size // 2):])
875
+ good_images = []
876
+ for idx, image in enumerate(images_list):
877
+ if np.any(image == 2):
878
+ if np.any(image == 1):
879
+ good_images.append(idx)
880
+ if len(good_images) == 0:
881
+ break
882
+ elif len(good_images) == 2:
883
+ if good_images == [0, 1]:
884
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
885
+ elif good_images == [0, 2]:
886
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
887
+ elif good_images == [1, 3]:
888
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
889
+ elif good_images == [2, 3]:
890
+ sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
891
+ else:
892
+ pass
893
+ else:
894
+ sub_image = images_list[good_images[0]]
895
+
896
+ shape1_idx = np.nonzero(sub_image == 1)
897
+ shape2_idx = np.nonzero(sub_image == 2)
898
+ return shape1_idx, shape2_idx
899
+
900
+
901
+ def get_minimal_distance_between_2_shapes(image_of_2_shapes: NDArray[np.uint8], increase_speed: bool=True) -> float:
902
+ """
903
+ Get the minimal distance between two shapes in an image.
904
+
905
+ This function calculates the minimal Euclidean distance between
906
+ two different shapes represented by binary values 1 and 2 in a given image.
907
+ It can optionally reduce the image size for faster processing.
908
+
909
+ Parameters
910
+ ----------
911
+ image_of_2_shapes : ndarray of int8
912
+ Binary image containing two shapes to measure distance between.
913
+ increase_speed : bool, optional
914
+ Flag to reduce image size for faster computation. Default is True.
915
+
916
+ Returns
917
+ -------
918
+ min_distance : float64
919
+ The minimal Euclidean distance between the two shapes.
920
+
921
+ Examples
922
+ --------
923
+ >>> import numpy as np
924
+ >>> image = np.array([[1, 0], [0, 2]])
925
+ >>> distance = get_minimal_distance_between_2_shapes(image)
926
+ >>> print(distance)
927
+ 1.4142135623730951
928
+ """
929
+ if increase_speed:
930
+ shape1_idx, shape2_idx = reduce_image_size_for_speed(image_of_2_shapes)
931
+ else:
932
+ shape1_idx, shape2_idx = np.nonzero(image_of_2_shapes == 1), np.nonzero(image_of_2_shapes == 2)
933
+ t = KDTree(np.transpose(shape1_idx))
934
+ dists, nns = t.query(np.transpose(shape2_idx), 1)
935
+ return np.min(dists)
936
+
937
+ def get_min_or_max_euclidean_pair(coords, min_or_max: str="max") -> Tuple[np.ndarray, np.ndarray]:
938
+ """
939
+ Find the pair of points in a given set with the minimum or maximum Euclidean distance.
940
+
941
+ Parameters
942
+ ----------
943
+ coords : Union[np.ndarray, Tuple]
944
+ An Nx2 numpy array or a tuple of two arrays, each containing the x and y coordinates of points.
945
+ min_or_max : str, optional
946
+ Whether to find the 'min' or 'max' distance pair. Default is 'max'.
947
+
948
+ Returns
949
+ -------
950
+ Tuple[np.ndarray, np.ndarray]
951
+ A tuple containing the coordinates of the two points that form the minimum or maximum distance pair.
952
+
953
+ Raises
954
+ ------
955
+ ValueError
956
+ If `min_or_max` is not 'min' or 'max'.
957
+
958
+ Notes
959
+ -----
960
+ - The function first computes all pairwise distances in condensed form using `pdist`.
961
+ - Then, it finds the index of the minimum or maximum distance.
962
+ - Finally, it maps this index to the actual point indices using a binary search method.
963
+
964
+ Examples
965
+ --------
966
+ >>> coords = np.array([[0, 1], [2, 3], [4, 5]])
967
+ >>> point1, point2 = get_min_or_max_euclidean_pair(coords, min_or_max="max")
968
+ >>> print(point1)
969
+ [0 1]
970
+ >>> print(point2)
971
+ [4 5]
972
+ >>> coords = (np.array([0, 2, 4, 8, 1, 5]), np.array([0, 2, 4, 8, 0, 5]))
973
+ >>> point1, point2 = get_min_or_max_euclidean_pair(coords, min_or_max="min")
974
+ >>> print(point1)
975
+ [0 0]
976
+ >>> print(point2)
977
+ [1 0]
978
+
979
+ """
980
+ if isinstance(coords, Tuple):
981
+ coords = np.column_stack(coords)
982
+ N = coords.shape[0]
983
+ if N <= 1:
984
+ return (coords[0], coords[0]) if N == 1 else None
985
+
986
+ # Step 1: Compute all pairwise distances in condensed form
987
+ distances = pdist(coords)
988
+
989
+ # Step 2: Find the index of the maximum distance
990
+ if min_or_max == "max":
991
+ idx = np.argmax(distances)
992
+ elif min_or_max == "min":
993
+ idx = np.argmin(distances)
994
+ else:
995
+ raise ValueError
996
+
997
+ # Step 3: Map this index to (i, j) using a binary search method
998
+
999
+ def get_pair_index(k):
1000
+ low, high = 0, N
1001
+ while low < high:
1002
+ mid = (low + high) // 2
1003
+ total = mid * (2 * N - mid - 1) // 2
1004
+ if total <= k:
1005
+ low = mid + 1
1006
+ else:
1007
+ high = mid
1008
+
1009
+ i = low - 1
1010
+ prev_sum = i * (2 * N - i - 1) // 2
1011
+ j_index_in_row = k - prev_sum
1012
+ return i, i + j_index_in_row + 1 # Ensure j > i
1013
+
1014
+ i, j = get_pair_index(idx)
1015
+ return coords[i], coords[j]
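A quick consistency sketch for the condensed-index mapping used above, checked against a brute-force distance matrix (arbitrary random points; scipy is already imported by this module).
import numpy as np
from scipy.spatial.distance import pdist, squareform

rng = np.random.default_rng(0)
pts = rng.integers(0, 50, size=(12, 2))
p1, p2 = get_min_or_max_euclidean_pair(pts, min_or_max="max")
full = squareform(pdist(pts))
# The distance of the returned pair should equal the largest pairwise distance
assert np.isclose(np.linalg.norm(p1 - p2), full.max())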
1016
+
1017
+ def find_major_incline(vector: NDArray, natural_noise: float) -> Tuple[int, int]:
1018
+ """
1019
+ Find the major incline section in a vector.
1020
+
1021
+ This function identifies the segment of a vector that exhibits
1022
+ the most significant change in values, considering a specified
1023
+ natural noise level. It returns the left and right indices that
1024
+ define this segment.
1025
+
1026
+ Parameters
1027
+ ----------
1028
+ vector : ndarray of float64
1029
+ Input data vector where the incline needs to be detected.
1030
+ natural_noise : float
1031
+ The acceptable noise level for determining the incline.
1032
+
1033
+ Returns
1034
+ -------
1035
+ Tuple[int, int]
1036
+ A tuple containing two integers: the left and right indices
1037
+ of the major incline section in the vector.
1038
+
1039
+ Examples
1040
+ --------
1041
+ >>> vector = np.array([3, 5, 7, 9, 10])
1042
+ >>> natural_noise = 2.5
1043
+ >>> left, right = find_major_incline(vector, natural_noise)
1044
+ >>> (left, right)
1045
+ (0, 1)
1046
+ """
1047
+ left = 0
1048
+ right = 1
1049
+ ref_length = np.max((5, 2 * natural_noise))
1050
+ vector = moving_average(vector, 5)
1051
+ ref_extent = np.ptp(vector)
1052
+ extent = ref_extent
1053
+ # Find the left limit:
1054
+ while len(vector) > ref_length and extent > (ref_extent - (natural_noise / 4)):
1055
+ vector = vector[1:]
1056
+ extent = np.ptp(vector)
1057
+ left += 1
1058
+ # And the right one:
1059
+ extent = ref_extent
1060
+ while len(vector) > ref_length and extent > (ref_extent - natural_noise / 2):
1061
+ vector = vector[:-1]
1062
+ extent = np.ptp(vector)
1063
+ right += 1
1064
+ # And the left again, with stronger stringency:
1065
+ extent = ref_extent
1066
+ while len(vector) > ref_length and extent > (ref_extent - natural_noise):
1067
+ vector = vector[1:]
1068
+ extent = np.ptp(vector)
1069
+ left += 1
1070
+ # When there is no incline, put back left and right to 0
1071
+ if len(vector) <= ref_length:
1072
+ left = 0
1073
+ right = 1
1074
+ return left, right
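A qualitative sketch of find_major_incline on a synthetic ramp (no exact indices are asserted, since the result depends on the internal smoothing).
import numpy as np

signal = np.concatenate([np.full(20, 1.0), np.linspace(1.0, 10.0, 15), np.full(20, 10.0)])
left, right = find_major_incline(signal, natural_noise=0.5)
# `left` counts samples trimmed from the start and `right` from the end of the smoothed curve;
# a result of (0, 1) would mean that no clear incline was found
print(left, right)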
1075
+
1076
+
1077
+ def rank_from_top_to_bottom_from_left_to_right(binary_image: NDArray[np.uint8], y_boundaries: NDArray[int], get_ordered_image: bool=False) -> Tuple:
1078
+ """
1079
+ Rank components in a binary image from top to bottom and from left to right.
1080
+
1081
+ This function processes a binary image to rank its components based on
1082
+ their centroids. It first sorts the components row by row and then orders them
1083
+ within each row from left to right. If no row boundary is detected in `y_boundaries`,
1084
+ components are simply ordered from left to right. The ordered statistics and centroids are returned.
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ binary_image : ndarray of uint8
1089
+ The input binary image to process.
1090
+ y_boundaries : ndarray of int
1091
+ Boundary information for the y-coordinates.
1092
+ get_ordered_image : bool, optional
1093
+ If True, returns an ordered image in addition to the statistics and centroids.
1094
+ Default is False.
1095
+
1096
+ Returns
1097
+ -------
1098
+ tuple
1099
+ If `get_ordered_image` is True, returns a tuple containing:
1100
+ - ordered_stats : ndarray of int
1101
+ Statistics for the ordered components.
1102
+ - ordered_centroids : ndarray of float64
1103
+ Centroids for the ordered components.
1104
+ - ordered_image : ndarray of uint8
1105
+ The binary image with ordered component labels.
1106
+
1107
+ If `get_ordered_image` is False, returns a tuple containing:
1108
+ - ordered_stats : ndarray of int
1109
+ Statistics for the ordered components.
1110
+ - ordered_centroids : ndarray of float64
1111
+ Centroids for the ordered components.
1112
+ """
1113
+ nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
1114
+ connectivity=8)
1115
+
1116
+ centroids = centroids[1:, :]
1117
+ final_order = np.zeros(centroids.shape[0], dtype=np.uint8)
1118
+ sorted_against_y = np.argsort(centroids[:, 1])
1119
+ # row_nb = (y_boundaries == 1).sum()
1120
+ row_nb = np.max(((y_boundaries == 1).sum(), (y_boundaries == - 1).sum()))
1121
+ if row_nb > 0:
1122
+ component_per_row = int(np.ceil((nb_components - 1) / row_nb))
1123
+ for row_i in range(row_nb):
1124
+ row_i_start = row_i * component_per_row
1125
+ if row_i == (row_nb - 1):
1126
+ sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:], 0])
1127
+ final_order[row_i_start:] = sorted_against_y[row_i_start:][sorted_against_x]
1128
+ else:
1129
+ row_i_end = (row_i + 1) * component_per_row
1130
+ sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:row_i_end], 0])
1131
+ final_order[row_i_start:row_i_end] = sorted_against_y[row_i_start:row_i_end][sorted_against_x]
1132
+ else:
1133
+ final_order = np.argsort(centroids[:, 0])
1134
+ ordered_centroids = centroids[final_order, :]
1135
+ ordered_stats = stats[1:, :]
1136
+ ordered_stats = ordered_stats[final_order, :]
1137
+
1138
+ if get_ordered_image:
1139
+ old_to_new = np.zeros_like(final_order)
1140
+ old_to_new[final_order] = np.arange(len(final_order))
1141
+ mapping_array = np.zeros(binary_image.shape, dtype=np.uint8)
1142
+ mapping_array[output != 0] = old_to_new[output[output != 0] - 1] + 1
1143
+ ordered_image = mapping_array.copy()
1144
+ return ordered_stats, ordered_centroids, ordered_image
1145
+ else:
1146
+ return ordered_stats, ordered_centroids
1147
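# Editor's usage sketch (illustrative): four blobs on a 2x2 grid should come back ranked
# top-left, top-right, bottom-left, bottom-right. The y_boundaries array is built ad hoc here
# (one +1 where a row of blobs starts and one -1 where it ends, which is my reading of how the
# function counts rows); the import path matches the file shown in this diff.
import numpy as np
from cellects.image_analysis.morphological_operations import rank_from_top_to_bottom_from_left_to_right

img = np.zeros((40, 40), dtype=np.uint8)
img[5:10, 5:10] = 1      # top-left
img[5:10, 25:30] = 1     # top-right
img[25:30, 5:10] = 1     # bottom-left
img[25:30, 25:30] = 1    # bottom-right
y_boundaries = np.zeros(40, dtype=np.int64)
y_boundaries[[5, 25]] = 1      # two rows of blobs start here...
y_boundaries[[10, 30]] = -1    # ...and end here
stats, centroids = rank_from_top_to_bottom_from_left_to_right(img, y_boundaries)
print(np.round(centroids))  # expected order: (7, 7), (27, 7), (7, 27), (27, 27) as (x, y)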
+
1148
+
1149
+ def get_largest_connected_component(segmentation: NDArray[np.uint8]) -> Tuple[np.int64, NDArray[bool]]:
1150
+ """
1151
+ Find the largest connected component in a segmentation image.
1152
+
1153
+ This function labels all connected components in a binary
1154
+ segmentation image, determines the size of each component,
1155
+ and returns information about the largest connected component.
1156
+
1157
+ Parameters
1158
+ ----------
1159
+ segmentation : ndarray of uint8
1160
+ Binary segmentation image whose nonzero pixels form the foreground
1161
+ that is split into connected components.
1162
+
1163
+ Returns
1164
+ -------
1165
+ Tuple[int, ndarray of bool]
1166
+ A tuple containing:
1167
+ - The size of the largest connected component.
1168
+ - A boolean mask representing the largest connected
1169
+ component in the input segmentation image.
1170
+
1171
+ Examples
1172
+ --------
1173
+ >>> segmentation = np.zeros((10, 10), dtype=np.uint8)
1174
+ >>> segmentation[2:6, 2:5] = 1
1175
+ >>> segmentation[6:9, 6:9] = 1
1176
+ >>> size, mask = get_largest_connected_component(segmentation)
1177
+ >>> print(size)
1178
+ 12
1179
+ """
1180
+ labels = label(segmentation)
1181
+ assert(labels.max() != 0) # assume at least 1 CC
1182
+ con_comp_sizes = np.bincount(labels.flat)[1:]
1183
+ largest_idx = np.argmax(con_comp_sizes)
1184
+ largest_connected_component = labels == largest_idx + 1
1185
+ return con_comp_sizes[largest_idx], largest_connected_component
1186
+
1187
+
1188
+ def expand_until_neighbor_center_gets_nearer_than_own(shape_to_expand: NDArray[np.uint8], without_shape_i: NDArray[np.uint8],
1189
+ shape_original_centroid: NDArray,
1190
+ ref_centroids: NDArray, kernel: NDArray) -> NDArray[np.uint8]:
1191
+ """
1192
+ Expand a shape until its neighbor's centroid is closer than its own.
1193
+
1194
+ This function takes in several numpy arrays representing shapes and their
1195
+ centroids, and expands the input shape until the distance to the nearest
1196
+ neighboring centroid is less than or equal to the distance between the shape's
1197
+ contour and its own centroid.
1198
+
1199
+ Parameters
1200
+ ----------
1201
+ shape_to_expand : ndarray of uint8
1202
+ The binary shape to be expanded.
1203
+ without_shape_i : ndarray of uint8
1204
+ A binary array representing the area without the shape.
1205
+ shape_original_centroid : ndarray
1206
+ The centroid of the original shape.
1207
+ ref_centroids : ndarray
1208
+ Reference centroids to compare distances with.
1209
+ kernel : ndarray
1210
+ The kernel for dilation operation.
1211
+
1212
+ Returns
1213
+ -------
1214
+ ndarray of uint8
1215
+ The expanded shape.
1216
+ """
1217
+ # shape_to_expand=test_shape
1218
+ # shape_i=0
1219
+ # shape_original_centroid=ordered_centroids[shape_i, :]
1220
+ # ref_centroids=np.delete(ordered_centroids, shape_i, axis=0)
1221
+ # kernel=self.small_kernels
1222
+ previous_shape_to_expand = shape_to_expand.copy()
1223
+ without_shape = deepcopy(without_shape_i)
1224
+ if ref_centroids.shape[0] > 1:
1225
+ # Calculate the distance between the focal shape centroid and its 10% nearest neighbor centroids
1226
+ centroid_distances = np.sqrt(np.square(ref_centroids[1:, 0] - shape_original_centroid[0]) + np.square(
1227
+ ref_centroids[1:, 1] - shape_original_centroid[1]))
1228
+ nearest_shapes = np.where(np.greater_equal(np.quantile(centroid_distances, 0.1), centroid_distances))[0]
1229
+
1230
+ # Use the nearest neighbor distance as a maximal reference to get the minimal distance between the border of the shape and the neighboring centroids
1231
+ neighbor_mindist = np.min(centroid_distances)
1232
+ idx = np.nonzero(shape_to_expand)
1233
+ for shape_j in nearest_shapes:
1234
+ neighbor_mindist = np.minimum(neighbor_mindist, np.min(
1235
+ np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
1236
+ neighbor_mindist *= 0.5
1237
+ # Get the maximal distance of the focal shape between its contour and its centroids
1238
+ itself_maxdist = np.max(
1239
+ np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
1240
+ else:
1241
+ itself_maxdist = np.max(shape_to_expand.shape)
1242
+ neighbor_mindist = itself_maxdist
1243
+ nearest_shapes = []
1244
+ # Put 1 at the border of the reference image so that the while loop stops once the border is reached
1245
+ without_shape[0, :] = 1
1246
+ without_shape[:, 0] = 1
1247
+ without_shape[without_shape.shape[0] - 1, :] = 1
1248
+ without_shape[:, without_shape.shape[1] - 1] = 1
1249
+
1250
+ # Compare the distance between the shape's contour and its own centroid with the distance between this contour and the neighboring centroids.
1251
+ # Keep expanding as long as the distance covered by the shape (from its centroid) stays smaller than its distance to the nearest neighboring centroid.
1252
+ while np.logical_and(np.any(np.less_equal(itself_maxdist, neighbor_mindist)),
1253
+ np.count_nonzero(shape_to_expand * without_shape) == 0):
1254
+ previous_shape_to_expand = shape_to_expand.copy()
1255
+ # Dilate the shape by the kernel size
1256
+ shape_to_expand = cv2.dilate(shape_to_expand, kernel, iterations=1,
1257
+ borderType=cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED)
1258
+ # Extract the new connected component
1259
+ shape_nb, shape_to_expand = cv2.connectedComponents(shape_to_expand, ltype=cv2.CV_16U)
1260
+ shape_to_expand = shape_to_expand.astype(np.uint8)
1261
+ # Use the new shape coordinates to compute the updated distances between the shape and its centroid, and between the shape and neighboring centroids
1262
+ idx = np.nonzero(shape_to_expand)
1263
+ for shape_j in nearest_shapes:
1264
+ neighbor_mindist = np.minimum(neighbor_mindist, np.min(
1265
+ np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
1266
+ itself_maxdist = np.max(
1267
+ np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
1268
+ return previous_shape_to_expand
1269
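# Editor's usage sketch (illustrative): grow the left blob until its contour gets too close to
# the right blob's centroid, then keep the last safe expansion. Assumes that cross_33 (the 3x3
# cross kernel used throughout this module) is importable alongside the function.
import numpy as np
from cellects.image_analysis.morphological_operations import (
    cross_33, expand_until_neighbor_center_gets_nearer_than_own)

canvas_shape = (30, 60)
focal = np.zeros(canvas_shape, dtype=np.uint8)
focal[12:18, 8:14] = 1                        # focal blob on the left
others = np.zeros(canvas_shape, dtype=np.uint8)
others[12:18, 45:51] = 1                      # neighbouring blob on the right
centroids = np.array([[10.5, 14.5],           # focal centroid, (x, y) as returned by OpenCV
                      [47.5, 14.5]])          # neighbour centroid
expanded = expand_until_neighbor_center_gets_nearer_than_own(
    focal, others, centroids[0], centroids[1:], cross_33)
print(focal.sum(), expanded.sum())            # the focal blob only ever grows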
+
1270
+
1271
+ def image_borders(dimensions: tuple, shape: str="rectangular") -> NDArray[np.uint8]:
1272
+ """
1273
+ Create an image with borders, either rectangular or circular.
1274
+
1275
+ Parameters
1276
+ ----------
1277
+ dimensions : tuple
1278
+ The dimensions of the image (height, width).
1279
+ shape : str, optional
1280
+ The shape of the borders. Options are "rectangular" or "circular".
1281
+ Defaults to "rectangular".
1282
+
1283
+ Returns
1284
+ -------
1285
+ out : ndarray of uint8
1286
+ The image with borders. If the shape is "circular", an ellipse border;
1287
+ if "rectangular", a rectangular border.
1288
+
1289
+ Examples
1290
+ --------
1291
+ >>> borders = image_borders((3, 3), "rectangular")
1292
+ >>> print(borders)
1293
+ [[0 0 0]
1294
+ [0 1 0]
1295
+ [0 0 0]]
1296
+ """
1297
+ if shape == "circular":
1298
+ borders = create_ellipse(dimensions[0], dimensions[1])
1299
+ img_contours = image_borders(dimensions)
1300
+ borders = borders * img_contours
1301
+ else:
1302
+ borders = np.ones(dimensions, dtype=np.uint8)
1303
+ borders[0, :] = 0
1304
+ borders[:, 0] = 0
1305
+ borders[- 1, :] = 0
1306
+ borders[:, - 1] = 0
1307
+ return borders
1308
+
1309
+
1310
+ def get_radius_distance_against_time(binary_video: NDArray[np.uint8], field) -> Tuple[NDArray[np.float32], int, int]:
1311
+ """
1312
+ Calculate the radius distance against time from a binary video and field.
1313
+
1314
+ This function computes the change in radius distances over time
1315
+ by analyzing a binary video and mapping it to corresponding field values.
1316
+
1317
+ Parameters
1318
+ ----------
1319
+ binary_video : ndarray of uint8
1320
+ Binary video data.
1321
+ field : ndarray
1322
+ Field values to analyze the radius distances against.
1323
+
1324
+ Returns
1325
+ -------
1326
+ distance_against_time : ndarray of float32
1327
+ Radius distances over time.
1328
+ time_start : int
1329
+ Starting time index where the radius distance measurement begins.
1330
+ time_end : int
1331
+ Ending time index where the radius distance measurement ends.
1332
+
1333
+ Examples
1334
+ --------
1335
+ >>> binary_video = np.ones((10, 5, 5), dtype=np.uint8)
1336
+ >>> field = np.ones((5, 5), dtype=np.uint8)  # minimal illustrative field
1337
+ >>> distance_against_time, time_start, time_end = get_radius_distance_against_time(binary_video, field)
1338
+ """
1339
+ pixel_start = np.max(field[field > 0])
1340
+ pixel_end = np.min(field[field > 0])
1341
+ time_span = np.arange(binary_video.shape[0])
1342
+ time_start = 0
1343
+ time_end = time_span[-1]
1344
+ start_not_found: bool = True
1345
+ for t in time_span:
1346
+ if start_not_found:
1347
+ if np.any((field == pixel_start) * binary_video[t, :, :]):
1348
+ start_not_found = False
1349
+ time_start = t
1350
+ if np.any((field == pixel_end) * binary_video[t, :, :]):
1351
+ time_end = t
1352
+ break
1353
+ distance_against_time = np.linspace(pixel_start, pixel_end, (time_end - time_start + 1))
1354
+ distance_against_time = np.round(distance_against_time).astype(np.float32)
1355
+ return distance_against_time, time_start, time_end
1356
+
1357
+
1358
+ def close_holes(binary_img: NDArray[np.uint8]) -> NDArray[np.uint8]:
1359
+ """
1360
+ Close holes in a binary image using connected components analysis.
1361
+
1362
+ This function identifies and closes small holes within the foreground objects of a binary image. It uses connected component analysis to find and fill holes that are smaller than the main object.
1363
+
1364
+ Parameters
1365
+ ----------
1366
+ binary_img : ndarray of uint8
1367
+ Binary input image where holes need to be closed.
1368
+
1369
+ Returns
1370
+ -------
1371
+ out : ndarray of uint8
1372
+ Binary image with closed holes.
1373
+
1374
+ Examples
1375
+ --------
1376
+ >>> binary_img = np.zeros((10, 10), dtype=np.uint8)
1377
+ >>> binary_img[2:8, 2:8] = 1
1378
+ >>> binary_img[4:6, 4:6] = 0 # Creating a hole
1379
+ >>> result = close_holes(binary_img)
1380
+ >>> print(result)
1381
+ [[0 0 0 0 0 0 0 0 0 0]
1382
+ [0 0 0 0 0 0 0 0 0 0]
1383
+ [0 0 1 1 1 1 1 1 0 0]
1384
+ [0 0 1 1 1 1 1 1 0 0]
1385
+ [0 0 1 1 1 1 1 1 0 0]
1386
+ [0 0 1 1 1 1 1 1 0 0]
1387
+ [0 0 1 1 1 1 1 1 0 0]
1388
+ [0 0 1 1 1 1 1 1 0 0]
1389
+ [0 0 0 0 0 0 0 0 0 0]
1390
+ [0 0 0 0 0 0 0 0 0 0]]
1391
+ """
1392
+ #### Third version ####
1393
+ nb, new_order = cv2.connectedComponents(1 - binary_img)
1394
+ if nb > 2:
1395
+ binary_img[new_order > 1] = 1
1396
+ return binary_img
1397
+
1398
+
1399
+ def dynamically_expand_to_fill_holes(binary_video: NDArray[np.uint8], holes: NDArray[np.uint8]) -> Tuple[NDArray[np.uint8], int, NDArray[np.float32]]:
1400
+ """
1401
+ Fill the holes in a binary video by progressively expanding the shape made of ones.
1402
+
1403
+ Parameters
1404
+ ----------
1405
+ binary_video : ndarray of uint8
1406
+ The binary video where holes need to be filled.
1407
+ holes : ndarray of uint8
1408
+ Array representing the holes in the binary video.
1409
+
1410
+ Returns
1411
+ -------
1412
+ out : tuple of ndarray of uint8, int, and ndarray of float32
1413
+ The modified binary video with filled holes,
1414
+ the end time when all holes are filled, and
1415
+ an array of distances against time used to fill the holes.
1416
+
1417
+ Examples
1418
+ --------
1419
+ >>> binary_video = np.zeros((10, 640, 480), dtype=np.uint8)
1420
+ >>> binary_video[:, 300:400, 220:240] = 1
1421
+ >>> holes = np.zeros((640, 480), dtype=np.uint8)
1422
+ >>> holes[340:360, 228:232] = 1
1423
+ >>> filled_video, end_time, distances = dynamically_expand_to_fill_holes(binary_video, holes)
1424
+ >>> print(filled_video.shape) # Should print (10, 640, 480)
1425
+ (10, 640, 480)
1426
+ """
1427
+ # The first fill should happen at the time when the first hole pixel could have been covered:
1428
+ # estimate how long the shape took to cross a distance large enough to overlap all the holes
1429
+ holes_contours = cv2.dilate(holes, cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1430
+ field = rounded_inverted_distance_transform(binary_video[0, :, :], (binary_video.shape[0] - 1))
1431
+ field2 = inverted_distance_transform(binary_video[0, :, :], (binary_video.shape[0] - 1))
1432
+ holes_contours = holes_contours * field * binary_video[- 1, :, :]
1433
+ holes[np.nonzero(holes)] = field[np.nonzero(holes)]
1434
+ if np.any(holes_contours):
1435
+ # Find the relationship between distance and time
1436
+ distance_against_time, holes_time_start, holes_time_end = get_radius_distance_against_time(binary_video, holes_contours)
1437
+ # Use that vector to progressively fill holes at the same speed as shape grows
1438
+ for t in np.arange(len(distance_against_time)):
1439
+ new_order, stats, centers = cc((holes >= distance_against_time[t]).astype(np.uint8))
1440
+ for comp_i in np.arange(1, stats.shape[0]):
1441
+ past_image = deepcopy(binary_video[holes_time_start + t, :, :])
1442
+ with_new_comp = new_order == comp_i
1443
+ past_image[with_new_comp] = 1
1444
+ nb_comp, image_garbage = cv2.connectedComponents(past_image)
1445
+ if nb_comp == 2:
1446
+ binary_video[holes_time_start + t, :, :][with_new_comp] = 1
1447
+ # Make sure that holes remain filled from holes_time_end to the end of the video
1448
+ for t in np.arange((holes_time_end + 1), binary_video.shape[0]):
1449
+ past_image = binary_video[t, :, :]
1450
+ past_image[holes >= distance_against_time[-1]] = 1
1451
+ binary_video[t, :, :] = past_image
1452
+ else:
1453
+ holes_time_end = None
1454
+ distance_against_time = np.array([1, 2], dtype=np.float32)
1455
+
1456
+ return binary_video, holes_time_end, distance_against_time
1457
+
1458
+
1459
+ @njit()
1460
+ def create_ellipse(vsize_in, hsize_in):
1461
+ """
1462
+ Create a 2D array representing an ellipse with given vertical and horizontal sizes.
1463
+
1464
+ This function generates a NumPy boolean array where each element is `True` if the point lies within or on
1465
+ the boundary of an ellipse defined by its vertical and horizontal radii. The ellipse is centered at the center
1466
+ of the array, which corresponds to the midpoint of the given dimensions.
1467
+
1468
+ Parameters
1469
+ ----------
1470
+ vsize_in : int
1471
+ Vertical size (number of rows) in the output 2D array.
1472
+ hsize_in : int
1473
+ Horizontal size (number of columns) in the output 2D array.
1474
+
1475
+ Returns
1476
+ -------
1477
+ NDArray[bool]
1478
+ A boolean NumPy array of shape `(vsize, hsize)` where `True` indicates that a pixel lies within or on
1479
+ the boundary of an ellipse centered at the image's center with radii determined by half of the dimensions.
1480
+
1481
+ Notes
1482
+ -----
1483
+ If either vertical or horizontal size is zero, it defaults to 3 as in the original class behavior.
1484
+ """
1485
+ # Use default values if input sizes are zero
1486
+ vsize = 3 if vsize_in == 0 else vsize_in
1487
+ hsize = 3 if hsize_in == 0 else hsize_in
1488
+
1489
+ # Compute radii (half of each size)
1490
+ vr = hsize // 2
1491
+ hr = vsize // 2
1492
+
1493
+ result = np.empty((vsize, hsize), dtype=np.bool_)
1494
+ if vr > 0 and hr > 0:
1495
+ for i in range(vsize):
1496
+ for j in range(hsize):
1497
+ x = i
1498
+ y = j
1499
+ lhs = ((x - hr) ** 2 / (hr ** 2)) + ((y - vr) ** 2 / (vr ** 2))
1500
+ result[i, j] = lhs <= 1
1501
+ else:
1502
+ result[0, 0] = True
1503
+ return result
1504
+
1505
+ rhombus_55 = create_ellipse(5, 5).astype(np.uint8)
1506
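# Editor's illustrative check (standalone): the 5x5 call above produces the small discrete
# disc stored in rhombus_55. Import path assumed to match the file shown in this diff.
import numpy as np
from cellects.image_analysis.morphological_operations import create_ellipse

print(create_ellipse(5, 5).astype(np.uint8))
# [[0 0 1 0 0]
#  [0 1 1 1 0]
#  [1 1 1 1 1]
#  [0 1 1 1 0]
#  [0 0 1 0 0]]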
+
1507
+ def get_contours(binary_image: NDArray[np.uint8]) -> NDArray[np.uint8]:
1508
+ """
1509
+ Find and return the contours of a binary image.
1510
+
1511
+ This function erodes the input binary image using a 3x3 cross-shaped
1512
+ structuring element and then subtracts the eroded image from the original to obtain the contours.
1513
+
1514
+ Parameters
1515
+ ----------
1516
+ binary_image : ndarray of uint8
1517
+ Input binary image from which to extract contours.
1518
+
1519
+ Returns
1520
+ -------
1521
+ out : ndarray of uint8
1522
+ Image containing only the contours extracted from `binary_image`.
1523
+
1524
+ Examples
1525
+ --------
1526
+ >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1527
+ >>> binary_image[2:8, 2:8] = 1
1528
+ >>> result = get_contours(binary_image)
1529
+ >>> print(result)
1530
+ [[0 0 0 0 0 0 0 0 0 0]
1531
+ [0 0 0 0 0 0 0 0 0 0]
1532
+ [0 0 1 1 1 1 1 1 0 0]
1533
+ [0 0 1 0 0 0 0 1 0 0]
1534
+ [0 0 1 0 0 0 0 1 0 0]
1535
+ [0 0 1 0 0 0 0 1 0 0]
1536
+ [0 0 1 0 0 0 0 1 0 0]
1537
+ [0 0 1 1 1 1 1 1 0 0]
1538
+ [0 0 0 0 0 0 0 0 0 0]
1539
+ [0 0 0 0 0 0 0 0 0 0]]
1540
+ """
1541
+ if binary_image.dtype != np.uint8:
1542
+ binary_image = binary_image.astype(np.uint8)
1543
+ if np.all(binary_image):
1544
+ contours = 1 - image_borders(binary_image.shape)
1545
+ elif np.any(binary_image):
1546
+ eroded_binary = cv2.erode(binary_image, cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1547
+ contours = binary_image - eroded_binary
1548
+ else:
1549
+ contours = binary_image
1550
+ return contours
1551
+
1552
+
1553
+ def get_quick_bounding_boxes(binary_image: NDArray[np.uint8], ordered_image: NDArray, ordered_stats: NDArray) -> Tuple[NDArray, NDArray, NDArray, NDArray]:
1554
+ """
1555
+ Compute bounding boxes for shapes in a binary image.
1556
+
1557
+ Parameters
1558
+ ----------
1559
+ binary_image : NDArray[np.uint8]
1560
+ A 2D array representing the binary image.
1561
+ ordered_image : NDArray
1562
+ An array containing the ordered image data.
1563
+ ordered_stats : NDArray
1564
+ A 2D array with statistics about the shapes in the image.
1565
+
1566
+ Returns
1567
+ -------
1568
+ Tuple[NDArray, NDArray, NDArray, NDArray]
1569
+ A tuple containing four arrays:
1570
+ - top: Array of y-coordinates for the top edge of bounding boxes.
1571
+ - bot: Array of y-coordinates for the bottom edge of bounding boxes.
1572
+ - left: Array of x-coordinates for the left edge of bounding boxes.
1573
+ - right: Array of x-coordinates for the right edge of bounding boxes.
1574
+
1575
+ Examples
1576
+ --------
1577
+ >>> binary_image = np.array([[0, 1], [0, 0], [1, 0]], dtype=np.uint8)
1578
+ >>> ordered_image = np.array([[0, 1], [0, 0], [2, 0]], dtype=np.uint8)
1579
+ >>> ordered_stats = np.array([[1, 0, 1, 1, 1], [0, 2, 1, 1, 1]], dtype=np.int32)
1580
+ >>> top, bot, left, right = get_quick_bounding_boxes(binary_image, ordered_image, ordered_stats)
1581
+ >>> print(top)
1582
+ [-1 1]
1583
+ >>> print(bot)
1584
+ [2 4]
1585
+ >>> print(left)
1586
+ [0 -1]
1587
+ >>> print(right)
1588
+ [3 2]
1589
+ """
1590
+ shapes = get_contours(binary_image)
1591
+ x_min = ordered_stats[:, 0]
1592
+ y_min = ordered_stats[:, 1]
1593
+ x_max = ordered_stats[:, 0] + ordered_stats[:, 2]
1594
+ y_max = ordered_stats[:, 1] + ordered_stats[:, 3]
1595
+ x_min_dist = shapes.shape[1]
1596
+ y_min_dist = shapes.shape[0]
1597
+
1598
+ shapes *= ordered_image
1599
+ shape_nb = (len(np.unique(shapes)) - 1)
1600
+ i = 0
1601
+ a_indices, b_indices = np.triu_indices(shape_nb, 1)
1602
+ a_indices, b_indices = a_indices + 1, b_indices + 1
1603
+ all_distances = np.zeros((len(a_indices), 3), dtype=float)
1604
+ # For every pair of components, find the minimal distance
1605
+ for (a, b) in zip(a_indices, b_indices):
1606
+ x_dist = np.absolute(x_max[a - 1] - x_min[b - 1])
1607
+ y_dist = np.absolute(y_max[a - 1] - y_min[b - 1])
1608
+ if x_dist < 2 * x_min_dist and y_dist < 2 * y_min_dist:
1609
+ sub_shapes = np.logical_or(shapes == a, shapes == b) * shapes
1610
+ sub_shapes = sub_shapes[np.min((y_min[a - 1], y_min[b - 1])):np.max((y_max[a - 1], y_max[b - 1])),
1611
+ np.min((x_min[a - 1], x_min[b - 1])):np.max((x_max[a - 1], x_max[b - 1]))]
1612
+ sub_shapes[sub_shapes == a] = 1
1613
+ sub_shapes[sub_shapes == b] = 2
1614
+ if np.any(sub_shapes == 1) and np.any(sub_shapes == 2):
1615
+ all_distances[i, :] = a, b, get_minimal_distance_between_2_shapes(sub_shapes, False)
1616
+
1617
+ if x_dist > y_dist:
1618
+ x_min_dist = np.min((x_min_dist, x_dist))
1619
+ else:
1620
+ y_min_dist = np.min((y_min_dist, y_dist))
1621
+ i += 1
1622
+ shape_number = ordered_stats.shape[0]
1623
+ top = np.zeros(shape_number, dtype=np.int64)
1624
+ bot = np.repeat(binary_image.shape[0], shape_number)
1625
+ left = np.zeros(shape_number, dtype=np.int64)
1626
+ right = np.repeat(binary_image.shape[1], shape_number)
1627
+ for shape_i in np.arange(1, shape_nb + 1):
1628
+ # Get where the shape i appear in pairwise comparisons
1629
+ idx = np.nonzero(np.logical_or(all_distances[:, 0] == shape_i, all_distances[:, 1] == shape_i))
1630
+ # Compute the minimal distance related to shape i and divide by 2
1631
+ if all_distances[idx, 2].size > 0:
1632
+ dist = all_distances[idx, 2].min() // 2
1633
+ else:
1634
+ dist = np.int64(1)
1635
+ # Save the coordinates of the arena around shape i
1636
+ top[shape_i - 1] = y_min[shape_i - 1] - dist.astype(np.int64)
1637
+ bot[shape_i - 1] = y_max[shape_i - 1] + dist.astype(np.int64)
1638
+ left[shape_i - 1] = x_min[shape_i - 1] - dist.astype(np.int64)
1639
+ right[shape_i - 1] = x_max[shape_i - 1] + dist.astype(np.int64)
1640
+ return top, bot, left, right
1641
+
1642
+
1643
+
1644
+ def get_bb_with_moving_centers(motion_list: list, all_specimens_have_same_direction: bool,
1645
+ original_shape_hsize: int, binary_image: NDArray,
1646
+ y_boundaries: NDArray):
1647
+ """
1648
+ Get the bounding boxes with moving centers.
1649
+
1650
+ Parameters
1651
+ ----------
1652
+ motion_list : list
1653
+ List of binary images representing the motion frames.
1654
+ all_specimens_have_same_direction : bool
1655
+ Boolean indicating if all specimens move in the same direction.
1656
+ original_shape_hsize : int or None
1657
+ Original height size of the shape. If `None`, a default kernel size is used.
1658
+ binary_image : NDArray
1659
+ Binary image of the initial frame.
1660
+ y_boundaries : NDArray
1661
+ Array defining the y-boundaries for ranking shapes.
1662
+
1663
+ Returns
1664
+ -------
1665
+ tuple
1666
+ A tuple containing:
1667
+ - top : NDArray
1668
+ Array of top coordinates for each bounding box.
1669
+ - bot : NDArray
1670
+ Array of bottom coordinates for each bounding box.
1671
+ - left : NDArray
1672
+ Array of left coordinates for each bounding box.
1673
+ - right : NDArray
1674
+ Array of right coordinates for each bounding box.
1675
+ - ordered_image_i : NDArray
1676
+ Updated binary image with the final ranked shapes.
1677
+
1678
+ Notes
1679
+ -----
1680
+ This function processes each frame to expand and confirm shapes, updating centroids if necessary.
1681
+ It uses morphological operations like dilation to detect shape changes over frames.
1682
+
1683
+ Examples
1684
+ --------
1685
+ >>> top, bot, left, right, ordered_image = get_bb_with_moving_centers(motion_frames, True, None, binary_img, y_bounds)
1686
+ >>> print("Top coordinates:", top)
1687
+ >>> print("Bottom coordinates:", bot)
1688
+ """
1689
+ print("Read and segment each sample image and rank shapes from top to bot and from left to right")
1690
+ k_size = 3
1691
+ if original_shape_hsize is not None:
1692
+ k_size = int((np.ceil(original_shape_hsize / 5) * 2) + 1)
1693
+ big_kernel = create_ellipse(k_size, k_size).astype(np.uint8)
1694
+
1695
+ ordered_stats, ordered_centroids, ordered_image = rank_from_top_to_bottom_from_left_to_right(
1696
+ binary_image, y_boundaries, get_ordered_image=True)
1697
+ blob_number = ordered_stats.shape[0]
1698
+
1699
+ ordered_image_i = deepcopy(ordered_image)
1700
+ logging.info("For each frame, expand each previously confirmed shape to add area to its maximal bounding box")
1701
+ for step_i in np.arange(1, len(motion_list)):
1702
+ previously_ordered_centroids = deepcopy(ordered_centroids)
1703
+ new_image_i = motion_list[step_i].copy()
1704
+ detected_shape_number = blob_number + 1
1705
+ c = 0
1706
+ while c < 5 and detected_shape_number == blob_number + 1:
1707
+ c += 1
1708
+ image_i = new_image_i
1709
+ new_image_i = cv2.dilate(image_i, cross_33, iterations=1)
1710
+ detected_shape_number, _ = cv2.connectedComponents(new_image_i, connectivity=8)
1711
+ if c == 0:
1712
+ break
1713
+ else:
1714
+ for shape_i in range(blob_number):
1715
+ shape_to_expand = np.zeros(image_i.shape, dtype=np.uint8)
1716
+ shape_to_expand[ordered_image_i == (shape_i + 1)] = 1
1717
+ without_shape_i = ordered_image_i.copy()
1718
+ without_shape_i[ordered_image_i == (shape_i + 1)] = 0
1719
+ if k_size != 3:
1720
+ test_shape = expand_until_neighbor_center_gets_nearer_than_own(shape_to_expand, without_shape_i,
1721
+ ordered_centroids[shape_i, :],
1722
+ np.delete(ordered_centroids, shape_i,
1723
+ axis=0), big_kernel)
1724
+ else:
1725
+ test_shape = shape_to_expand
1726
+ test_shape = expand_until_neighbor_center_gets_nearer_than_own(test_shape, without_shape_i,
1727
+ ordered_centroids[shape_i, :],
1728
+ np.delete(ordered_centroids, shape_i,
1729
+ axis=0), cross_33)
1730
+ confirmed_shape = test_shape * image_i
1731
+ ordered_image_i[confirmed_shape > 0] = shape_i + 1
1732
+
1733
+
1734
+ mask_to_display = np.zeros(image_i.shape, dtype=np.uint8)
1735
+ mask_to_display[ordered_image_i > 0] = 1
1736
+
1737
+ # If the blob moves enough to drastically change its gravity center,
1738
+ # update the ordered centroids at each frame.
1739
+ detected_shape_number, mask_to_display = cv2.connectedComponents(mask_to_display,
1740
+ connectivity=8)
1741
+
1742
+ mask_to_display = mask_to_display.astype(np.uint8)
1743
+ while np.logical_and(detected_shape_number - 1 != blob_number,
1744
+ np.sum(mask_to_display > 0) < mask_to_display.size):
1745
+ mask_to_display = cv2.dilate(mask_to_display, cross_33, iterations=1)
1746
+ detected_shape_number, mask_to_display = cv2.connectedComponents(mask_to_display,
1747
+ connectivity=8)
1748
+ mask_to_display[np.nonzero(mask_to_display)] = 1
1749
+ mask_to_display = mask_to_display.astype(np.uint8)
1750
+ ordered_stats, ordered_centroids = rank_from_top_to_bottom_from_left_to_right(mask_to_display, y_boundaries)
1751
+
1752
+ if all_specimens_have_same_direction:
1753
+ # Adjust each centroid position according to the maximal centroid displacement.
1754
+ x_diffs = ordered_centroids[:, 0] - previously_ordered_centroids[:, 0]
1755
+ if np.mean(x_diffs) > 0: # They moved right (toward larger x), we add to x
1756
+ add_to_x = np.max(x_diffs) - x_diffs
1757
+ else: # They moved left (toward smaller x), we subtract from x
1758
+ add_to_x = np.min(x_diffs) - x_diffs
1759
+ ordered_centroids[:, 0] = ordered_centroids[:, 0] + add_to_x
1760
+
1761
+ y_diffs = ordered_centroids[:, 1] - previously_ordered_centroids[:, 1]
1762
+ if np.mean(y_diffs) > 0: # They moved down, we add to y
1763
+ add_to_y = np.max(y_diffs) - y_diffs
1764
+ else: # They moved up, we remove from y
1765
+ add_to_y = np.min(y_diffs) - y_diffs
1766
+ ordered_centroids[:, 1] = ordered_centroids[:, 1] + add_to_y
1767
+
1768
+ ordered_image_i = mask_to_display
1769
+
1770
+ # Save each bounding box
1771
+ top = np.zeros(blob_number, dtype=np.int64)
1772
+ bot = np.repeat(binary_image.shape[0], blob_number)
1773
+ left = np.zeros(blob_number, dtype=np.int64)
1774
+ right = np.repeat(binary_image.shape[1], blob_number)
1775
+ for shape_i in range(blob_number):
1776
+ shape_i_indices = np.where(ordered_image_i == shape_i + 1)
1777
+ left[shape_i] = np.min(shape_i_indices[1])
1778
+ right[shape_i] = np.max(shape_i_indices[1])
1779
+ top[shape_i] = np.min(shape_i_indices[0])
1780
+ bot[shape_i] = np.max(shape_i_indices[0])
1781
+ return top, bot, left, right, ordered_image_i
1782
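# Editor's sketch of the centroid-alignment step in get_bb_with_moving_centers above
# (standalone, made-up numbers): when all specimens drift in the same direction, each
# centroid is shifted so that every one of them shares the largest observed displacement,
# which keeps the top-to-bottom / left-to-right ranking stable across frames.
import numpy as np

prev_x = np.array([10.0, 40.0, 70.0])
curr_x = np.array([14.0, 42.0, 71.0])     # every blob moved toward larger x, by different amounts
x_diffs = curr_x - prev_x                 # [4., 2., 1.], mean > 0
add_to_x = np.max(x_diffs) - x_diffs      # [0., 2., 3.]
print(curr_x + add_to_x)                  # [14. 44. 74.]: all aligned on the fastest mover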
+
1783
+
1784
+ def prepare_box_counting(binary_image: NDArray[np.uint8], min_im_side: int=128, min_mesh_side: int=8, zoom_step: int=0, contours: bool=True)-> Tuple[NDArray[np.uint8], NDArray[np.uint8]]:
1785
+ """Prepare box counting parameters for image analysis.
1786
+
1787
+ Prepares parameters for box counting method based on binary
1788
+ image input. Adjusts image size, computes side lengths, and applies
1789
+ contour extraction if specified.
1790
+
1791
+ Parameters
1792
+ ----------
1793
+ binary_image : ndarray of uint8
1794
+ Binary image for analysis.
1795
+ min_im_side : int, optional
1796
+ Minimum side length threshold. Default is 128.
1797
+ min_mesh_side : int, optional
1798
+ Minimum mesh side length. Default is 8.
1799
+ zoom_step : int, optional
1800
+ Zoom step for side lengths computation. Default is 0.
1801
+ contours : bool, optional
1802
+ Whether to apply contour extraction. Default is True.
1803
+
1804
+ Returns
1805
+ -------
1806
+ out : tuple of ndarray of uint8, ndarray (or None)
1807
+ Cropped binary image and computed side lengths.
1808
+
1809
+ Examples
1810
+ --------
1811
+ >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1812
+ >>> binary_image[2:4, 2:6] = 1
1813
+ >>> binary_image[7:9, 4:7] = 1
1814
+ >>> binary_image[4:7, 5] = 1
1815
+ >>> cropped_img, side_lengths = prepare_box_counting(binary_image, min_im_side=2, min_mesh_side=2)
1816
+ >>> print(cropped_img), print(side_lengths)
1817
+ [[0 0 0 0 0 0 0]
1818
+ [0 1 1 1 1 0 0]
1819
+ [0 1 1 1 1 0 0]
1820
+ [0 0 0 0 1 0 0]
1821
+ [0 0 0 0 1 0 0]
1822
+ [0 0 0 0 1 0 0]
1823
+ [0 0 0 1 0 1 0]
1824
+ [0 0 0 1 1 1 0]
1825
+ [0 0 0 0 0 0 0]]
1826
+ [4 2]
1827
+ """
1828
+ side_lengths = None
1829
+ zoomed_binary = binary_image
1830
+ binary_idx = np.nonzero(binary_image)
1831
+ if binary_idx[0].size:
1832
+ min_y = np.min(binary_idx[0])
1833
+ min_y = np.max((min_y - 1, 0))
1834
+
1835
+ min_x = np.min(binary_idx[1])
1836
+ min_x = np.max((min_x - 1, 0))
1837
+
1838
+ max_y = np.max(binary_idx[0])
1839
+ max_y = np.min((max_y + 1, binary_image.shape[0] - 1))
1840
+
1841
+ max_x = np.max(binary_idx[1])
1842
+ max_x = np.min((max_x + 1, binary_image.shape[1] - 1))
1843
+
1844
+ zoomed_binary = deepcopy(binary_image[min_y:(max_y + 1), min_x: (max_x + 1)])
1845
+ min_side = np.min(zoomed_binary.shape)
1846
+ if min_side >= min_im_side:
1847
+ if contours:
1848
+ zoomed_binary = get_contours(zoomed_binary)
1849
+ if zoom_step == 0:
1850
+ max_power = int(np.floor(np.log2(min_side))) # Largest integer power of 2
1851
+ side_lengths = 2 ** np.arange(max_power, int(np.log2(min_mesh_side // 2)), -1)
1852
+ else:
1853
+ side_lengths = np.arange(min_mesh_side, min_side, zoom_step)
1854
+ return zoomed_binary, side_lengths
1855
+
1856
+
1857
+ def box_counting_dimension(zoomed_binary: NDArray[np.uint8], side_lengths: NDArray, display: bool=False) -> Tuple[float, float, float]:
1858
+ """
1859
+ Box counting dimension calculation.
1860
+
1861
+ This function calculates the box-counting dimension of a binary image by analyzing the number
1862
+ of boxes (of varying sizes) that contain at least one pixel of the image. The function also
1863
+ provides the R-squared value from linear regression and the number of boxes used.
1864
+
1865
+ Parameters
1866
+ ----------
1867
+ zoomed_binary : NDArray[np.uint8]
1868
+ Binary image (0 or 255 values) for which the box-counting dimension is calculated.
1869
+ side_lengths : NDArray
1870
+ Array of side lengths for the boxes used in the box-counting calculation.
1871
+ display : bool, optional
1872
+ If True, displays a scatter plot of the log-transformed box counts and diameters,
1873
+ along with the linear regression fit. Default is False.
1874
+
1875
+ Returns
1876
+ -------
1877
+ out : Tuple[float, float, float]
1878
+ A tuple containing the calculated box-counting dimension (`d`), R-squared value (`r_value`),
1879
+ and the number of boxes used (`box_nb`).
1880
+
1881
+ Examples
1882
+ --------
1883
+ >>> binary_image = np.zeros((10, 10), dtype=np.uint8)
1884
+ >>> binary_image[2:4, 2:6] = 1
1885
+ >>> binary_image[7:9, 4:7] = 1
1886
+ >>> binary_image[4:7, 5] = 1
1887
+ >>> zoomed_binary, side_lengths = prepare_box_counting(binary_image, min_im_side=2, min_mesh_side=2)
1888
+ >>> (dimension, r_value, box_nb)
1889
+ >>> print(dimension, r_value, box_nb)
1890
+ (np.float64(1.1699250014423126), np.float64(0.9999999999999998), 2)
1891
+ """
1892
+ dimension:float = 0.
1893
+ r_value:float = 0.
1894
+ box_nb:float = 0.
1895
+ if side_lengths is not None:
1896
+ box_counts = np.zeros(len(side_lengths), dtype=np.uint64)
1897
+ # Loop through side_lengths and compute block counts
1898
+ for idx, side_length in enumerate(side_lengths):
1899
+ S = np.add.reduceat(
1900
+ np.add.reduceat(zoomed_binary, np.arange(0, zoomed_binary.shape[0], side_length), axis=0),
1901
+ np.arange(0, zoomed_binary.shape[1], side_length),
1902
+ axis=1
1903
+ )
1904
+ box_counts[idx] = len(np.where(S > 0)[0])
1905
+
1906
+ valid_indices = box_counts > 0
1907
+ if valid_indices.sum() >= 2:
1908
+ log_box_counts = np.log(box_counts)
1909
+ log_reciprocal_lengths = np.log(1 / side_lengths)
1910
+ slope, intercept, r_value, p_value, stderr = linregress(log_reciprocal_lengths, log_box_counts)
1911
+ # coefficients = np.polyfit(log_reciprocal_lengths, log_box_counts, 1)
1912
+ dimension = slope
1913
+ box_nb = len(side_lengths)
1914
+ if display:
1915
+ plt.scatter(log_reciprocal_lengths, log_box_counts, label="Box counting")
1916
+ plt.plot([0, log_reciprocal_lengths.min()], [intercept, intercept + slope * log_reciprocal_lengths.min()], label="Linear regression")
1917
+ plt.plot([], [], ' ', label=f"D = {slope:.2f}")
1918
+ plt.plot([], [], ' ', label=f"R2 = {r_value:.6f}")
1919
+ plt.plot([], [], ' ', label=f"p-value = {p_value:.2e}")
1920
+ plt.legend(loc='best')
1921
+ plt.xlabel(f"log(1/Diameter) | Diameter ⊆ [{side_lengths[0]}:{side_lengths[-1]}] (n={box_nb})")
1922
+ plt.ylabel(f"log(Box number) | Box number ⊆ [{box_counts[0]}:{box_counts[-1]}]")
1923
+ plt.show()
1924
+ # plt.close()
1925
+
1926
+ return dimension, r_value, box_nb
1927
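# Editor's note on the docstring example of box_counting_dimension above: with the two mesh
# sides 4 and 2 covering the contour with 4 and 9 boxes respectively (my reading of that
# example), the slope of log(box count) against log(1/side) is log(9/4) / log(2) ≈ 1.1699,
# i.e. the reported box-counting dimension D.
import numpy as np

print(np.log(9 / 4) / np.log(2))          # ≈ 1.1699, matching D from the docstring example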
+
1928
+
1929
+ def keep_shape_connected_with_ref(all_shapes: NDArray[np.uint8], reference_shape: NDArray[np.uint8]) -> NDArray[np.uint8]:
1930
+ """
1931
+ Keep shape connected with reference.
1932
+
1933
+ This function analyzes the connected components of a binary image represented by `all_shapes`
1934
+ and returns the first component that intersects with the `reference_shape`.
1935
+ If no such component is found, it returns None.
1936
+
1937
+ Parameters
1938
+ ----------
1939
+ all_shapes : ndarray of uint8
1940
+ Binary image containing all shapes to analyze.
1941
+ reference_shape : ndarray of uint8
1942
+ Binary reference shape used for intersection check.
1943
+
1944
+ Returns
1945
+ -------
1946
+ out : ndarray of uint8 or None
1947
+ The first connected component of `all_shapes` that intersects the reference shape;
1948
+ the reference shape itself if `all_shapes` has no component; None if no component intersects it.
1949
+
1950
+ Examples
1951
+ -------
1952
+ >>> all_shapes = np.zeros((5, 5), dtype=np.uint8)
1953
+ >>> reference_shape = np.zeros((5, 5), dtype=np.uint8)
1954
+ >>> reference_shape[3, 3] = 1
1955
+ >>> all_shapes[0:2, 0:2] = 1
1956
+ >>> all_shapes[3:4, 3:4] = 1
1957
+ >>> res = keep_shape_connected_with_ref(all_shapes, reference_shape)
1958
+ >>> print(res)
1959
+ [[0 0 0 0 0]
1960
+ [0 0 0 0 0]
1961
+ [0 0 0 0 0]
1962
+ [0 0 0 1 0]
1963
+ [0 0 0 0 0]]
1964
+ """
1965
+ number, order = cv2.connectedComponents(all_shapes, ltype=cv2.CV_16U)
1966
+ expanded_shape = None
1967
+ if number > 1:
1968
+ for i in np.arange(1, number):
1969
+ expanded_shape_test = np.zeros(order.shape, np.uint8)
1970
+ expanded_shape_test[order == i] = 1
1971
+ if np.any(expanded_shape_test * reference_shape):
1972
+ break
1973
+ if np.any(expanded_shape_test * reference_shape):
1974
+ expanded_shape = expanded_shape_test
1975
+ else:
1976
+ expanded_shape = reference_shape
1977
+ return expanded_shape
1978
+
1979
+
1980
+ @njit()
1981
+ def keep_largest_shape(indexed_shapes: NDArray[np.int32]) -> NDArray[np.uint8]:
1982
+ """
1983
+ Keep the largest shape from an array of indexed shapes.
1984
+
1985
+ This function identifies the most frequent non-zero shape in the input
1986
+ array and returns a binary mask where elements matching this shape are set to 1,
1987
+ and others are set to 0. The function uses NumPy's bincount to count occurrences
1988
+ of each shape and assumes that the first element (index 0) is not part of any
1989
+ shape classification.
1990
+
1991
+ Parameters
1992
+ ----------
1993
+ indexed_shapes : ndarray of int32
1994
+ Input array containing indexed shapes.
1995
+
1996
+ Returns
1997
+ -------
1998
+ out : ndarray of uint8
1999
+ Binary mask where the largest shape is marked as 1.
2000
+
2001
+ Examples
2002
+ --------
2003
+ >>> indexed_shapes = np.array([0, 2, 2, 3, 1], dtype=np.int32)
2004
+ >>> keep_largest_shape(indexed_shapes)
2005
+ array([0, 1, 1, 0, 0], dtype=uint8)
2006
+ """
2007
+ label_counts = np.bincount(indexed_shapes.flatten())
2008
+ largest_label = 1 + np.argmax(label_counts[1:])
2009
+ return (indexed_shapes == largest_label).astype(np.uint8)
2010
+
2011
+
2012
+ def keep_one_connected_component(binary_image: NDArray[np.uint8])-> NDArray[np.uint8]:
2013
+ """
2014
+ Keep only one connected component in a binary image.
2015
+
2016
+ This function filters out all but the largest connected component in
2017
+ a binary image, effectively isolating it from other noise or objects.
2018
+ The function ensures the input is in uint8 format before processing.
2019
+
2020
+ Parameters
2021
+ ----------
2022
+ binary_image : ndarray of uint8
2023
+ Binary image containing one or more connected components.
2024
+
2025
+ Returns
2026
+ -------
2027
+ ndarray of uint8
2028
+ Image with only the largest connected component retained.
2029
+
2030
+ Examples
2031
+ -------
2032
+ >>> all_shapes = np.zeros((5, 5), dtype=np.uint8)
2033
+ >>> all_shapes[0:2, 0:2] = 1
2034
+ >>> all_shapes[3:4, 3:4] = 1
2035
+ >>> res = keep_one_connected_component(all_shapes)
2036
+ >>> print(res)
2037
+ [[1 1 0 0 0]
2038
+ [1 1 0 0 0]
2039
+ [0 0 0 0 0]
2040
+ [0 0 0 0 0]
2041
+ [0 0 0 0 0]]
2042
+ """
2043
+ if binary_image.dtype != np.uint8:
2044
+ binary_image = binary_image.astype(np.uint8)
2045
+ num_labels, sh = cv2.connectedComponents(binary_image)
2046
+ if num_labels <= 1:
2047
+ return binary_image.astype(np.uint8)
2048
+ else:
2049
+ return keep_largest_shape(sh)
2050
+
2051
+ def create_mask(dims: Tuple, minmax: Tuple, shape: str):
2052
+ """
2053
+
2054
+ Create a boolean mask based on given dimensions and min/max coordinates.
2055
+
2056
+ Parameters
2057
+ ----------
2058
+ dims : Tuple[int, int]
2059
+ The dimensions of the mask (height and width).
2060
+ minmax : Tuple[int, int, int, int]
2061
+ The minimum and maximum slice bounds of the mask region on the first then
2062
+ second axis: (row_min, row_max, col_min, col_max).
2063
+ shape : str
2064
+ The shape of the mask. Should be either 'circle' or any other value for a rectangular mask.
2065
+
2066
+ Returns
2067
+ -------
2068
+ np.ndarray[bool]
2069
+ A boolean NumPy array with the same dimensions as `dims`, initialized to False,
2070
+ where the specified region (or circle) is set to True.
2071
+
2072
+ Raises
2073
+ ------
2074
+ ValueError
2075
+ If the shape is 'circle' and the ellipse creation fails.
2076
+
2077
+ Notes
2078
+ -----
2079
+ If `shape` is not 'circle', a rectangular mask will be created. The ellipse
2080
+ creation method used may have specific performance considerations.
2081
+
2082
+ Examples
2083
+ --------
2084
+ >>> mask = create_mask((5, 6), (0, 5, 1, 5), 'circle')
2085
+ >>> print(mask)
2086
+ [[False False False True False False]
2087
+ [False False True True True False]
2088
+ [False True True True True False]
2089
+ [False False True True True False]
2090
+ [False False False True False False]]
2091
+ """
2092
+ mask = np.zeros(dims[:2], dtype=bool)
2093
+ if shape == 'circle':
2094
+ ellipse = create_ellipse(minmax[1] - minmax[0], minmax[3] - minmax[2])
2095
+ mask[minmax[0]:minmax[1], minmax[2]:minmax[3], ...] = ellipse
2096
+ else:
2097
+ mask[minmax[0]:minmax[1], minmax[2]:minmax[3]] = 1
2098
+ return mask
2099
+
2100
+ def draw_img_with_mask(img:NDArray, dims: Tuple, minmax: Tuple, shape: str, drawing: Tuple, only_contours: bool=False,
2101
+ dilate_mask: int=0) -> NDArray:
2102
+ """
2103
+
2104
+ Draw an image with a mask and optional contours.
2105
+
2106
+ Draws a subregion of the input image using a specified shape (circle or rectangle),
2107
+ which can be dilated. The mask can be limited to contours only, and an optional
2108
+ drawing (overlay) can be applied within the masked region.
2109
+
2110
+ Parameters
2111
+ ----------
2112
+ img : NDArray
2113
+ The input image to draw on.
2114
+ dims : Tuple[int, int]
2115
+ Dimensions of the subregion (width, height).
2116
+ minmax : Tuple[int, int, int, int]
2117
+ Coordinates of the subregion as slice bounds (row_start, row_end, col_start, col_end).
2118
+ shape : str
2119
+ Shape of the mask to draw ('circle' or 'rectangle').
2120
+ drawing : Tuple[NDArray, NDArray, NDArray]
2121
+ Optional drawing (overlay) to apply within the masked region.
2122
+ only_contours : bool, optional
2123
+ If True, draw only the contours of the shape. Default is False.
2124
+ dilate_mask : int, optional
2125
+ Number of iterations for dilating the mask. Default is 0.
2126
+
2127
+ Returns
2128
+ -------
2129
+ NDArray
2130
+ The modified image with the applied mask and drawing.
2131
+
2132
+ Notes
2133
+ -----
2134
+ This function assumes that the input image is in BGR format (OpenCV style).
2135
+
2136
+ Examples
2137
+ --------
2138
+ >>> dim = (100, 100, 3)
2139
+ >>> img = np.zeros(dim)
2140
+ >>> result = draw_img_with_mask(img, dim, (50, 75, 50, 75), 'circle', (0, 255, 0))
2141
+ >>> print((result == 255).sum())
2142
+ 441
2143
+ """
2144
+ if shape == 'circle':
2145
+ mask = create_ellipse(minmax[1] - minmax[0], minmax[3] - minmax[2]).astype(np.uint8)
2146
+ if only_contours:
2147
+ mask = get_contours(mask)
2148
+ else:
2149
+ if only_contours:
2150
+ mask = 1 - image_borders((minmax[1] - minmax[0], minmax[3] - minmax[2]))
2151
+ else:
2152
+ mask = np.ones((minmax[1] - minmax[0], minmax[3] - minmax[2]), np.uint8)
2153
+ if dilate_mask:
2154
+ mask = cv2.dilate(mask, kernel=cross_33, iterations=dilate_mask)
2155
+ anti_mask = 1 - mask
2156
+ img[minmax[0]:minmax[1], minmax[2]:minmax[3], 0] *= anti_mask
2157
+ img[minmax[0]:minmax[1], minmax[2]:minmax[3], 1] *= anti_mask
2158
+ img[minmax[0]:minmax[1], minmax[2]:minmax[3], 2] *= anti_mask
2159
+ if isinstance(drawing, np.ndarray):
2160
+ if drawing.dtype != np.uint8:
2161
+ drawing = bracket_to_uint8_image_contrast(drawing)
2162
+ drawing = [drawing[:, :, 0], drawing[:, :, 1], drawing[:, :, 2]]
2163
+ img[minmax[0]:minmax[1], minmax[2]:minmax[3], 0] += mask * drawing[0]
2164
+ img[minmax[0]:minmax[1], minmax[2]:minmax[3], 1] += mask * drawing[1]
2165
+ img[minmax[0]:minmax[1], minmax[2]:minmax[3], 2] += mask * drawing[2]
2166
+ return img