cellects 0.1.0.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__init__.py +0 -0
- cellects/__main__.py +49 -0
- cellects/config/__init__.py +0 -0
- cellects/config/all_vars_dict.py +154 -0
- cellects/core/__init__.py +0 -0
- cellects/core/cellects_paths.py +30 -0
- cellects/core/cellects_threads.py +1464 -0
- cellects/core/motion_analysis.py +1931 -0
- cellects/core/one_image_analysis.py +1065 -0
- cellects/core/one_video_per_blob.py +679 -0
- cellects/core/program_organizer.py +1347 -0
- cellects/core/script_based_run.py +154 -0
- cellects/gui/__init__.py +0 -0
- cellects/gui/advanced_parameters.py +1258 -0
- cellects/gui/cellects.py +189 -0
- cellects/gui/custom_widgets.py +789 -0
- cellects/gui/first_window.py +449 -0
- cellects/gui/if_several_folders_window.py +239 -0
- cellects/gui/image_analysis_window.py +1909 -0
- cellects/gui/required_output.py +232 -0
- cellects/gui/video_analysis_window.py +656 -0
- cellects/icons/__init__.py +0 -0
- cellects/icons/cellects_icon.icns +0 -0
- cellects/icons/cellects_icon.ico +0 -0
- cellects/image_analysis/__init__.py +0 -0
- cellects/image_analysis/cell_leaving_detection.py +54 -0
- cellects/image_analysis/cluster_flux_study.py +102 -0
- cellects/image_analysis/extract_exif.py +61 -0
- cellects/image_analysis/fractal_analysis.py +184 -0
- cellects/image_analysis/fractal_functions.py +108 -0
- cellects/image_analysis/image_segmentation.py +272 -0
- cellects/image_analysis/morphological_operations.py +867 -0
- cellects/image_analysis/network_functions.py +1244 -0
- cellects/image_analysis/one_image_analysis_threads.py +289 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
- cellects/image_analysis/shape_descriptors.py +981 -0
- cellects/utils/__init__.py +0 -0
- cellects/utils/formulas.py +881 -0
- cellects/utils/load_display_save.py +1016 -0
- cellects/utils/utilitarian.py +516 -0
- cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
- cellects-0.1.0.dev1.dist-info/METADATA +131 -0
- cellects-0.1.0.dev1.dist-info/RECORD +46 -0
- cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
- cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
- cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,867 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
This script contains methods to compare and modify shapes in binary images
|
|
4
|
+
It contains the following functions:
|
|
5
|
+
- cc : Sort connected components according to sizes
|
|
6
|
+
- make_gravity_field : put a gradient of decreasing numbers around a shape
|
|
7
|
+
- find_median_shape : sum shapes and keep a median image of them
|
|
8
|
+
- make_numbered_rays
|
|
9
|
+
- CompareNeighborsWithFocal : get the number of neighbors having an
|
|
10
|
+
equal/sup/inf value than each cell
|
|
11
|
+
- CompareNeighborsWithValue : get the number of neighbors having an
|
|
12
|
+
equal/sup/inf value than a given value
|
|
13
|
+
- ShapeDescriptors
|
|
14
|
+
- get_radius_distance_against_time : 3D, get a vector of radius distances
|
|
15
|
+
with idx as time
|
|
16
|
+
- expand_until_one
|
|
17
|
+
- expand_and_rate_until_one
|
|
18
|
+
- expand_until_overlap
|
|
19
|
+
- expand_to_fill_holes
|
|
20
|
+
- expand_smalls_toward_biggest
|
|
21
|
+
- change_thresh_until_one
|
|
22
|
+
- Ellipse
|
|
23
|
+
- get_rolling_window_coordinates_list
|
|
24
|
+
"""
|
|
25
|
+
import logging
|
|
26
|
+
from copy import deepcopy
|
|
27
|
+
import cv2
|
|
28
|
+
import numpy as np
|
|
29
|
+
from scipy.spatial import KDTree
|
|
30
|
+
from numba import njit
|
|
31
|
+
from cellects.utils.formulas import moving_average
|
|
32
|
+
from skimage.filters import threshold_otsu
|
|
33
|
+
from skimage.measure import label
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
|
|
37
|
+
square_33 = np.ones((3, 3), np.uint8)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class CompareNeighborsWithValue:
    def __init__(self, array, connectivity=None, data_type=np.int8):
        """
        Summarize each pixel (cell) of a 1D or 2D array by comparing its neighbors to a value.

        The comparison can be equality (is_equal), superiority (is_sup) or inferiority (is_inf).
        In 2D, neighbors are the 4 side pixels (connectivity=4), the 8 surrounding pixels
        (connectivity=8), or only the 4 diagonal pixels for any other connectivity value.
        Borders are handled by replicating the nearest row/column, so border pixels see a
        duplicate of themselves on the missing side.

        :param array: a 1 or 2D array
        :type array: must be less permissive than data_type
        :param connectivity: 4 or 8, if different, only compute diagonal
        :type connectivity: uint8
        :param data_type: the data type used for computation
        :type data_type: type
        """
        array = array.astype(data_type)
        self.array = array
        self.connectivity = connectivity
        if len(self.array.shape) == 1:
            # 1D: shift the vector one step each way, duplicating the end values.
            self.on_the_right = np.append(array[1:], array[-1])
            self.on_the_left = np.append(array[0], array[:-1])
        else:
            # Build 4 windows of the original array, each missing one of the four borders.
            # Grow each window with a copy of the last border at the opposite side of the
            # border that has been deleted, so every view keeps the original shape.
            if self.connectivity == 4 or self.connectivity == 8:
                self.on_the_right = np.column_stack((array[:, 1:], array[:, -1]))
                self.on_the_left = np.column_stack((array[:, 0], array[:, :-1]))
                self.on_the_bot = np.vstack((array[1:, :], array[-1, :]))
                self.on_the_top = np.vstack((array[0, :], array[:-1, :]))
            if self.connectivity != 4:
                # Diagonal views, padded the same way (replicate border row/column).
                self.on_the_topleft = array[:-1, :-1]
                self.on_the_topright = array[:-1, 1:]
                self.on_the_botleft = array[1:, :-1]
                self.on_the_botright = array[1:, 1:]

                self.on_the_topleft = np.vstack((self.on_the_topleft[0, :], self.on_the_topleft))
                self.on_the_topleft = np.column_stack((self.on_the_topleft[:, 0], self.on_the_topleft))

                self.on_the_topright = np.vstack((self.on_the_topright[0, :], self.on_the_topright))
                self.on_the_topright = np.column_stack((self.on_the_topright, self.on_the_topright[:, -1]))

                self.on_the_botleft = np.vstack((self.on_the_botleft, self.on_the_botleft[-1, :]))
                self.on_the_botleft = np.column_stack((self.on_the_botleft[:, 0], self.on_the_botleft))

                self.on_the_botright = np.vstack((self.on_the_botright, self.on_the_botright[-1, :]))
                self.on_the_botright = np.column_stack((self.on_the_botright, self.on_the_botright[:, -1]))

    def is_equal(self, value, and_itself=False):
        """
        Give, for each pixel, the number of neighboring pixels having the value "value".

        :param value: any number. The equal_neighbor_nb matrix will contain, for each pixel,
                      the number of neighboring pixels having that value.
        :param and_itself: When False, the resulting number of neighbors fitting the condition
                           is displayed normally. When True, the count is displayed if and only
                           if the focal pixel ALSO fits the condition, otherwise it receives 0.
        :type and_itself: bool
        :return: each cell of equal_neighbor_nb is the number of neighboring pixels equal to "value"
        :rtype: uint8
        """
        if len(self.array.shape) == 1:
            # BUGFIX: the previous implementation summed the raw neighbor values
            # (on_the_right + on_the_left) instead of counting the neighbors equal
            # to `value`, inconsistent with the 1D branches of is_sup/is_inf.
            self.equal_neighbor_nb = (self.on_the_right == value).astype(self.array.dtype) + \
                                     (self.on_the_left == value).astype(self.array.dtype)
        else:
            if self.connectivity == 4:
                self.equal_neighbor_nb = np.dstack((np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
                                                    np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value)))
            elif self.connectivity == 8:
                self.equal_neighbor_nb = np.dstack(
                    (np.equal(self.on_the_right, value), np.equal(self.on_the_left, value),
                     np.equal(self.on_the_bot, value), np.equal(self.on_the_top, value),
                     np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
                     np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
            else:
                # Any other connectivity: only the diagonal neighbors.
                self.equal_neighbor_nb = np.dstack(
                    (np.equal(self.on_the_topleft, value), np.equal(self.on_the_topright, value),
                     np.equal(self.on_the_botleft, value), np.equal(self.on_the_botright, value)))
            # Collapse the stacked boolean planes into a per-pixel neighbor count.
            self.equal_neighbor_nb = np.sum(self.equal_neighbor_nb, 2, dtype=np.uint8)

        if and_itself:
            self.equal_neighbor_nb[np.not_equal(self.array, value)] = 0

    def is_sup(self, value, and_itself=False):
        """
        Give, for each pixel, the number of neighboring pixels having a higher value than "value".

        :param value: any number. The sup_neighbor_nb matrix will contain, for each pixel,
                      the number of neighboring pixels having a higher value than "value".
        :param and_itself: When False, the resulting number of neighbors fitting the condition
                           is displayed normally. When True, the count is displayed if and only
                           if the focal pixel ALSO fits the condition, otherwise it receives 0.
        :type and_itself: bool
        :return: each cell of sup_neighbor_nb is the number of neighboring pixels with a value higher than "value"
        :rtype: uint8
        """
        if len(self.array.shape) == 1:
            self.sup_neighbor_nb = (self.on_the_right > value).astype(self.array.dtype) + (self.on_the_left > value).astype(self.array.dtype)
        else:
            if self.connectivity == 4:
                self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
                                                  self.on_the_bot > value, self.on_the_top > value))
            elif self.connectivity == 8:
                self.sup_neighbor_nb = np.dstack((self.on_the_right > value, self.on_the_left > value,
                                                  self.on_the_bot > value, self.on_the_top > value,
                                                  self.on_the_topleft > value, self.on_the_topright > value,
                                                  self.on_the_botleft > value, self.on_the_botright > value))
            else:
                self.sup_neighbor_nb = np.dstack((self.on_the_topleft > value, self.on_the_topright > value,
                                                  self.on_the_botleft > value, self.on_the_botright > value))
            # Collapse the stacked boolean planes into a per-pixel neighbor count.
            self.sup_neighbor_nb = np.sum(self.sup_neighbor_nb, 2, dtype=np.uint8)
        if and_itself:
            self.sup_neighbor_nb[np.less_equal(self.array, value)] = 0

    def is_inf(self, value, and_itself=False):
        """
        Give, for each pixel, the number of neighboring pixels having a lower value than "value".

        :param value: any number. The inf_neighbor_nb matrix will contain, for each pixel,
                      the number of neighboring pixels having a lower value than "value".
        :param and_itself: When False, the resulting number of neighbors fitting the condition
                           is displayed normally. When True, the count is displayed if and only
                           if the focal pixel ALSO fits the condition, otherwise it receives 0.
        :type and_itself: bool
        :return: each cell of inf_neighbor_nb is the number of neighboring pixels with a value lower than "value"
        :rtype: uint8
        """
        if len(self.array.shape) == 1:
            self.inf_neighbor_nb = (self.on_the_right < value).astype(self.array.dtype) + (self.on_the_left < value).astype(self.array.dtype)
        else:
            if self.connectivity == 4:
                self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
                                                  self.on_the_bot < value, self.on_the_top < value))
            elif self.connectivity == 8:
                self.inf_neighbor_nb = np.dstack((self.on_the_right < value, self.on_the_left < value,
                                                  self.on_the_bot < value, self.on_the_top < value,
                                                  self.on_the_topleft < value, self.on_the_topright < value,
                                                  self.on_the_botleft < value, self.on_the_botright < value))
            else:
                self.inf_neighbor_nb = np.dstack((self.on_the_topleft < value, self.on_the_topright < value,
                                                  self.on_the_botleft < value, self.on_the_botright < value))
            # Collapse the stacked boolean planes into a per-pixel neighbor count.
            self.inf_neighbor_nb = np.sum(self.inf_neighbor_nb, 2, dtype=np.uint8)
        if and_itself:
            self.inf_neighbor_nb[np.greater_equal(self.array, value)] = 0
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def cc(binary_img):
    """
    Find and order the numbering of connected components according to their sizes.

    stats columns order: left, top, right, bot
    Sort connected components according to sizes.
    The shape touching more than 2 borders is considered as the background.
    If another shape touches more than 2 borders, the second larger shape is considered as background.

    :param binary_img: binary image of the shapes to label
    :return: (relabelled image with 0 = background and labels sorted by decreasing
              size, reordered stats, reordered centroids)
    """
    number, img, stats, centers = cv2.connectedComponentsWithStats(binary_img, ltype=cv2.CV_16U)
    # Pick the smallest unsigned dtype able to hold every label of the output image.
    if number > 255:
        img_dtype = np.uint16
        if number > 65535:
            img_dtype = np.uint32
    else:
        img_dtype = np.uint8
    # Convert cv2's (left, top, width, height) columns into (left, top, right, bot).
    stats[:, 2] = stats[:, 0] + stats[:, 2]
    stats[:, 3] = stats[:, 1] + stats[:, 3]
    # Component indices ranked by decreasing area (column 4 of stats is the pixel count).
    sorted_idx = np.argsort(stats[:, 4])[::-1]

    # Make sure that the first connected component (labelled 0) is the background and not the main shape
    size_ranked_stats = stats[sorted_idx, :]
    # For each size-ranked component, count how many image borders its bounding box touches (0 to 4).
    background = (size_ranked_stats[:, 0] == 0).astype(np.uint8) + (size_ranked_stats[:, 1] == 0).astype(np.uint8) + (
            size_ranked_stats[:, 2] == img.shape[1]).astype(np.uint8) + (
            size_ranked_stats[:, 3] == img.shape[0]).astype(np.uint8)

    # background = ((size_ranked_stats[:, 0] == 0) & (size_ranked_stats[:, 1] == 0) & (size_ranked_stats[:, 2] == img.shape[1]) & (size_ranked_stats[:, 3] == img.shape[0]))

    # Size-rank positions of the components touching more than 2 borders.
    touch_borders = np.nonzero(background > 2)[0]
    # if not isinstance(touch_borders, np.int64):
    #     touch_borders = touch_borders[0]
    # Most of the time, the background should be the largest shape and therefore has the index 0,
    # Then, if there is at least one shape touching more than 2 borders and having not the index 0, solve:
    if np.any(touch_borders != 0):
        # If there is only one shape touching borders, it means that background is not at its right position (i.e. 0)
        if len(touch_borders) == 1:
            # Then exchange that shape position with background position
            shape = sorted_idx[0]  # Store shape position in the first place
            back = sorted_idx[touch_borders[0]]  # Store back position in the first place
            sorted_idx[touch_borders[0]] = shape  # Put shape position at the previous place of back and conversely
            sorted_idx[0] = back
        # If there are two shapes, it means that the main shape grew sufficiently to reach at least 3 borders
        # We assume that it grew larger than background
        else:
            shape = sorted_idx[0]
            back = sorted_idx[1]
            sorted_idx[1] = shape
            sorted_idx[0] = back
            # Put shape position at the previous place of back and conversely

    stats = stats[sorted_idx, :]
    centers = centers[sorted_idx, :]

    # Relabel the image so that label i is the i-th largest component (0 = background).
    new_order = np.zeros_like(binary_img, dtype=img_dtype)

    for i, val in enumerate(sorted_idx):
        new_order[img == val] = i
    return new_order, stats, centers
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def get_largest_connected_component(segmentation):
    """
    Return the number of connected components and a mask of the largest one.

    :param segmentation: binary image to label
    :return: (component count, boolean mask of the largest component)
    """
    labeled = label(segmentation)
    assert (labeled.max() != 0)  # assume at least 1 CC
    # Per-label pixel counts, skipping label 0 (the background).
    component_sizes = np.bincount(labeled.flat)[1:]
    biggest_label = np.argmax(component_sizes) + 1
    return len(component_sizes), labeled == biggest_label
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def make_gravity_field(original_shape, max_distance=None, with_erosion=0):
    """
    Create a field containing a gradient of decreasing values around a shape.

    Comparable to scipy.ndimage.distance_transform_edt(1 - original_shape),
    built here by repeated cross-shaped (4-connected) dilations: pixels close
    to the shape are incremented at every iteration, so the field is highest
    next to the shape and decreases with distance.

    :param original_shape: a binary matrix containing one connected component
    :type original_shape: uint8
    :param max_distance: maximal distance from the original shape the field can reach.
    :type max_distance: uint64
    :param with_erosion: Tells how much the original shape should be eroded before creating the field
    :type with_erosion: uint8

    :return: a matrix of the gravity field (uint32)
    """
    kernel = cross_33
    if with_erosion > 0:
        original_shape = cv2.erode(original_shape, kernel, iterations=with_erosion, borderType=cv2.BORDER_CONSTANT, borderValue=0)
    expand = deepcopy(original_shape)
    if max_distance is not None:
        # Clamp the requested reach to half of the smallest image dimension.
        if max_distance > np.min(original_shape.shape) / 2:
            max_distance = (np.min(original_shape.shape) // 2).astype(np.uint32)
        gravity_field = np.zeros(original_shape.shape, np.uint32)
        # One dilation step per distance unit.
        for gravi in np.arange(max_distance):
            expand = cv2.dilate(expand, kernel, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
            # Pixels inside the current expansion but outside the original shape
            # gain one unit: the closer a pixel, the more often it is incremented.
            gravity_field[np.logical_xor(expand, original_shape)] += 1
    else:
        gravity_field = np.zeros(original_shape.shape, np.uint32)
        # No distance bound: keep expanding until the whole image is covered.
        while np.any(np.equal(original_shape + expand, 0)):
            expand = cv2.dilate(expand, kernel, iterations=1, borderType=cv2.BORDER_CONSTANT, borderValue=0)
            gravity_field[np.logical_xor(expand, original_shape)] += 1
    return gravity_field
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
@njit()
def get_line_points(start, end):
    """
    Return every integer coordinate on the segment from start to end.

    Bresenham-style error accumulation: at each step the coordinate whose
    accumulated error allows it advances by one, until the end point is hit.

    Args:
        start: tuple (x, y) - starting point
        end: tuple (x, y) - ending point

    Returns:
        numpy array of shape (n, 2) with all integer coordinates
    """
    cur_x, cur_y = start
    end_x, end_y = end

    # Axis spans and step directions.
    span_x = np.abs(end_x - cur_x)
    span_y = np.abs(end_y - cur_y)
    step_x = 1 if cur_x < end_x else -1
    step_y = 1 if cur_y < end_y else -1

    error = span_x - span_y
    visited = [[cur_x, cur_y]]

    # Advance until the end point has been appended.
    while not (cur_x == end_x and cur_y == end_y):
        doubled_error = 2 * error
        if doubled_error > -span_y:
            error -= span_y
            cur_x += step_x
        if doubled_error < span_x:
            error += span_x
            cur_y += step_y
        visited.append([cur_x, cur_y])

    return np.array(visited)
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
def get_all_line_coordinates(start_point, end_points):
    """
    Get coordinates for lines from one point to many points.

    Automatically determines the right number of points for continuous lines.

    Args:
        start_point: array (x, y) - starting point
        end_points: array of (x, y) rows - ending points

    Returns:
        list of numpy arrays (uint64), each containing coordinates for one line
    """
    # BUGFIX: `isinstance(arr.dtype, float)` is always False (a numpy dtype
    # object is never a float instance), so the original guard converted
    # unconditionally. np.issubdtype performs the intended dtype check.
    if not np.issubdtype(start_point.dtype, np.floating):
        start_point = start_point.astype(float)
    if not np.issubdtype(end_points.dtype, np.floating):
        end_points = end_points.astype(float)

    lines = []
    for end_point in end_points:
        line_coords = get_line_points(start_point, end_point)
        lines.append(np.array(line_coords, dtype=np.uint64))
    return lines
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def get_every_coord_between_2_points(point_A, point_B):
    """
    Return the (y, x) coordinates of every pixel on the segment [A, B].

    Only works in a 2D space. Vertical and horizontal segments are handled
    directly; oblique ones are approximated on the discrete grid by stretching
    the shorter coordinate vector until it matches the longer one.

    :param point_A: y and x coordinates of the first point in a 2D space
    :param point_B: y and x coordinates of the second point in a 2D space
    :return: a 2-row matrix: row 0 holds y values, row 1 holds x values
    """
    y_a, x_a = point_A[0], point_A[1]
    y_b, x_b = point_B[0], point_B[1]

    if x_a == x_b:
        # Vertical segment: walk along y, x stays constant.
        y_lo, y_hi = np.sort((y_a, y_b))
        ys = np.arange(y_lo, y_hi + 1).astype(np.uint64)
        return np.vstack((ys, np.repeat(x_a, len(ys)).astype(np.uint64)))

    if y_a == y_b:
        # Horizontal segment: walk along x, y stays constant.
        x_lo, x_hi = np.sort((x_a, x_b))
        xs = np.arange(x_lo, x_hi + 1).astype(np.uint64)
        return np.vstack((np.repeat(y_a, len(xs)).astype(np.uint64), xs))

    # Oblique segment: integer ranges spanned on each axis.
    xs = np.arange(np.min((x_a, x_b)), np.max((x_a, x_b)) + 1).astype(np.uint64)
    ys = np.arange(np.min((y_a, y_b)), np.max((y_a, y_b)) + 1).astype(np.uint64)
    grown_x, grown_y = xs, ys
    # Stretch the shorter axis by repeating each of its values.
    if len(xs) > len(ys):
        grown_y = np.repeat(ys, np.round(len(xs) / len(ys)))
    if len(xs) < len(ys):
        grown_x = np.repeat(xs, np.round(len(ys) / len(xs)))

    # Even out any residual length difference by duplicating the last value.
    while len(grown_x) > len(grown_y):
        grown_y = np.append(grown_y, grown_y[-1])
    while len(grown_x) < len(grown_y):
        grown_x = np.append(grown_x, grown_x[-1])

    if (x_b < x_a and y_b > y_a) or (x_b > x_a and y_b < y_a):
        # The segment slopes the other way: x must decrease while y increases.
        return np.vstack((grown_y, np.sort(grown_x)[::-1]))
    return np.vstack((grown_y, grown_x))
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def draw_me_a_sun(main_shape, cross_33, ray_length_coef=2):
    """
    Draw numbered rays around one shape. These rays are perpendicular to the tangent of the contour of the shape.

    :param main_shape: a binary matrix containing one connected component
    :param cross_33: A 3*3 matrix containing a binary cross
    :param ray_length_coef: coefficient telling the distance of the rays of the sun
    :return: a vector of the number of each ray, the shape with the numbered rays
    """
    # Label the components; row 0 of the centroids is the background, row 1 the main shape.
    img, stats, center = cc(main_shape)
    main_center = center[1, :]
    # One-pixel-thick ring just outside the shape (dilation minus original).
    dilated_main_shape = cv2.dilate(main_shape, cross_33)
    dilated_main_shape -= main_shape
    first_ring_idx = np.nonzero(dilated_main_shape)
    # Project every ring pixel away from the centroid by ray_length_coef.
    # NOTE(review): centroids appear to be (x, y) so index 1 is paired with the
    # row indices here — confirm against cv2.connectedComponentsWithStats.
    second_ring_y = main_center[1] + ((first_ring_idx[0] - main_center[1]) * ray_length_coef)
    second_ring_x = main_center[0] + ((first_ring_idx[1] - main_center[0]) * ray_length_coef)
    # Make sure that no negative value try to make rays go beyond the image border
    while np.logical_and(np.logical_or(np.min(second_ring_y, 0) < 0, np.min(second_ring_x, 0) < 0),
                         ray_length_coef > 1):
        # Shrink the rays progressively until they all fit inside the image.
        ray_length_coef -= 0.1
        second_ring_y = main_center[1] + ((first_ring_idx[0] - main_center[1]) * ray_length_coef)
        second_ring_x = main_center[0] + ((first_ring_idx[1] - main_center[0]) * ray_length_coef)

    second_ring_idx = ((np.round(second_ring_y).astype(np.uint64), np.round(second_ring_x).astype(np.uint64)))
    sun = np.zeros(main_shape.shape, np.uint32)
    # sun[second_ring_idx[0], second_ring_idx[1]]=1
    # Ray labels start at 2 so they cannot collide with the 0/1 values of a binary mask.
    rays = np.arange(len(first_ring_idx[0])) + 2
    for ray in rays:
        # Draw the segment linking the contour pixel to its projected counterpart.
        segment = get_every_coord_between_2_points((first_ring_idx[0][ray - 2], first_ring_idx[1][ray - 2]),
                                                   (second_ring_idx[0][ray - 2], second_ring_idx[1][ray - 2]))
        try:
            sun[segment[0], segment[1]] = ray
        except IndexError:
            logging.error("The algorithm allowing to correct errors around initial shape partially failed. The initial shape may be too close from the borders of the current arena")

    return rays, sun
|
|
445
|
+
|
|
446
|
+
|
|
447
|
+
def find_median_shape(binary_3d_matrix):
    """
    Sum along the first axis of a binary 3D matrix and create a binary matrix
    of the pixels that are true at least half of the time.

    :param binary_3d_matrix: stack of binary images, time on axis 0
    :type binary_3d_matrix: uint8
    :return: a 2D binary matrix
    :rtype: uint8
    """
    # Direct vectorized reduction: np.apply_along_axis(np.sum, 0, ...) gives the
    # same result but loops over every (y, x) position in Python, which is slow.
    presence_count = np.sum(binary_3d_matrix, axis=0)
    median_shape = np.zeros(presence_count.shape, dtype=np.uint8)
    # A pixel belongs to the median shape when present in at least half the frames.
    median_shape[np.greater_equal(presence_count, binary_3d_matrix.shape[0] // 2)] = 1
    return median_shape
|
|
460
|
+
|
|
461
|
+
|
|
462
|
+
@njit()
def reduce_image_size_for_speed(image_of_2_shapes):
    """
    Divide the image into 4 overlapping rectangles.
    If the two shapes can be found in one of these, divide this rectangle into 4.
    Repeat the above algorithm until image slicing would separate the two shapes.

    :param image_of_2_shapes: a uint8 numpy array with 0 as background, 1 for one shape and 2 for the other
    :return: indices of shape 1 and shape 2 within the smallest rectangle containing both
    """
    sub_image = image_of_2_shapes.copy()
    y_size, x_size = sub_image.shape
    images_list = [sub_image]
    good_images = [0]
    sub_image = images_list[good_images[0]]
    # BUGFIX: the original condition used `|`/`&`, which bind tighter than
    # comparisons in Python, so it parsed as chained comparisons that were
    # always False and the reduction loop never ran. Rewritten with the
    # intended logical operators.
    while (len(good_images) == 1 or len(good_images) == 2) and y_size > 3 and x_size > 3:
        y_size, x_size = sub_image.shape
        # Four quadrants overlapping by one row/column so no pixel pair is split.
        images_list = []
        images_list.append(sub_image[:((y_size // 2) + 1), :((x_size // 2) + 1)])
        images_list.append(sub_image[:((y_size // 2) + 1), (x_size // 2):])
        images_list.append(sub_image[(y_size // 2):, :((x_size // 2) + 1)])
        images_list.append(sub_image[(y_size // 2):, (x_size // 2):])
        # Keep the quadrants containing both shapes.
        good_images = []
        for idx, image in enumerate(images_list):
            if np.any(image == 2):
                if np.any(image == 1):
                    good_images.append(idx)
        if len(good_images) == 0:
            # The split separated the two shapes: keep the previous window.
            break
        if len(good_images) == 2:
            if good_images == [0, 1]:
                # Both top quadrants: keep the top half (height shrinks).
                sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
            elif good_images == [0, 2]:
                # Both left quadrants: keep the left half (width shrinks).
                sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
            elif good_images == [1, 3]:
                # Both right quadrants: keep the right half (width shrinks).
                sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=0)
            elif good_images == [2, 3]:
                # Both bottom quadrants: keep the bottom half (height shrinks).
                sub_image = np.concatenate((images_list[good_images[0]], images_list[good_images[1]]), axis=1)
            else:
                # Diagonal quadrant pair: no axis-aligned reduction is possible,
                # and looping again would not change anything (fixes a potential
                # infinite loop once the while condition works).
                break
        else:
            sub_image = images_list[good_images[0]]

    shape1_idx = np.nonzero(sub_image == 1)
    shape2_idx = np.nonzero(sub_image == 2)
    return shape1_idx, shape2_idx
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
def get_minimal_distance_between_2_shapes(image_of_2_shapes, increase_speed=True):
    """
    Fast function to get the minimal distance between two shapes.

    :param image_of_2_shapes: image with 0 as background, 1 for one shape and 2 for the other
    :type image_of_2_shapes: uint8
    :param increase_speed: when True, first crop the image to the smallest window containing both shapes
    :type increase_speed: bool
    :return: the minimal pairwise distance between the two shapes
    """
    if increase_speed:
        first_shape_idx, second_shape_idx = reduce_image_size_for_speed(image_of_2_shapes)
    else:
        first_shape_idx = np.nonzero(image_of_2_shapes == 1)
        second_shape_idx = np.nonzero(image_of_2_shapes == 2)
    # Nearest-neighbor query: for every shape-2 pixel, distance to the closest shape-1 pixel.
    lookup = KDTree(np.transpose(first_shape_idx))
    distances, _ = lookup.query(np.transpose(second_shape_idx), 1)
    return np.min(distances)
|
|
524
|
+
|
|
525
|
+
|
|
526
|
+
def find_major_incline(vector, natural_noise):
    """
    Find the major incline of a curve given a certain level of noise.

    The smoothed curve is trimmed from both ends while doing so does not reduce
    its peak-to-peak extent by more than a fraction of the noise level; the
    numbers of samples removed on each side locate the incline.

    :param vector: Values drawing a curve containing one major incline
    :param natural_noise: The extent of a curve containing no major incline in the same conditions
    :type natural_noise: uint64
    :return: (left, right) number of samples trimmed from each end
    """
    trimmed_left = 0
    trimmed_right = 1
    min_length = np.max((5, 2 * natural_noise))
    vector = moving_average(vector, 5)
    full_extent = np.ptp(vector)

    # Trim from the left with a loose threshold (quarter of the noise).
    current_extent = full_extent
    while len(vector) > min_length and current_extent > (full_extent - (natural_noise / 4)):
        vector = vector[1:]
        current_extent = np.ptp(vector)
        trimmed_left += 1
    # Trim from the right (half of the noise).
    current_extent = full_extent
    while len(vector) > min_length and current_extent > (full_extent - natural_noise / 2):
        vector = vector[:-1]
        current_extent = np.ptp(vector)
        trimmed_right += 1
    # Trim from the left again, with the full-noise (strongest) threshold.
    current_extent = full_extent
    while len(vector) > min_length and current_extent > (full_extent - natural_noise):
        vector = vector[1:]
        current_extent = np.ptp(vector)
        trimmed_left += 1

    # Everything got trimmed away: there is no incline, reset the bounds.
    if len(vector) <= min_length:
        trimmed_left = 0
        trimmed_right = 1
    return trimmed_left, trimmed_right
|
|
563
|
+
|
|
564
|
+
|
|
565
|
+
def rank_from_top_to_bottom_from_left_to_right(binary_image, y_boundaries, get_ordered_image=False):
    """binary_image=self.first_image.validated_shapes; y_boundaries=self.first_image.y_boundaries; get_ordered_image=True

    Rank the connected components of a binary image row by row: top to bottom, and
    from left to right within each row.

    :param binary_image: Zeros 2D array with ones where shapes have been detected and validated
    :param y_boundaries: Zeros 1D array of the vertical size of the image with 1 and -1 where rows start and end
    :param get_ordered_image: Boolean telling if output should contain a zeros 2D image with shapes drew with integer
    from 1 to the number of shape according to their position
    :return: (ordered_stats, ordered_centroids), plus the labeled image when get_ordered_image is True
    """
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
                                                                               connectivity=8)

    # Drop the background component (label 0) from the centroid list.
    centroids = centroids[1:, :]
    # NOTE(review): uint8 caps the orderable component count at 255 — confirm callers
    # never exceed that.
    final_order = np.zeros(centroids.shape[0], dtype=np.uint8)
    # Sort all component centroids by their y coordinate (top to bottom).
    sorted_against_y = np.argsort(centroids[:, 1])
    # row_nb = (y_boundaries == 1).sum()
    row_nb = np.max(((y_boundaries == 1).sum(), (y_boundaries == - 1).sum()))
    # Assume each row holds the same number of components (ceiling for the last row).
    component_per_row = int(np.ceil((nb_components - 1) / row_nb))
    # First algorithm: slice the y-sorted centroids into rows of fixed size, then sort
    # each slice by x (left to right).
    for row_i in range(row_nb):
        row_i_start = row_i * component_per_row
        if row_i == (row_nb - 1):
            # Last row takes whatever components remain.
            sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:], 0])
            final_order[row_i_start:] = sorted_against_y[row_i_start:][sorted_against_x]
        else:
            row_i_end = (row_i + 1) * component_per_row
            sorted_against_x = np.argsort(centroids[sorted_against_y[row_i_start:row_i_end], 0])
            final_order[row_i_start:row_i_end] = sorted_against_y[row_i_start:row_i_end][sorted_against_x]
    ordered_centroids = centroids[final_order, :]
    ordered_stats = stats[1:, :]
    ordered_stats = ordered_stats[final_order, :]

    # If it fails, use another algo. Failure heuristic: index 0 appearing more than once
    # in final_order means some slot was never assigned.
    if (final_order == 0).sum() > 1:
        nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(binary_image.astype(np.uint8),
                                                                                   connectivity=8)
        # First order according to x: from left to right
        # Remove the background and order centroids along x axis
        centroids = centroids[1:, :]
        x_order = np.argsort(centroids[:, 0])
        centroids = centroids[x_order, :]

        # Then use the boundaries of each Y peak to sort these shapes row by row
        # NOTE(review): if y_boundaries is None on this fallback path, y_order is never
        # bound and the indexing below raises NameError — confirm callers always pass
        # y_boundaries when this branch can trigger.
        if y_boundaries is not None:
            binary_image = deepcopy(output)
            binary_image[np.nonzero(binary_image)] = 1
            # Here -1 marks a row start and +1 a row end (opposite of the docstring
            # wording above) — presumably a sign convention from the caller; verify.
            y_starts, y_ends = np.argwhere(y_boundaries == - 1), np.argwhere(y_boundaries == 1)

            # Try increasingly tight margins around each row interval until one succeeds.
            margins_ci = np.array((0.5, 0.4, 0.3, 0.2, 0.1))
            for margin in margins_ci:
                ranking_success: bool = True
                y_order = np.zeros(centroids.shape[0], dtype=np.uint8)
                count: int = 0
                y_margins = (y_ends - y_starts) * margin# 0.3
                # Loop and try to fill each row with all components, fail if the final number is wrong
                for y_interval in np.arange(len(y_starts)):
                    for patch_i in np.arange(nb_components - 1):
                        # Compare the y coordinate of the centroid with the detected y intervals with
                        # an added margin in order to order coordinates
                        if np.logical_and(centroids[patch_i, 1] >= (y_starts[y_interval] - y_margins[y_interval]),
                                          centroids[patch_i, 1] <= (y_ends[y_interval] + y_margins[y_interval])):
                            try:
                                y_order[count] = patch_i
                                count = count + 1
                            except IndexError as exc:
                                # More matches than slots: this margin is too loose.
                                ranking_success = False

                if ranking_success:
                    break
                else:
                    ranking_success = False
            # if that all tested margins failed, do not rank_from_top_to_bottom_from_left_to_right, i.e. keep automatic ranking
            if not ranking_success:
                y_order = np.arange(centroids.shape[0])

        # Second order according to y: from top to bottom
        ordered_centroids = centroids[y_order, :]
        ordered_stats = stats[1:, :]
        ordered_stats = ordered_stats[x_order, :]
        ordered_stats = ordered_stats[y_order, :]

    if get_ordered_image:
        # Redraw each component with its rank (1-based) as pixel value.
        ordered_image = np.zeros(binary_image.shape, dtype=np.uint8)
        for patch_j in np.arange(centroids.shape[0]):
            # Crop the label image to the component's bounding box to find its label.
            sub_output = output[ordered_stats[patch_j, 1]: (ordered_stats[patch_j, 1] + ordered_stats[patch_j, 3]), ordered_stats[patch_j, 0]: (ordered_stats[patch_j, 0] + ordered_stats[patch_j, 2])]
            sub_output = np.sort(np.unique(sub_output))
            if len(sub_output) == 1:
                # Bounding box contains a single label: use it directly.
                ordered_image[output == sub_output[0]] = patch_j + 1
            else:
                # First label is the background (0); the component is the next one.
                ordered_image[output == sub_output[1]] = patch_j + 1


        return ordered_stats, ordered_centroids, ordered_image
    else:
        return ordered_stats, ordered_centroids
|
|
659
|
+
|
|
660
|
+
|
|
661
|
+
def expand_until_neighbor_center_gets_nearer_than_own(shape_to_expand, without_shape_i, shape_original_centroid,
                                                      ref_centroids, kernel):
    """
    Expand one shape until its border becomes nearer to the center of neighboring cells than to its own center
    :param shape_to_expand: Binary image containing the focal shape only
    :param shape_original_centroid: The centroid coordinates of the focal shape at the true beginning
    :param without_shape_i: Binary image all shapes except the focal one
    :param ref_centroids: (x, y) centroid coordinates of all components; row 0 is skipped below —
        presumably the cv2 background label, TODO confirm against callers
    :param kernel: Binary matrix containing a circle of 1, copying roughly a slime mold growth
    :return: Binary image containing the focal shape only, but expanded until it reach a border
    or got too close from a neighbor
    """

    # Work on a copy so the caller's "everything but the focal shape" mask is untouched.
    without_shape = deepcopy(without_shape_i)
    # Calculate the distance between the focal shape centroid and its 10% nearest neighbor centroids
    centroid_distances = np.sqrt(np.square(ref_centroids[1:, 0] - shape_original_centroid[0]) + np.square(
        ref_centroids[1:, 1] - shape_original_centroid[1]))
    # Keep only the neighbors whose centroid distance is below the 10% quantile.
    nearest_shapes = np.where(np.greater(np.quantile(centroid_distances, 0.1), centroid_distances))[0]

    # Use the nearest neighbor distance as a maximal reference to get the minimal distance between the border of the shape and the neighboring centroids
    neighbor_mindist = np.min(centroid_distances)
    # idx is (row_indices, col_indices) of the focal shape's pixels; note idx[1] pairs
    # with the x centroid coordinate and idx[0] with y.
    idx = np.nonzero(shape_to_expand)
    for shape_j in nearest_shapes:
        neighbor_mindist = np.minimum(neighbor_mindist, np.min(
            np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
    # Halve it so the expansion stops well before touching a neighbor's centroid.
    neighbor_mindist *= 0.5
    # Get the maximal distance of the focal shape between its contour and its centroids
    itself_maxdist = np.max(
        np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))

    # Put 1 at the border of the reference image in order to be able to stop the while loop once border reached
    without_shape[0, :] = 1
    without_shape[:, 0] = 1
    without_shape[without_shape.shape[0] - 1, :] = 1
    without_shape[:, without_shape.shape[1] - 1] = 1

    # Compare the distance between the contour of the shape and its centroid with this contour with the centroids of neighbors
    # Continue as the distance made by the shape (from its centroid) keeps being smaller than its distance with the nearest centroid.
    previous_shape_to_expand = deepcopy(shape_to_expand)
    while np.logical_and(np.any(np.less_equal(itself_maxdist, neighbor_mindist)),
                         np.count_nonzero(shape_to_expand * without_shape) == 0):
        # Remember the last valid state before this dilation step.
        previous_shape_to_expand = deepcopy(shape_to_expand)
        # Dilate the shape by the kernel size
        shape_to_expand = cv2.dilate(shape_to_expand, kernel, iterations=1,
                                     borderType=cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED)
        # Extract the new connected component
        shape_nb, shape_to_expand = cv2.connectedComponents(shape_to_expand, ltype=cv2.CV_16U)
        shape_to_expand = shape_to_expand.astype(np.uint8)
        # Use the new shape coordinates to calculate the new distances of the shape with its centroid and with neighboring centroids
        idx = np.nonzero(shape_to_expand)
        for shape_j in nearest_shapes:
            neighbor_mindist = np.minimum(neighbor_mindist, np.min(
                np.sqrt(np.square(ref_centroids[shape_j, 0] - idx[1]) + np.square(ref_centroids[shape_j, 1] - idx[0]))))
        itself_maxdist = np.max(
            np.sqrt(np.square(shape_original_centroid[0] - idx[1]) + np.square(shape_original_centroid[1] - idx[0])))
    # Return the state preceding the last dilation, i.e. before a border or a neighbor was reached.
    return previous_shape_to_expand
|
|
716
|
+
|
|
717
|
+
|
|
718
|
+
def image_borders(dimensions):
    """
    Build a frame mask: ones everywhere except on the outermost rows/columns, which are 0.

    :param dimensions: shape of the output array, e.g. (height, width)
    :return: uint8 array of the requested shape with zeroed borders
    :rtype: np.ndarray
    """
    frame = np.ones(dimensions, dtype=np.uint8)
    # Zero the first/last row and the first/last column in one fancy-indexing pass each.
    frame[[0, -1], :] = 0
    frame[:, [0, -1]] = 0
    return frame
|
|
731
|
+
|
|
732
|
+
|
|
733
|
+
def get_radius_distance_against_time(binary_video, field):
    """
    Relate distance in a gravity field to the growth of a binary shape over a video.

    Finds the first frame where the shape touches the field's farthest (max) value and
    the first frame where it reaches the nearest (min) value, then interpolates the
    field values linearly over that time window.

    :param binary_video: a binary video (t, y, x) of a growing/moving shape
    :type binary_video: uint8
    :param field: a gravity field around an initial shape
    :return: (rounded distances per frame as float32, start frame, end frame)
    """
    positive_values = field[field > 0]
    pixel_start = np.max(positive_values)
    pixel_end = np.min(positive_values)
    frames = np.arange(binary_video.shape[0])
    time_start = 0
    time_end = frames[-1]
    start_found = False
    for frame_i in frames:
        current = binary_video[frame_i, :, :]
        # First frame overlapping the maximal field value marks the start.
        if not start_found and np.any((field == pixel_start) * current):
            start_found = True
            time_start = frame_i
        # First frame overlapping the minimal field value marks the end.
        if np.any((field == pixel_end) * current):
            time_end = frame_i
            break
    # One distance value per frame of the [time_start, time_end] window.
    distances = np.linspace(pixel_start, pixel_end, (time_end - time_start + 1))
    return np.round(distances).astype(np.float32), time_start, time_end
|
|
758
|
+
|
|
759
|
+
|
|
760
|
+
def expand_to_fill_holes(binary_video, holes):
    """
    Progressively fill the given holes in a binary video, at the same speed as the
    shape itself grows, and keep them filled once covered.

    :param binary_video: binary video (t, y, x) of a growing shape; modified in place
    :param holes: binary mask of the holes to fill; modified in place (reused as a
        distance map below)
    :return: (binary_video, frame index after which holes stay filled or None,
        distance-per-frame vector)
    """
    # first move should be the time at which the first pixel hole could have been covered
    # it should ask how much time the shape made to cross a distance long enough to overlap all holes
    holes_contours = cv2.dilate(holes, cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
    # Gravity field decreasing away from the shape's initial position.
    field = make_gravity_field(binary_video[0, :, :], (binary_video.shape[0] - 1))
    # Keep only hole contours that the shape has reached by the last frame.
    holes_contours = holes_contours * field * binary_video[- 1, :, :]
    # Reuse the holes mask as a distance map: each hole pixel now carries its field value.
    holes[np.nonzero(holes)] = field[np.nonzero(holes)]
    if np.any(holes_contours):
        # Find the relationship between distance and time
        distance_against_time, holes_time_start, holes_time_end = get_radius_distance_against_time(binary_video, holes_contours)
        # Use that vector to progressively fill holes at the same speed as shape grows
        for t in np.arange(len(distance_against_time)):
            # Hole pixels within the distance reached at time t, split into components.
            new_order, stats, centers = cc((holes >= distance_against_time[t]).astype(np.uint8))
            for comp_i in np.arange(1, stats.shape[0]):
                past_image = deepcopy(binary_video[holes_time_start + t, :, :])
                with_new_comp = new_order == comp_i
                past_image[with_new_comp] = 1
                nb_comp, image_garbage = cv2.connectedComponents(past_image)
                # Only accept the fill if it merges with the shape into a single
                # component (background + one shape = 2 labels).
                if nb_comp == 2:
                    binary_video[holes_time_start + t, :, :][with_new_comp] = 1
        # Make sure that holes remain filled from holes_time_end to the end of the video
        for t in np.arange((holes_time_end + 1), binary_video.shape[0]):
            past_image = binary_video[t, :, :]
            past_image[holes >= distance_against_time[-1]] = 1
            binary_video[t, :, :] = past_image
    else:
        # No reachable hole contour: nothing to fill; return placeholder values.
        holes_time_end = None
        distance_against_time = [1, 2]

    return binary_video, holes_time_end, distance_against_time
|
|
790
|
+
|
|
791
|
+
|
|
792
|
+
def change_thresh_until_one(grayscale_image, binary_image, lighter_background):
    """
    Re-threshold a shape's bounding-box region until it holds a single connected component.

    Starting from the Otsu threshold of the crop, the threshold is moved one step at a
    time (up for a lighter background, down otherwise) until at most one component
    (plus background) remains or the 8-bit threshold range is exhausted.

    :param grayscale_image: 2D grayscale image the shape was segmented from
    :param binary_image: binary mask containing the (possibly fragmented) detected shape
    :param lighter_background: True when the background is lighter than the specimen
    :return: binary mask of the same shape as binary_image with the re-thresholded crop
    """
    coord = np.nonzero(binary_image)
    # np.nonzero returns (row_indices, col_indices): coord[0] indexes rows (y) and
    # coord[1] indexes columns (x). The previous version swapped them, cropping a
    # transposed bounding box.
    min_row, max_row = np.min(coord[0]), np.max(coord[0])
    min_col, max_col = np.min(coord[1]), np.max(coord[1])
    # +1 so the pixels lying on the max row/column belong to the crop.
    gray_img = grayscale_image[min_row:(max_row + 1), min_col:(max_col + 1)]
    # threshold = get_otsu_threshold(gray_img)
    threshold = threshold_otsu(gray_img)
    bin_img = (gray_img < threshold).astype(np.uint8)
    detected_shape_number, bin_img = cv2.connectedComponents(bin_img, ltype=cv2.CV_16U)
    # NOTE(review): `gray_img < threshold` selects dark pixels in both branches —
    # presumably the darker-background case relies on decreasing the threshold to shed
    # spurious dark components; confirm against callers.
    while (detected_shape_number > 2) and (0 < threshold < 255):
        if lighter_background:
            threshold += 1
        else:
            threshold -= 1
        bin_img = (gray_img < threshold).astype(np.uint8)
        detected_shape_number, bin_img = cv2.connectedComponents(bin_img, ltype=cv2.CV_16U)
    # NOTE(review): bin_img holds component labels (0/1 once a single component remains);
    # verify callers expect a strictly binary mask if the loop can exit with >2 labels.
    binary_image = np.zeros_like(binary_image, np.uint8)
    binary_image[min_row:(max_row + 1), min_col:(max_col + 1)] = bin_img
    return binary_image
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
class Ellipse:
    def __init__(self, sizes):
        """
        Generator of a binary ellipse mask fitting a (vertical, horizontal) size.

        Usage: Ellipse

        :param sizes: pair (vertical size, horizontal size) of the output array
        """
        self.vsize = sizes[0]
        self.hsize = sizes[1]
        # NOTE: the radius names are swapped relative to the axes (vr comes from the
        # horizontal size, hr from the vertical one), but ellipse_fun applies them to
        # the matching fromfunction axes, so the resulting mask is correct.
        self.vr = self.hsize // 2
        self.hr = self.vsize // 2

    def ellipse_fun(self, x, y):
        """
        Evaluate the ellipse equation over index grids.

        :param x: axis-0 (row) index grid
        :param y: axis-1 (column) index grid
        :return: boolean array, True inside the ellipse
        """
        row_term = ((x - self.hr) ** 2) / (self.hr ** 2)
        col_term = ((y - self.vr) ** 2) / (self.vr ** 2)
        return row_term + col_term <= 1

    def create(self):
        """Return the (vsize, hsize) boolean ellipse mask."""
        # if self.hsize % 2 == 0:
        #     self.hsize += 1
        # if self.vsize % 2 == 0:
        #     self.vsize += 1
        return np.fromfunction(self.ellipse_fun, (self.vsize, self.hsize))
|
|
841
|
+
|
|
842
|
+
|
|
843
|
+
def get_rolling_window_coordinates_list(height, width, side_length, window_step, allowed_pixels=None):
    """
    List the [y_min, y_max, x_min, x_max] coordinates of square windows of side
    side_length rolled over a (height, width) grid with stride window_step.

    :param height: grid height in pixels
    :param width: grid width in pixels
    :param side_length: side of each (square) window
    :param window_step: offset step applied within each tile
    :param allowed_pixels: optional mask; windows with no allowed pixel are skipped
    :return: list of [y_min, y_max, x_min, x_max] coordinate quadruplets
    """
    # Tile boundaries; the last boundary absorbs the remainder so it ends at the edge.
    row_bounds = np.arange(height // side_length + 1, dtype=np.uint64) * side_length
    col_bounds = np.arange(width // side_length + 1, dtype=np.uint64) * side_length
    row_bounds[-1] += height % side_length
    col_bounds[-1] += width % side_length
    offsets = np.arange(0, side_length, window_step, dtype=np.uint64)
    windows = []
    for r_i in range(len(row_bounds) - 1):
        for c_i in range(len(col_bounds) - 1):
            for dy in offsets:
                # Clamp the shifted window to the grid's bottom edge.
                y_max = np.min((height, row_bounds[r_i + 1] + dy)).astype(np.uint64)
                for dx in offsets:
                    # Clamp the shifted window to the grid's right edge.
                    x_max = np.min((width, col_bounds[c_i + 1] + dx)).astype(np.uint64)
                    y_min = row_bounds[r_i] + dy
                    x_min = col_bounds[c_i] + dx
                    if allowed_pixels is None or np.any(allowed_pixels[y_min:y_max, x_min:x_max]):
                        windows.append([y_min, y_max, x_min, x_max])
    return windows
|
|
862
|
+
|
|
863
|
+
|
|
864
|
+
def get_contours(binary_image):
    """
    Return the one-pixel-wide outline of a binary shape.

    Erodes the mask with the 3x3 cross kernel and subtracts the eroded interior,
    leaving only the boundary pixels.

    :param binary_image: binary (0/1) mask of one or several shapes
    :return: binary mask containing only the shapes' contour pixels
    """
    interior = cv2.erode(binary_image, cross_33)
    return binary_image - interior
|