cellects 0.1.0.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__init__.py +0 -0
- cellects/__main__.py +49 -0
- cellects/config/__init__.py +0 -0
- cellects/config/all_vars_dict.py +154 -0
- cellects/core/__init__.py +0 -0
- cellects/core/cellects_paths.py +30 -0
- cellects/core/cellects_threads.py +1464 -0
- cellects/core/motion_analysis.py +1931 -0
- cellects/core/one_image_analysis.py +1065 -0
- cellects/core/one_video_per_blob.py +679 -0
- cellects/core/program_organizer.py +1347 -0
- cellects/core/script_based_run.py +154 -0
- cellects/gui/__init__.py +0 -0
- cellects/gui/advanced_parameters.py +1258 -0
- cellects/gui/cellects.py +189 -0
- cellects/gui/custom_widgets.py +789 -0
- cellects/gui/first_window.py +449 -0
- cellects/gui/if_several_folders_window.py +239 -0
- cellects/gui/image_analysis_window.py +1909 -0
- cellects/gui/required_output.py +232 -0
- cellects/gui/video_analysis_window.py +656 -0
- cellects/icons/__init__.py +0 -0
- cellects/icons/cellects_icon.icns +0 -0
- cellects/icons/cellects_icon.ico +0 -0
- cellects/image_analysis/__init__.py +0 -0
- cellects/image_analysis/cell_leaving_detection.py +54 -0
- cellects/image_analysis/cluster_flux_study.py +102 -0
- cellects/image_analysis/extract_exif.py +61 -0
- cellects/image_analysis/fractal_analysis.py +184 -0
- cellects/image_analysis/fractal_functions.py +108 -0
- cellects/image_analysis/image_segmentation.py +272 -0
- cellects/image_analysis/morphological_operations.py +867 -0
- cellects/image_analysis/network_functions.py +1244 -0
- cellects/image_analysis/one_image_analysis_threads.py +289 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
- cellects/image_analysis/shape_descriptors.py +981 -0
- cellects/utils/__init__.py +0 -0
- cellects/utils/formulas.py +881 -0
- cellects/utils/load_display_save.py +1016 -0
- cellects/utils/utilitarian.py +516 -0
- cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
- cellects-0.1.0.dev1.dist-info/METADATA +131 -0
- cellects-0.1.0.dev1.dist-info/RECORD +46 -0
- cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
- cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
- cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
This script contains 2 classes used by the OneImageAnalysis class
|
|
4
|
+
They are threads to process the first image and save the selected combinations simultaneously
|
|
5
|
+
"""
|
|
6
|
+
import threading
|
|
7
|
+
import logging
|
|
8
|
+
from copy import deepcopy
|
|
9
|
+
import numpy as np
|
|
10
|
+
import cv2
|
|
11
|
+
from cellects.image_analysis.image_segmentation import otsu_thresholding, combine_color_spaces
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ProcessFirstImage:
|
|
15
|
+
def __init__(self, l):
    """Build the processor and immediately run the first-image analysis.

    :param l: list of parameters forwarded unchanged to :meth:`start_processing`
    """
    # Instantiating the object is how callers trigger the analysis of one
    # color-space combination; all the work happens in start_processing.
    self.start_processing(l)
|
|
17
|
+
|
|
18
|
+
def start_processing(self, l):
    """
    Will process the first image according to rules and parameters in l.

    Two modes, selected by flags in l:
    - get_one_channel_result: segment the image obtained from one color-space
      combination (l[3] is the combination dict) and save its features.
    - combine_channels: starting from a previously saved combination (l[3] is
      its index), try to merge it with other saved combinations and keep the
      merge when it does not increase the shape count and keeps >90% of area.

    :param l: list containing the necessary data to process the first image
              (parent, flags, masks, spot parameters, kmeans cluster number...)
    """
    self.parent = l[0]
    get_one_channel_result = l[1]
    combine_channels = l[2]
    self.all_c_spaces = self.parent.all_c_spaces
    self.several_blob_per_arena = l[4]
    self.sample_number = l[5]
    self.spot_size = l[6]
    kmeans_clust_nb = l[7]
    self.biomask = l[8]
    self.backmask = l[9]
    if get_one_channel_result:
        # l[3] is the color-space combination dict in this mode
        self.csc_dict = l[3]
        self.image = combine_color_spaces(self.csc_dict, self.all_c_spaces)
        # Segment either with Otsu thresholding or with kmeans clustering
        if kmeans_clust_nb is None:
            self.binary_image = otsu_thresholding(self.image)
        else:
            self.kmeans(kmeans_clust_nb, self.biomask, self.backmask)
        # self.parent.image = self.image
        # self.parent.kmeans(kmeans_clust_nb, self.biomask, self.backmask)
        # self.binary_image = self.parent.binary_image
        self.unaltered_concomp_nb, shapes = cv2.connectedComponents(self.binary_image)
        # Only keep segmentations that are neither empty nor absurdly noisy
        if 1 < self.unaltered_concomp_nb < 10000:
            self.total_area = np.sum(self.binary_image)
            # Reject segmentations covering almost nothing or >75% of the image
            if 100 < self.total_area < self.binary_image.size * 0.75:
                self.process_binary_image()
                self.parent.save_combination_features(self)
        # except RuntimeWarning:
        #     logging.info("Make sure that scaling and spot size are correct")
    if combine_channels:
        # l[3] is the index of an already saved combination in this mode
        i = l[3]
        possibilities = l[10]
        saved_color_space_list = self.parent.saved_color_space_list
        combination_features = self.parent.combination_features
        self.csc_dict = saved_color_space_list[i]
        # Baseline to beat: features of the unmerged combination
        previous_shape_number = combination_features[i, 4]
        previous_sum = combination_features[i, 5]
        for j in possibilities[::-1]:
            csc_dict2 = saved_color_space_list[j]
            csc_dict = deepcopy(self.csc_dict)
            keys = list(csc_dict.keys())

            k2 = list(csc_dict2.keys())[0]
            v2 = csc_dict2[k2]
            # Skip candidates whose channel already contributes to the combination
            if np.isin(k2, keys) and np.sum(v2 * csc_dict[k2]) != 0:
                break
            # NOTE(review): indentation reconstructed from a flattened diff —
            # confirm that segmentation/evaluation runs once per weighting factor.
            for factor in [2, 1]:
                # Add (or strengthen) the candidate channel with the given weight
                if np.isin(k2, keys):
                    csc_dict[k2] += v2 * factor
                else:
                    csc_dict[k2] = v2 * factor
                self.image = combine_color_spaces(csc_dict, self.all_c_spaces)
                if kmeans_clust_nb is None:
                    self.binary_image = otsu_thresholding(self.image)
                else:
                    self.kmeans(kmeans_clust_nb, self.biomask, self.backmask)
                self.process_binary_image()
                self.total_area = self.validated_shapes.sum()
                # Keep the merge if it does not fragment the detection and
                # preserves at least 90% of the previously detected area
                if previous_shape_number >= self.shape_number and self.total_area > previous_sum * 0.9:
                    previous_shape_number = self.shape_number
                    previous_sum = self.total_area
                    self.csc_dict = deepcopy(csc_dict)
                    self.unaltered_concomp_nb = combination_features[i, 3]
                    self.parent.save_combination_features(self)
                    logging.info(str(saved_color_space_list[i]) + "-->" + str(self.csc_dict ))
|
|
87
|
+
|
|
88
|
+
def shape_selection(self, horizontal_size, shape, confint, do_not_delete=None):
    """
    This method use the statistics of the connected components of a binary image to make shape selection.

    Two passes: first remove components whose bounding-box width is outside
    horizontal_size * (1 ± confint); then, on the relabelled result, remove
    components whose area does not match the expected area of the reference
    geometry. Results are written back to self.shapes2 / self.stats /
    self.centroids / self.validated_shapes / self.shape_number.

    :param horizontal_size: the average horizontal size of one shape in pixels
    :param shape: the geometry of the shape: circle or rectangle
    :param confint: confidence interval for horizontal size and shape detection
    :param do_not_delete: binary image with 1 in area drawn by the user as "Cell"
    :return: A binary matrix of the resulting validated shapes and the number of shapes detected
    """
    # counter+=1;horizontal_size = self.spot_size; shape = self.parent.spot_shapes[counter];confint = self.parent.spot_size_confints[::-1][counter]
    # stats columns contain in that order:
    # - x leftmost coordinate of boundingbox
    # - y topmost coordinate of boundingbox
    # - The horizontal size of the bounding box.
    # - The vertical size of the bounding box.
    # - The total area (in pixels) of the connected component.

    # First, remove each stain which horizontal size varies too much from reference
    size_interval = [horizontal_size * (1 - confint), horizontal_size * (1 + confint)]
    cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 2] < size_interval[0], self.stats[:, 2] > size_interval[1]))

    if do_not_delete is None:
        self.shapes2[np.isin(self.shapes2, cc_to_remove)] = 0
    else:
        # Components overlapping the user-drawn "Cell" area are protected
        self.shapes2[np.logical_and(np.isin(self.shapes2, cc_to_remove), np.logical_not(np.isin(self.shapes2, do_not_delete)))] = 0

    # Second, determine the shape of each stain to only keep the ones corresponding to the reference shape
    shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
    shapes[self.shapes2 > 0] = 1
    nb_components, self.shapes2, self.stats, self.centroids = cv2.connectedComponentsWithStats(shapes,
                                                                                               connectivity=8)
    if nb_components > 1:
        if shape == 'circle':
            # Expected area of a disk of the given diameter, within confint
            surf_interval = [np.pi * np.square(horizontal_size // 2) * (1 - confint), np.pi * np.square(horizontal_size // 2) * (1 + confint)]
            cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
        elif shape == 'rectangle':
            # If the smaller side is the horizontal one, use the user provided horizontal side
            if np.argmin((np.mean(self.stats[1:, 2]), np.mean(self.stats[1:, 3]))) == 0:
                surf_interval = [np.square(horizontal_size) * (1 - confint), np.square(horizontal_size) * (1 + confint)]
                cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
            # If the smaller side is the vertical one, use the median vertical length shape
            else:
                surf_interval = [np.square(np.median(self.stats[1:, 3])) * (1 - confint), np.square(np.median(self.stats[1:, 3])) * (1 + confint)]
                cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
        else:
            # NOTE(review): when shape is invalid, cc_to_remove keeps the
            # width-based labels from the first pass, which no longer match
            # the relabelled image — confirm this fallback is intentional.
            logging.info("Original blob shape not well written")

        if do_not_delete is None:
            self.shapes2[np.isin(self.shapes2, cc_to_remove)] = 0
        else:
            self.shapes2[np.logical_and(np.isin(self.shapes2, cc_to_remove),
                                        np.logical_not(np.isin(self.shapes2, do_not_delete)))] = 0
    # There was only that before:
    shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
    shapes[np.nonzero(self.shapes2)] = 1

    # Relabel once more so stats/centroids describe the surviving shapes
    nb_components, self.shapes2, self.stats, self.centroids = cv2.connectedComponentsWithStats(shapes, connectivity=8)
    self.validated_shapes = shapes
    self.shape_number = nb_components - 1
|
|
147
|
+
|
|
148
|
+
def kmeans(self, cluster_number, biomask=None, backmask=None, bio_label=None):
    """
    Use of Kmeans to detect the Cell(s) after having segmented the grayscale image into two or more categories.

    Writes self.binary_image (uint8 mask of the cluster considered "Cell")
    and self.bio_label (the chosen cluster label).

    :param cluster_number: the number of categories to find
    :param biomask: the mask of pixels marked as Cell(s) by the user
    :param backmask: the mask of pixels marked as Background by the user
    :param bio_label: if given, directly use this cluster label as "Cell"
    :return: None (results stored on self)
    """
    # Flatten to an (N, 1) float32 sample matrix as required by cv2.kmeans
    image = self.image.reshape((-1, 1))
    image = np.float32(image)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    compactness, label, center = cv2.kmeans(image, cluster_number, None, criteria, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
    # Map each pixel back to its cluster label, in image geometry
    kmeans_image = np.uint8(label.flatten().reshape(self.image.shape[:2]))
    sum_per_label = np.zeros(cluster_number)
    self.binary_image = np.zeros(self.image.shape[:2], np.uint8)
    if bio_label is not None:
        self.binary_image[np.nonzero(kmeans_image == bio_label)] = 1
        self.bio_label = bio_label
    else:
        if biomask is not None:
            # The cluster most represented under the user's "Cell" mask wins
            all_labels = kmeans_image[biomask[0], biomask[1]]
            for i in range(cluster_number):
                sum_per_label[i] = (all_labels == i).sum()
            # NOTE(review): np.nonzero returns a tuple of arrays here, not a
            # scalar label — the comparison below relies on broadcasting;
            # confirm behavior when several clusters tie.
            self.bio_label = np.nonzero(sum_per_label == np.max(sum_per_label))
        elif backmask is not None:
            # The cluster least represented under the "Background" mask wins
            all_labels = kmeans_image[backmask[0], backmask[1]]
            for i in range(cluster_number):
                sum_per_label[i] = (all_labels == i).sum()
            self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
        else:
            # No user hint: assume the smallest cluster is the Cell
            for i in range(cluster_number):
                sum_per_label[i] = (kmeans_image == i).sum()
            self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
        self.binary_image[np.nonzero(kmeans_image == self.bio_label)] = 1
|
|
183
|
+
|
|
184
|
+
def process_binary_image(self, use_bio_and_back_masks=False):
    """
    Process the binary image to get the final validated shapes.

    Starts by computing connected components, then removes the background
    pixels marked by the user, then, if there are not several blobs per
    arena, selects spots according to their sizes. Writes
    self.validated_shapes, self.shapes, self.stats, self.centroids and
    self.shape_number (number of components, background excluded).

    :param use_bio_and_back_masks: if true, will use the cell(s) and background marked by the user
    :return: None (results stored on self)
    """
    self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
        self.binary_image, connectivity=8)
    do_not_delete = None
    if use_bio_and_back_masks:
        if self.backmask is not None:
            # Any component touching the user's background mask is discarded
            if np.any(self.shapes[self.backmask]):
                self.shapes[np.isin(self.shapes, np.unique(self.shapes[self.backmask]))] = 0
                self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
                    (self.shapes > 0).astype(np.uint8), connectivity=8)
                self.shape_number -= 1
        if self.biomask is not None:
            # Components touching the user's "Cell" mask are protected later on
            if np.any(self.shapes[self.biomask]):
                do_not_delete = np.unique(self.shapes[self.biomask])
                do_not_delete = do_not_delete[do_not_delete != 0]
    if not self.several_blob_per_arena and self.spot_size is not None:
        counter = 0
        self.shapes2 = deepcopy(self.shapes)
        # Try successive (shape, confidence-interval) pairs until the expected
        # number of spots is found or the candidate list is exhausted
        while self.shape_number != self.sample_number and counter < len(self.parent.spot_size_confints):
            self.shape_selection(horizontal_size=self.spot_size, shape=self.parent.spot_shapes[counter],
                                 confint=self.parent.spot_size_confints[counter], do_not_delete=do_not_delete)
            logging.info(f"Shape selection algorithm found {self.shape_number} disconnected shapes")
            counter += 1
        if self.shape_number == self.sample_number:
            self.shapes = self.shapes2
    if self.shape_number == self.sample_number:
        self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
        self.validated_shapes[self.shapes > 0] = 1
    else:
        # Fallback: keep components with a plausible area only
        max_size = self.binary_image.size * 0.75
        min_size = 10
        cc_to_remove = np.argwhere(np.logical_or(self.stats[1:, 4] < min_size, self.stats[1:, 4] > max_size)) + 1
        self.shapes[np.isin(self.shapes, cc_to_remove)] = 0
        self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
        self.validated_shapes[self.shapes > 0] = 1
        self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
            self.validated_shapes,
            connectivity=8)
        if not self.several_blob_per_arena and self.sample_number is not None and self.shape_number > self.sample_number:
            # Sort shapes by size and compare the largest with the second largest
            # If the difference is too large, remove that largest shape.
            cc_to_remove = np.array([], dtype=np.uint8)
            to_remove = np.array([], dtype=np.uint8)
            # Drop the background row; stats rows now map to labels row+1
            self.stats = self.stats[1:, :]
            while self.stats.shape[0] > self.sample_number and to_remove is not None:
                # 1) rank by height
                sorted_height = np.argsort(self.stats[:, 2])
                # and only consider the number of shapes we want to detect
                standard_error = np.std(self.stats[sorted_height, 2][-self.sample_number:])
                differences = np.diff(self.stats[sorted_height, 2])
                # Look for very big changes from one height to the next
                if differences.any() and np.max(differences) > 2 * standard_error:
                    # Within these, remove shapes that are too large
                    to_remove = sorted_height[np.argmax(differences)]
                    cc_to_remove = np.append(cc_to_remove, to_remove + 1)
                    self.stats = np.delete(self.stats, to_remove, 0)

                else:
                    to_remove = None
            self.shapes[np.isin(self.shapes, cc_to_remove)] = 0
            self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
            self.validated_shapes[self.shapes > 0] = 1
            self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
                self.validated_shapes,
                connectivity=8)

    # Exclude the background component from the reported count
    self.shape_number -= 1
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
class SaveCombinationThread(threading.Thread):
    """Thread that saves one processed color-space combination into the parent.

    Callers must assign ``self.process_i`` (a ProcessFirstImage-like object)
    before calling ``start()``; ``run`` reads it and appends its results to
    the parent's lists and feature table.
    """
    def __init__(self, parent=None):
        # super(SaveCombinationThread, self).__init__()
        threading.Thread.__init__(self)
        # parent: the object holding saved_images_list, combination_features, etc.
        self.parent = parent

    def run(self):
        """
        Save the current process_i data into the combination_features list.

        Appends the validated mask, converted image and combination dict to the
        parent's lists, then fills one row of parent.combination_features with
        the combination's channel weights and summary statistics.
        :return: None
        """
        logging.info(f"Saving results from the color space combination: {self.process_i.csc_dict}. {self.process_i.shape_number} distinct spots detected.")
        self.parent.saved_images_list.append(self.process_i.validated_shapes)
        self.parent.converted_images_list.append(np.round(self.process_i.image).astype(np.uint8))
        self.parent.saved_color_space_list.append(self.process_i.csc_dict)
        # Columns 0-2: the weights of the first channel of the combination
        self.parent.combination_features[self.parent.saved_csc_nb, :3] = list(self.process_i.csc_dict.values())[0]
        self.parent.combination_features[self.parent.saved_csc_nb, 3] = self.process_i.unaltered_concomp_nb - 1  # unaltered_cc_nb
        self.parent.combination_features[self.parent.saved_csc_nb, 4] = self.process_i.shape_number  # cc_nb
        self.parent.combination_features[self.parent.saved_csc_nb, 5] = self.process_i.total_area  # area
        self.parent.combination_features[self.parent.saved_csc_nb, 6] = np.std(self.process_i.stats[1:, 2])  # width_std
        self.parent.combination_features[self.parent.saved_csc_nb, 7] = np.std(self.process_i.stats[1:, 3])  # height_std
        self.parent.combination_features[self.parent.saved_csc_nb, 8] = np.std(self.process_i.stats[1:, 4])  # area_std
        if self.process_i.biomask is not None:
            # How many user-marked "Cell" pixels the detection actually covers
            self.parent.combination_features[self.parent.saved_csc_nb, 9] = np.sum(
                self.process_i.validated_shapes[self.process_i.biomask[0], self.process_i.biomask[1]])
        if self.process_i.backmask is not None:
            # How many user-marked "Background" pixels the detection leaves empty
            self.parent.combination_features[self.parent.saved_csc_nb, 10] = np.sum(
                (1 - self.process_i.validated_shapes)[self.process_i.backmask[0], self.process_i.backmask[1]])
        self.parent.saved_csc_nb += 1
        logging.info("end")
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Contains the class: ProgressivelyAddDistantShapes"""
|
|
3
|
+
from copy import deepcopy
|
|
4
|
+
import numpy as np
|
|
5
|
+
import cv2
|
|
6
|
+
from cellects.image_analysis.morphological_operations import cross_33, make_gravity_field, CompareNeighborsWithValue, get_radius_distance_against_time, cc, Ellipse
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ProgressivelyAddDistantShapes:
|
|
11
|
+
def __init__(self, new_potentials, previous_shape, max_distance):
    """
    This class check new potential shapes sizes and distance to a main
    (first called previous) shape.
    If these sizes and distance match requirements,
    create a bridge between these and the main shape
    Then, the modify_past_analysis method progressively grows that bridge
    in a binary video. Bridge growth speed depends on neighboring growth speed

    Size bounds are supplied later to :meth:`consider_shapes_sizes`;
    bridging itself happens in :meth:`connect_shapes`.

    :param new_potentials: A binary image of all shapes detected at t
    :param previous_shape: A binary image of the main shape (1) at t - 1
    :param max_distance: The maximal distance for a shape from new_potentials to get bridged
    """
    # Label the union of old and new detections as connected components
    self.new_order = np.logical_or(new_potentials, previous_shape).astype(np.uint8)
    self.new_order, self.stats, centers = cc(self.new_order)
    self.main_shape = np.zeros(self.new_order.shape, np.uint8)
    self.max_distance = max_distance
    # Make sure the main (previous) shape carries label 1 in new_order
    self.check_main_shape_label(previous_shape)
|
|
32
|
+
|
|
33
|
+
def check_main_shape_label(self, previous_shape):
    """Ensure the previous (main) shape is the component labelled 1 in self.new_order.

    After connected-component labelling, the main shape may have received any
    label. Identify which label overlaps previous_shape (keeping the largest
    overlapping component when several do), swap that label with 1 in
    self.new_order (and the matching rows of self.stats), and fill
    self.main_shape with a binary mask of the main component.

    :param previous_shape: binary image of the main shape at t - 1
    """
    if np.any(self.new_order > 1):
        # If there is at least one pixel of the previous shape that is not among pixels labelled 1,
        # clarify who's main shape
        main_shape_label = np.unique(previous_shape * self.new_order)
        main_shape_label = main_shape_label[main_shape_label != 0]

        # If the main shape is not labelled 1 in main_shape:
        if not np.isin(1, main_shape_label):
            # If it is not 1, find which label correspond to the previous shape
            if len(main_shape_label) > 1:
                pixel_sum_per_label = np.zeros(len(main_shape_label), dtype=np.uint64)
                # Find out the label corresponding to the largest shape.
                # BUGFIX: count pixels per component. The previous code summed
                # the label values (new_order[new_order == label].sum()), which
                # multiplied each count by its label and biased the choice
                # toward components with higher label numbers.
                for li, label in enumerate(main_shape_label):
                    pixel_sum_per_label[li] = (self.new_order == label).sum()
                main_shape_label = main_shape_label[np.argmax(pixel_sum_per_label)]
            # Attribute the correct main shape
            self.main_shape[self.new_order == main_shape_label] = 1
            # Exchange the 1 and the main shape label in new_order image
            not_one_idx = np.nonzero(self.new_order == main_shape_label)
            one_idx = np.nonzero(self.new_order == 1)
            self.new_order[not_one_idx[0], not_one_idx[1]] = 1
            self.new_order[one_idx[0], one_idx[1]] = main_shape_label
            # Do the same for stats
            not_one_stats = deepcopy(self.stats[main_shape_label, :])
            self.stats[main_shape_label, :] = self.stats[1, :]
            self.stats[1, :] = not_one_stats
        else:
            #if np.any(previous_shape * (self.new_order == 1)):
            # Create an image of the principal shape
            self.main_shape[self.new_order == 1] = 1
    else:
        # Only one component (or none): everything non-zero is the main shape
        self.main_shape[np.nonzero(self.new_order)] = 1
|
|
67
|
+
|
|
68
|
+
def consider_shapes_sizes(self, min_shape_size=None, max_shape_size=None):
    """Remove components whose area falls outside [min_shape_size, max_shape_size].

    Components (except the main shape, label 1) whose pixel area
    (self.stats[:, 4]) is below min_shape_size or above max_shape_size are
    erased from self.new_order. When self.max_distance is 0 no bridging is
    wanted and the expanded shape is simply the main shape.

    :param min_shape_size: minimal area in pixels for a shape to be kept, or None
    :param max_shape_size: maximal area in pixels for a shape to be kept, or None
    """
    if self.max_distance == 0:
        # No bridging allowed: output the main shape unchanged
        self.expanded_shape = self.main_shape
        return
    if min_shape_size is None and max_shape_size is None:
        return
    # BUGFIX: when only one bound was given, the previous code kept a boolean
    # mask instead of component indices and then erased the wrong labels.
    # Build the out-of-bounds mask first, then convert it to indices.
    areas = self.stats[:, 4]
    out_of_bounds = np.zeros(areas.shape[0], dtype=bool)
    if min_shape_size is not None:
        out_of_bounds = np.logical_or(out_of_bounds, areas < min_shape_size)
    if max_shape_size is not None:
        out_of_bounds = np.logical_or(out_of_bounds, areas > max_shape_size)
    extreme_shapes = np.nonzero(out_of_bounds)[0]
    # Never remove the main shape (label 1)
    extreme_shapes = extreme_shapes[extreme_shapes != 1]
    # Erase the out-of-bounds components from the labelled image
    self.new_order[np.isin(self.new_order, extreme_shapes)] = 0
|
|
87
|
+
|
|
88
|
+
def connect_shapes(self, only_keep_connected_shapes, rank_connecting_pixels, intensity_valley=None):
    """Bridge the remaining distant shapes to the main shape.

    Builds a gravity field around the main shape, expands each distant shape
    toward it, and stores the result in self.expanded_shape. When nothing can
    be bridged, expanded_shape falls back to the main shape alone.

    :param only_keep_connected_shapes: if true, drop shapes that never reached the main one
    :param rank_connecting_pixels: if true, grade added pixels by distance to the main shape
    :param intensity_valley: experimental alternative gravity field (not functional yet)
    """
    # If there are distant shapes of the good size, run the following:
    if self.max_distance != 0 and np.any(self.new_order > 1):
        # The intensity valley method does not work yet, don't use it
        if intensity_valley is not None:
            self.gravity_field = intensity_valley # make sure that the values correspond to the coord
        else:
            # 1) Build a gravity field around the main shape
            self.gravity_field = make_gravity_field(self.main_shape, max_distance=self.max_distance, with_erosion=1)

        # If there are near enough shapes, run the following
        # 2) Dilate other shapes toward the main according to the gradient
        other_shapes, max_field_feelings = self.expand_smalls_toward_main()

        # plt.imshow(other_shapes)
        # If there are shapes within gravity field range
        if np.any(max_field_feelings > 0):
            self.expanded_shape = np.zeros(self.main_shape.shape, np.uint8)
            self.expanded_shape[np.nonzero(self.main_shape + other_shapes)] = 1
            if only_keep_connected_shapes:
                # Make sure that only shapes connected with the main one remain on the final image
                self.keep_connected_shapes()
            if rank_connecting_pixels:
                # Rate the extension of small shapes according to the distance between the small and the main shapes
                self.distance_ranking_of_connecting_pixels()
                #self.expanded_shape
            # plt.imshow(self.expanded_shape)
        else:
            self.expanded_shape = self.main_shape
    # Otherwise, end by putting the main shape as output
    else:
        self.expanded_shape = self.main_shape
|
|
121
|
+
|
|
122
|
+
def expand_smalls_toward_main(self):
    """Grow each distant shape toward the main shape along the gravity field.

    First infers the order in which distant shapes should be processed by
    dilating the main shape until everything merges; then dilates each shape,
    keeping at every step only the ring pixels that feel the strongest gravity,
    until it touches the (progressively expanded) main shape or leaves range.

    :return: (expanded_main, max_field_feelings) — the binary union of the main
             shape and every successfully expanded shape, and the gravity values
             recorded at each dilation step (0 means out of range).
    """
    other_shapes = np.zeros(self.main_shape.shape, np.uint8)
    other_shapes[self.new_order > 1] = 1
    simple_disk = cross_33
    kernel = Ellipse((5, 5)).create().astype(np.uint8)
    # Dilate the main shape, progressively to infer in what order other shapes should be expanded toward it
    main_shape = deepcopy(self.main_shape)
    new_order = deepcopy(self.new_order)
    order_of_shapes_to_expand = np.empty(0, dtype=np.uint32)
    nb = 3
    # Loop until everything is connected into at most one component
    # (plus background), i.e. the ordering is complete
    while nb > 2:
        main_shape = cv2.dilate(main_shape, kernel)
        connections = deepcopy(main_shape)
        connections *= new_order
        # Labels newly touched by the dilated main shape
        # (skips background and the main label itself)
        new_connections = np.unique(connections)[2:]
        new_order[np.isin(new_order, new_connections)] = 1
        order_of_shapes_to_expand = np.append(order_of_shapes_to_expand, new_connections)
        connections[main_shape > 0] = 1
        connections[other_shapes > 0] = 1
        nb, connections = cv2.connectedComponents(connections)

    expanded_main = deepcopy(self.main_shape)
    max_field_feelings = np.empty(0, dtype=np.uint32)
    max_field_feeling = 0
    # Loop over each shape to connect, from the nearest to the furthest to the main shape
    for shape_i in order_of_shapes_to_expand:# np.unique(self.new_order)[2:]:
        current_shape = np.zeros(self.main_shape.shape, np.uint8)
        current_shape[self.new_order == shape_i] = 1
        dil = 0
        # Dilate that shape until it overlaps the main shape
        while np.logical_and(dil <= self.max_distance, not np.any(current_shape * expanded_main)):
            dil += 1
            rings = cv2.dilate(current_shape, simple_disk, iterations=1, borderType=cv2.BORDER_CONSTANT,
                               borderValue=0)

            # Gravity felt on the one-pixel ring gained by this dilation
            rings = self.gravity_field * (rings - current_shape)
            max_field_feeling = np.max(rings) # np.min(rings[rings>0])
            max_field_feelings = np.append(max_field_feeling, max_field_feelings)
            if max_field_feeling > 0: # If there is no shape within max_distance range, quit the loop

                if dil == 1:
                    initial_pixel_number = np.sum(rings == max_field_feeling)
                # Thin the growth front so it does not widen over iterations
                while np.sum(rings == max_field_feeling) > initial_pixel_number:
                    shrinking_stick = CompareNeighborsWithValue(rings, 8, np.uint32)
                    shrinking_stick.is_equal(max_field_feeling, True)
                    rings[shrinking_stick.equal_neighbor_nb < 2] = 0
                current_shape[rings == max_field_feeling] = 1
            else:
                break

        expanded_main[current_shape != 0] = 1
    return expanded_main, max_field_feelings
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def keep_connected_shapes(self):
    """Restrict expanded_shape to the one component that touches the main shape.

    When the expansion left several disconnected components, scan them and keep
    the first one overlapping main_shape. With at most one foreground component
    (besides the background), expanded_shape is left untouched.
    """
    comp_nb, labelled = cv2.connectedComponents(self.expanded_shape, ltype=cv2.CV_16U)
    if comp_nb > 2:
        for comp_label in np.arange(1, comp_nb):
            candidate = np.zeros(labelled.shape, np.uint8)
            candidate[labelled == comp_label] = 1
            # Stop as soon as a component connected to the main shape is found
            if np.any(candidate * self.main_shape):
                break
        self.expanded_shape = candidate
|
|
189
|
+
|
|
190
|
+
def distance_ranking_of_connecting_pixels(self):
    """Grade the pixels gained by the expansion using the gravity field.

    Pixels present in expanded_shape but absent from main_shape are located via
    uint8 wrap-around (main_shape - expanded_shape == 255 exactly where
    main_shape is 0 and expanded_shape is 1). Their gravity-field values are
    shifted so the minimum grade becomes 1, then added on top of
    expanded_shape; when no pixel was added, expanded_shape is unchanged.
    """
    # Binary mask of the pixels added by the expansion (uint8 underflow trick)
    added_pixels = np.zeros(self.main_shape.shape, np.uint8)
    added_pixels[(self.main_shape - self.expanded_shape) == 255] = 1
    graded = added_pixels * self.gravity_field
    if np.any(graded):
        # Rescale grades so the weakest-gravity added pixel gets grade 1
        nonzero_idx = np.nonzero(graded)
        graded[nonzero_idx] -= np.min(graded[nonzero_idx]) - 1
        self.expanded_shape += graded
|
|
198
|
+
|
|
199
|
+
#binary_video = self.binary[(self.step // 2):(self.t + 1), :, :]
|
|
200
|
+
#draft_seg = self.segmentation[(self.step // 2):(self.t + 1), :, :]
|
|
201
|
+
def modify_past_analysis(self, binary_video, draft_seg):
    """Retroactively grow the bridged shapes into the past frames of a binary video.

    Uses the distance-graded expanded_shape (grades written by
    distance_ranking_of_connecting_pixels) and the measured growth timing to
    add bridge pixels frame by frame, then stamps the full expanded shape on
    every frame from time_end onward.

    :param binary_video: 3D array (time, y, x) of past binary detections; modified in place
    :param draft_seg: 3D array of the draft segmentation used to time the growth
    :return: the modified binary_video
    """
    self.binary_video = binary_video
    self.draft_seg = draft_seg
    # Grade-1 pixels are dropped: only graded bridge pixels (>=2) are replayed
    self.expanded_shape[self.expanded_shape == 1] = 0
    # Find the time at which the shape became connected to the expanded shape
    # (i.e. the time to start looking for a growth)
    distance_against_time, time_start, time_end = self.find_expansion_timings()

    # Use that vector to progressively fill pixels at the same speed as shape grows
    for t in np.arange(len(distance_against_time)):
        # Bridge pixels whose grade says they should exist by time t
        image_garbage = (self.expanded_shape >= distance_against_time[t]).astype(np.uint8)
        new_order, stats, centers = cc(image_garbage)
        for comp_i in np.arange(1, stats.shape[0]):
            past_image = deepcopy(self.binary_video[time_start + t, :, :])
            with_new_comp = new_order == comp_i
            past_image[with_new_comp] = 1
            nb_comp, image_garbage = cv2.connectedComponents(past_image)
            # Only commit the component if it connects into a single shape
            if nb_comp == 2:
                self.binary_video[time_start + t, :, :][with_new_comp] = 1
    #self.expanded_shape[self.expanded_shape > 0] = 1
    #self.binary_video[time_end:, :, :] += self.expanded_shape
    # From time_end on, the whole expanded shape is present in every frame
    for t in np.arange(time_end, self.binary_video.shape[0]):
        self.binary_video[t, :, :][np.nonzero(self.expanded_shape)] = 1
    # NOTE(review): uses the final loop value of t — confirm this last-frame
    # merge is meant to run once, after the loop above.
    last_image = self.binary_video[t, :, :] + self.binary_video[t - 1, :, :]
    last_image[last_image > 0] = 1
    self.binary_video[-1, :, :] = last_image
    return self.binary_video
|
|
228
|
+
|
|
229
|
+
def find_expansion_timings(self):
    """Determine when the main shape reached the bridged shape in the past video.

    Walks backward through binary_video to find the frame where the main shape
    first touched the closest pixels of the expanded shape, then derives the
    distance-vs-time relationship of the growth from the draft segmentation.

    :return: (distance_against_time, time_start, time_end) with both times
             expressed in absolute frame indices of binary_video
    """
    max_t = self.binary_video.shape[0] - 1
    dilated_one = cv2.dilate(self.expanded_shape, cross_33)
    # Find the time at which the nearest pixel of the expanded_shape is reached by the main shape
    closest_pixels = np.zeros(self.main_shape.shape, dtype=np.uint8)
    closest_pixels[self.expanded_shape == np.max(dilated_one)] = 1
    expand_start = max_t
    # Loop until there is no overlap between the dilated added shape and the original shape
    # Stop one frame before in order to obtain the exact reaching moment.
    while np.any(self.binary_video[expand_start - 1, :, :] * closest_pixels):
        expand_start -= 1

    # Find the relationship between distance and time
    distance_against_time, time_start, time_end = get_radius_distance_against_time(
        self.draft_seg[expand_start:(max_t + 1), :, :], dilated_one)
    # Convert relative times (within the sliced video) to absolute frame indices
    time_start += expand_start
    time_end += expand_start
    return distance_against_time, time_start, time_end
|