cellects 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__init__.py +0 -0
- cellects/__main__.py +49 -0
- cellects/config/__init__.py +0 -0
- cellects/config/all_vars_dict.py +155 -0
- cellects/core/__init__.py +0 -0
- cellects/core/cellects_paths.py +31 -0
- cellects/core/cellects_threads.py +1451 -0
- cellects/core/motion_analysis.py +2010 -0
- cellects/core/one_image_analysis.py +1061 -0
- cellects/core/one_video_per_blob.py +540 -0
- cellects/core/program_organizer.py +1316 -0
- cellects/core/script_based_run.py +154 -0
- cellects/gui/__init__.py +0 -0
- cellects/gui/advanced_parameters.py +1258 -0
- cellects/gui/cellects.py +189 -0
- cellects/gui/custom_widgets.py +790 -0
- cellects/gui/first_window.py +449 -0
- cellects/gui/if_several_folders_window.py +239 -0
- cellects/gui/image_analysis_window.py +2066 -0
- cellects/gui/required_output.py +232 -0
- cellects/gui/video_analysis_window.py +656 -0
- cellects/icons/__init__.py +0 -0
- cellects/icons/cellects_icon.icns +0 -0
- cellects/icons/cellects_icon.ico +0 -0
- cellects/image_analysis/__init__.py +0 -0
- cellects/image_analysis/cell_leaving_detection.py +54 -0
- cellects/image_analysis/cluster_flux_study.py +102 -0
- cellects/image_analysis/image_segmentation.py +706 -0
- cellects/image_analysis/morphological_operations.py +1635 -0
- cellects/image_analysis/network_functions.py +1757 -0
- cellects/image_analysis/one_image_analysis_threads.py +289 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
- cellects/image_analysis/shape_descriptors.py +1016 -0
- cellects/utils/__init__.py +0 -0
- cellects/utils/decorators.py +14 -0
- cellects/utils/formulas.py +637 -0
- cellects/utils/load_display_save.py +1054 -0
- cellects/utils/utilitarian.py +490 -0
- cellects-0.1.2.dist-info/LICENSE.odt +0 -0
- cellects-0.1.2.dist-info/METADATA +132 -0
- cellects-0.1.2.dist-info/RECORD +44 -0
- cellects-0.1.2.dist-info/WHEEL +5 -0
- cellects-0.1.2.dist-info/entry_points.txt +2 -0
- cellects-0.1.2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
This script contains 2 classes used by the OneImageAnalysis class
|
|
4
|
+
They are threads to process the first image and save the selected combinations simultaneously
|
|
5
|
+
"""
|
|
6
|
+
import threading
|
|
7
|
+
import logging
|
|
8
|
+
from copy import deepcopy
|
|
9
|
+
import numpy as np
|
|
10
|
+
import cv2
|
|
11
|
+
from cellects.image_analysis.image_segmentation import otsu_thresholding, combine_color_spaces
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ProcessFirstImage:
    """Run the first-image segmentation pipeline for one color-space combination.

    The constructor immediately calls :meth:`start_processing`, so creating an
    instance performs the whole analysis and, when the result looks plausible,
    reports it to the parent through ``parent.save_combination_features(self)``.
    """

    def __init__(self, l):
        # All the work happens at construction time.
        self.start_processing(l)

    def start_processing(self, l):
        """
        Will process the first image according to rules and parameters in l.

        Two modes are driven by flags in l:
        - l[1] (get_one_channel_result): build the image from the single
          color-space combination l[3], segment it and save its features.
        - l[2] (combine_channels): starting from the saved combination at
          index i = l[3], try to merge in other saved combinations (indices
          in l[10]) and keep any merge that does not increase the shape count
          while retaining at least 90% of the segmented area.

        :param l: list containing the necessary data to process the first image:
            l[0] parent object (provides all_c_spaces, saved_color_space_list,
            combination_features, save_combination_features, spot_shapes,
            spot_size_confints), l[1]/l[2] mode flags, l[3] csc dict or index,
            l[4] several_blob_per_arena, l[5] sample_number, l[6] spot_size,
            l[7] kmeans cluster number or None (None selects Otsu),
            l[8] biomask, l[9] backmask, l[10] candidate indices (mode l[2]).
        """
        self.parent = l[0]
        get_one_channel_result = l[1]
        combine_channels = l[2]
        self.all_c_spaces = self.parent.all_c_spaces
        self.several_blob_per_arena = l[4]
        self.sample_number = l[5]
        self.spot_size = l[6]
        kmeans_clust_nb = l[7]
        self.biomask = l[8]
        self.backmask = l[9]
        if get_one_channel_result:
            # In this mode l[3] is a {color_space: channel_coefficients} dict.
            self.csc_dict = l[3]
            self.image = combine_color_spaces(self.csc_dict, self.all_c_spaces)
            # Segment by Otsu thresholding, or by kmeans when a cluster
            # number was supplied.
            if kmeans_clust_nb is None:
                self.binary_image = otsu_thresholding(self.image)
            else:
                self.kmeans(kmeans_clust_nb, self.biomask, self.backmask)
            # self.parent.image = self.image
            # self.parent.kmeans(kmeans_clust_nb, self.biomask, self.backmask)
            # self.binary_image = self.parent.binary_image
            self.unaltered_concomp_nb, shapes = cv2.connectedComponents(self.binary_image)
            # Only keep segmentations that are neither empty nor absurdly
            # fragmented (10000+ connected components)...
            if 1 < self.unaltered_concomp_nb < 10000:
                self.total_area = np.sum(self.binary_image)
                # ...and whose covered area is neither negligible (<=100 px)
                # nor most of the image (>=75%, i.e. background segmented).
                if 100 < self.total_area < self.binary_image.size * 0.75:
                    self.process_binary_image()
                    self.parent.save_combination_features(self)
            # except RuntimeWarning:
            #     logging.info("Make sure that scaling and spot size are correct")
        if combine_channels:
            # In this mode l[3] is the index of the saved combination to improve.
            i = l[3]
            possibilities = l[10]
            saved_color_space_list = self.parent.saved_color_space_list
            combination_features = self.parent.combination_features
            self.csc_dict = saved_color_space_list[i]
            # Best scores so far: validated shape count (col 4) and area (col 5).
            previous_shape_number = combination_features[i, 4]
            previous_sum = combination_features[i, 5]
            for j in possibilities[::-1]:
                csc_dict2 = saved_color_space_list[j]
                # Work on a copy so self.csc_dict only changes on improvement.
                csc_dict = deepcopy(self.csc_dict)
                keys = list(csc_dict.keys())

                # Only the first color space of the candidate combination is merged in.
                k2 = list(csc_dict2.keys())[0]
                v2 = csc_dict2[k2]
                # Stop once the candidate overlaps a channel already weighted
                # in the current combination.
                if np.isin(k2, keys) and np.sum(v2 * csc_dict[k2]) != 0:
                    break
                for factor in [2, 1]:
                    # NOTE(review): += accumulates across factor iterations, so
                    # the weights actually tested are 2*v2 then 3*v2 — confirm
                    # this is intended rather than independent 2*v2 and 1*v2.
                    if np.isin(k2, keys):
                        csc_dict[k2] += v2 * factor
                    else:
                        csc_dict[k2] = v2 * factor
                    self.image = combine_color_spaces(csc_dict, self.all_c_spaces)
                    if kmeans_clust_nb is None:
                        self.binary_image = otsu_thresholding(self.image)
                    else:
                        self.kmeans(kmeans_clust_nb, self.biomask, self.backmask)
                    self.process_binary_image()
                    self.total_area = self.validated_shapes.sum()
                    # Keep the merge when it does not fragment the image more
                    # and retains at least 90% of the previous area.
                    if previous_shape_number >= self.shape_number and self.total_area > previous_sum * 0.9:
                        previous_shape_number = self.shape_number
                        previous_sum = self.total_area
                        self.csc_dict = deepcopy(csc_dict)
                        self.unaltered_concomp_nb = combination_features[i, 3]
                        self.parent.save_combination_features(self)
                        logging.info(str(saved_color_space_list[i]) + "-->" + str(self.csc_dict ))

    def shape_selection(self, horizontal_size, shape, confint, do_not_delete=None):
        """
        Use the statistics of the connected components of a binary image to make a shape selection.

        Operates on self.shapes2/self.stats in place and stores the result in
        self.validated_shapes (binary matrix) and self.shape_number
        (component count, background excluded).

        :param horizontal_size: the average horizontal size of one shape in pixels
        :param shape: the geometry of the shape: circle or rectangle
        :param confint: confidence interval for horizontal size and shape detection
        :param do_not_delete: labels of components the user marked as "Cell";
            these are never removed
        """
        # counter+=1;horizontal_size = self.spot_size; shape = self.parent.spot_shapes[counter];confint = self.parent.spot_size_confints[::-1][counter]
        # stats columns contain in that order:
        # - x leftmost coordinate of boundingbox
        # - y topmost coordinate of boundingbox
        # - The horizontal size of the bounding box.
        # - The vertical size of the bounding box.
        # - The total area (in pixels) of the connected component.

        # First, remove each stain whose horizontal size varies too much from reference
        size_interval = [horizontal_size * (1 - confint), horizontal_size * (1 + confint)]
        cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 2] < size_interval[0], self.stats[:, 2] > size_interval[1]))

        if do_not_delete is None:
            self.shapes2[np.isin(self.shapes2, cc_to_remove)] = 0
        else:
            # Same removal, but user-marked "Cell" components are protected.
            self.shapes2[np.logical_and(np.isin(self.shapes2, cc_to_remove), np.logical_not(np.isin(self.shapes2, do_not_delete)))] = 0

        # Second, determine the shape of each stain to only keep the ones corresponding to the reference shape
        shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
        shapes[self.shapes2 > 0] = 1
        # Relabel after the size filter so stats match the surviving components.
        nb_components, self.shapes2, self.stats, self.centroids = cv2.connectedComponentsWithStats(shapes,
                                                                                                   connectivity=8)
        if nb_components > 1:
            if shape == 'circle':
                # Expected area of a disc of the given horizontal diameter.
                surf_interval = [np.pi * np.square(horizontal_size // 2) * (1 - confint), np.pi * np.square(horizontal_size // 2) * (1 + confint)]
                cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
            elif shape == 'rectangle':
                # If the smaller side is the horizontal one, use the user provided horizontal side
                if np.argmin((np.mean(self.stats[1:, 2]), np.mean(self.stats[1:, 3]))) == 0:
                    surf_interval = [np.square(horizontal_size) * (1 - confint), np.square(horizontal_size) * (1 + confint)]
                    cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
                # If the smaller side is the vertical one, use the median vertical length shape
                else:
                    surf_interval = [np.square(np.median(self.stats[1:, 3])) * (1 - confint), np.square(np.median(self.stats[1:, 3])) * (1 + confint)]
                    cc_to_remove = np.argwhere(np.logical_or(self.stats[:, 4] < surf_interval[0], self.stats[:, 4] > surf_interval[1]))
            else:
                # Unknown shape keyword: cc_to_remove keeps its value from the
                # size filter above.
                logging.info("Original blob shape not well written")

            if do_not_delete is None:
                self.shapes2[np.isin(self.shapes2, cc_to_remove)] = 0
            else:
                self.shapes2[np.logical_and(np.isin(self.shapes2, cc_to_remove),
                                            np.logical_not(np.isin(self.shapes2, do_not_delete)))] = 0
        # There was only that before:
        shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
        shapes[np.nonzero(self.shapes2)] = 1

        # Final relabel; background label 0 is excluded from shape_number.
        nb_components, self.shapes2, self.stats, self.centroids = cv2.connectedComponentsWithStats(shapes, connectivity=8)
        self.validated_shapes = shapes
        self.shape_number = nb_components - 1

    def kmeans(self, cluster_number, biomask=None, backmask=None, bio_label=None):
        """
        Use kmeans to detect the Cell(s) after having segmented the grayscale image into two or more categories.

        Stores the resulting mask in self.binary_image and the chosen cluster
        label in self.bio_label.

        :param cluster_number: the number of categories to find
        :param biomask: the mask of pixels marked as Cell(s) by the user
        :param backmask: the mask of pixels marked as Background by the user
        :param bio_label: the kmeans label already known to be the Cell(s); when
            given, biomask/backmask are ignored
        :return: None
        """
        # cv2.kmeans needs float32 samples as an (N, 1) column.
        image = self.image.reshape((-1, 1))
        image = np.float32(image)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        compactness, label, center = cv2.kmeans(image, cluster_number, None, criteria, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
        # Map each pixel back to its cluster label, in image geometry.
        kmeans_image = np.uint8(label.flatten().reshape(self.image.shape[:2]))
        sum_per_label = np.zeros(cluster_number)
        self.binary_image = np.zeros(self.image.shape[:2], np.uint8)
        if bio_label is not None:
            self.binary_image[np.nonzero(kmeans_image == bio_label)] = 1
            self.bio_label = bio_label
        else:
            if biomask is not None:
                # The cell label is the one most represented under the biomask.
                all_labels = kmeans_image[biomask[0], biomask[1]]
                for i in range(cluster_number):
                    sum_per_label[i] = (all_labels == i).sum()
                self.bio_label = np.nonzero(sum_per_label == np.max(sum_per_label))
            elif backmask is not None:
                # The cell label is the one least represented under the backmask.
                all_labels = kmeans_image[backmask[0], backmask[1]]
                for i in range(cluster_number):
                    sum_per_label[i] = (all_labels == i).sum()
                self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
            else:
                # No user hint: assume the cell is the rarest label overall.
                for i in range(cluster_number):
                    sum_per_label[i] = (kmeans_image == i).sum()
                self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
            self.binary_image[np.nonzero(kmeans_image == self.bio_label)] = 1

    def process_binary_image(self, use_bio_and_back_masks=False):
        """
        Process the binary image to get the final validated shapes.

        Starts by computing connected components, then removes the background
        pixels marked by the user, then, if there are not several blobs per
        arena, selects spots according to their sizes. Stores the result in
        self.validated_shapes and self.shape_number (background excluded).

        :param use_bio_and_back_masks: if true, will use the cell(s) and background marked by the user
        :return: None
        """
        self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
            self.binary_image, connectivity=8)
        do_not_delete = None
        if use_bio_and_back_masks:
            if self.backmask is not None:
                if np.any(self.shapes[self.backmask]):
                    # Drop every component touching the user's background mask,
                    # then relabel.
                    self.shapes[np.isin(self.shapes, np.unique(self.shapes[self.backmask]))] = 0
                    self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
                        (self.shapes > 0).astype(np.uint8), connectivity=8)
                    self.shape_number -= 1
            if self.biomask is not None:
                if np.any(self.shapes[self.biomask]):
                    # Components touching the user's cell mask must survive
                    # every later filter.
                    do_not_delete = np.unique(self.shapes[self.biomask])
                    do_not_delete = do_not_delete[do_not_delete != 0]
        if not self.several_blob_per_arena and self.spot_size is not None:
            # Try increasingly permissive size/shape filters until the expected
            # number of spots is found (or candidates are exhausted).
            counter = 0
            self.shapes2 = deepcopy(self.shapes)
            while self.shape_number != self.sample_number and counter < len(self.parent.spot_size_confints):
                self.shape_selection(horizontal_size=self.spot_size, shape=self.parent.spot_shapes[counter],
                                     confint=self.parent.spot_size_confints[counter], do_not_delete=do_not_delete)
                logging.info(f"Shape selection algorithm found {self.shape_number} disconnected shapes")
                counter += 1
            if self.shape_number == self.sample_number:
                self.shapes = self.shapes2
        if self.shape_number == self.sample_number:
            self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
            self.validated_shapes[self.shapes > 0] = 1
        else:
            # Fallback: plain size bounds (10 px to 75% of the image).
            max_size = self.binary_image.size * 0.75
            min_size = 10
            # stats is sliced from row 1 (background skipped), hence the +1 to
            # convert back to component labels.
            cc_to_remove = np.argwhere(np.logical_or(self.stats[1:, 4] < min_size, self.stats[1:, 4] > max_size)) + 1
            self.shapes[np.isin(self.shapes, cc_to_remove)] = 0
            self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
            self.validated_shapes[self.shapes > 0] = 1
            self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
                self.validated_shapes,
                connectivity=8)
            if not self.several_blob_per_arena and self.sample_number is not None and self.shape_number > self.sample_number:
                # Sort shapes by size and compare the largest with the second largest
                # If the difference is too large, remove that largest shape.
                cc_to_remove = np.array([], dtype=np.uint8)
                to_remove = np.array([], dtype=np.uint8)
                self.stats = self.stats[1:, :]
                while self.stats.shape[0] > self.sample_number and to_remove is not None:
                    # 1) rank by height
                    sorted_height = np.argsort(self.stats[:, 2])
                    # and only consider the number of shapes we want to detect
                    standard_error = np.std(self.stats[sorted_height, 2][-self.sample_number:])
                    differences = np.diff(self.stats[sorted_height, 2])
                    # Look for very big changes from one height to the next
                    if differences.any() and np.max(differences) > 2 * standard_error:
                        # Within these, remove shapes that are too large
                        to_remove = sorted_height[np.argmax(differences)]
                        cc_to_remove = np.append(cc_to_remove, to_remove + 1)
                        self.stats = np.delete(self.stats, to_remove, 0)

                    else:
                        # No outlier left: stop the pruning loop.
                        to_remove = None
                self.shapes[np.isin(self.shapes, cc_to_remove)] = 0
                self.validated_shapes = np.zeros(self.shapes.shape, dtype=np.uint8)
                self.validated_shapes[self.shapes > 0] = 1
                self.shape_number, self.shapes, self.stats, self.centroids = cv2.connectedComponentsWithStats(
                    self.validated_shapes,
                    connectivity=8)

        # Exclude the background component from the reported count.
        self.shape_number -= 1
259
|
+
|
|
260
|
+
class SaveCombinationThread(threading.Thread):
    """Background thread that archives the results of one color-space combination.

    The caller attaches ``self.process_i`` (a processed first-image object)
    before starting the thread; :meth:`run` then copies that object's results
    into the parent's lists and feature table.
    """

    def __init__(self, parent=None):
        """Keep a reference to the parent whose result containers get filled."""
        super().__init__()
        self.parent = parent

    def run(self):
        """
        Save the current process_i data into the combination_features list.

        Appends the validated and converted images plus the color-space dict to
        the parent's lists, then writes one row of parent.combination_features:
        cols 0-2 channel coefficients, 3 unaltered component count, 4 validated
        shape count, 5 total area, 6-8 width/height/area std, 9 bio-mask hits,
        10 back-mask misses. Finally increments parent.saved_csc_nb.
        """
        proc = self.process_i
        parent = self.parent
        logging.info(f"Saving results from the color space combination: {proc.csc_dict}. {proc.shape_number} distinct spots detected.")
        parent.saved_images_list.append(proc.validated_shapes)
        parent.converted_images_list.append(np.round(proc.image).astype(np.uint8))
        parent.saved_color_space_list.append(proc.csc_dict)
        row = parent.saved_csc_nb
        features = parent.combination_features
        features[row, :3] = list(proc.csc_dict.values())[0]
        features[row, 3] = proc.unaltered_concomp_nb - 1  # unaltered_cc_nb
        features[row, 4] = proc.shape_number  # cc_nb
        features[row, 5] = proc.total_area  # area
        features[row, 6] = np.std(proc.stats[1:, 2])  # width_std
        features[row, 7] = np.std(proc.stats[1:, 3])  # height_std
        features[row, 8] = np.std(proc.stats[1:, 4])  # area_std
        if proc.biomask is not None:
            # How many user-marked cell pixels the segmentation captured.
            features[row, 9] = np.sum(
                proc.validated_shapes[proc.biomask[0], proc.biomask[1]])
        if proc.backmask is not None:
            # How many user-marked background pixels were correctly left out.
            features[row, 10] = np.sum(
                (1 - proc.validated_shapes)[proc.backmask[0], proc.backmask[1]])
        parent.saved_csc_nb += 1
        logging.info("end")