cellects 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__init__.py +0 -0
- cellects/__main__.py +49 -0
- cellects/config/__init__.py +0 -0
- cellects/config/all_vars_dict.py +155 -0
- cellects/core/__init__.py +0 -0
- cellects/core/cellects_paths.py +31 -0
- cellects/core/cellects_threads.py +1451 -0
- cellects/core/motion_analysis.py +2010 -0
- cellects/core/one_image_analysis.py +1061 -0
- cellects/core/one_video_per_blob.py +540 -0
- cellects/core/program_organizer.py +1316 -0
- cellects/core/script_based_run.py +154 -0
- cellects/gui/__init__.py +0 -0
- cellects/gui/advanced_parameters.py +1258 -0
- cellects/gui/cellects.py +189 -0
- cellects/gui/custom_widgets.py +790 -0
- cellects/gui/first_window.py +449 -0
- cellects/gui/if_several_folders_window.py +239 -0
- cellects/gui/image_analysis_window.py +2066 -0
- cellects/gui/required_output.py +232 -0
- cellects/gui/video_analysis_window.py +656 -0
- cellects/icons/__init__.py +0 -0
- cellects/icons/cellects_icon.icns +0 -0
- cellects/icons/cellects_icon.ico +0 -0
- cellects/image_analysis/__init__.py +0 -0
- cellects/image_analysis/cell_leaving_detection.py +54 -0
- cellects/image_analysis/cluster_flux_study.py +102 -0
- cellects/image_analysis/image_segmentation.py +706 -0
- cellects/image_analysis/morphological_operations.py +1635 -0
- cellects/image_analysis/network_functions.py +1757 -0
- cellects/image_analysis/one_image_analysis_threads.py +289 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
- cellects/image_analysis/shape_descriptors.py +1016 -0
- cellects/utils/__init__.py +0 -0
- cellects/utils/decorators.py +14 -0
- cellects/utils/formulas.py +637 -0
- cellects/utils/load_display_save.py +1054 -0
- cellects/utils/utilitarian.py +490 -0
- cellects-0.1.2.dist-info/LICENSE.odt +0 -0
- cellects-0.1.2.dist-info/METADATA +132 -0
- cellects-0.1.2.dist-info/RECORD +44 -0
- cellects-0.1.2.dist-info/WHEEL +5 -0
- cellects-0.1.2.dist-info/entry_points.txt +2 -0
- cellects-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1061 @@
#!/usr/bin/env python3
"""
This script contains the OneImageAnalysis class.
OneImageAnalysis is a class containing many tools to analyze one image.

An image can be coded in different color spaces, such as RGB, HSV, etc. These color spaces code the color of each pixel as three numbers, ranging from 0 to 255. Our aim is to find a combination of these three numbers that provides a single intensity value for each pixel, and which maximizes the contrast between the organism and the background. To increase the flexibility of our algorithm, we use more than one color space to look for these combinations. In particular, we use the RGB, LAB, HSV, LUV, HLS and YUV color spaces. What we call a color space combination is a transformation combining several channels of one or more color spaces.
To find the optimal color space combination, Cellects uses one image (which we will call the “seed image”). By default, the software selects the first image of the sequence as the seed image, but the user can select a different image where the cells are more visible.
Cellects has a fully automatic algorithm to select a good color space combination, which proceeds in four steps:

First, it screens every channel of every color space. For instance, it converts the image into grayscale using the second channel of the HSV color space, and segments that grayscale image using Otsu thresholding. Once a binary image is computed from every channel, Cellects only keeps the channels for which the number of connected components is lower than 10000, and the total area detected is higher than 100 pixels but lower than 0.75 times the total size of the image. By doing so, we eliminate the channels that produce the most noise.

In the second step, Cellects uses all the channels that pass the first filter and tests all possible pairwise combinations. Cellects combines channels by summing their intensities and re-scaling the result between 0 and 255. It then performs the segmentation on these combinations, and filters them with the same criteria as in the first step.

The third step uses the previously selected channels and combinations that produce the highest and lowest detected surface to make logical operations between them. It applies the AND operator between the two results having the highest surface, and the OR operator between the two results having the lowest surface. It thus generates another two candidate segmentations, which are added to the ones obtained in the previous steps.

In the fourth step, Cellects works under the assumption that the image contains multiple similar arenas holding a collection of objects with similar size and shape, and keeps the segmentations whose standard error of the area is smaller than ten times the smallest area standard error across all segmentations. To account for cases in which the experimental setup induces segmentation errors in one particular direction, Cellects also keeps the segmentation with minimal width standard error across all segmentations, and the one with minimal height standard error across all segmentations. All retained segmentations are shown to the user, who can then select the best one.

As an optional step, Cellects can refine the choice of color space combination, using the last image of the sequence instead of the seed image. In order to increase the diversity of combinations explored, this optional analysis is performed in a different way than for the seed image. This refinement can also use information from the segmentation of the seed frame and from the geometry of the arenas to rank the quality of the segmentation emerging from each color space combination. To generate these combinations, Cellects follows four steps.
The first step is identical to the first step of the previously described automatic algorithm (in section 1) and starts by screening every possible channel and color space.

The second step aims to find combinations that consider many channels, rather than only one or two. To do that, it creates combinations that consist of the sum of all channels except one. It then filters these combinations in the same way as in the previous step. All surviving combinations are retained and undergo the same process again, each time excluding one more channel, until single-channel combinations are reached. This process thus creates new combinations that include any number of channels.

The third step filters these segmentations, keeping those that fulfill the following criteria: (1) The number of connected components is higher than the number of arenas and lower than 10000. (2) The detected area covers less than 99% of the image. (3) Less than 1% of the detected area falls outside the arenas. (4) Each connected component of the detected area covers less than 75% of the image.

Finally, the fourth step ranks the remaining segmentations using the following criteria: If the user labeled any areas as “cell”, the ranking will reflect the number of cell pixels in common between the segmentation and the user labels. If the user did not label any areas as cells but labeled areas as background, the ranking will reflect the number of background pixels in common. Otherwise, the ranking will reflect the number of pixels in common with the segmentation of the first image.
"""

import logging
import os
from copy import deepcopy
import numpy as np
import cv2  # named opencv-python on PyPI
import multiprocessing.pool as mp
from numba.typed import List as TList
from numba.typed import Dict as TDict
from cellects.image_analysis.morphological_operations import cross_33, Ellipse
from cellects.image_analysis.image_segmentation import get_color_spaces, combine_color_spaces, apply_filter, otsu_thresholding, get_otsu_threshold
from cellects.image_analysis.one_image_analysis_threads import SaveCombinationThread, ProcessFirstImage
from cellects.utils.formulas import bracket_to_uint8_image_contrast
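

# The module docstring above describes the first screening step in prose. As a
# concrete illustration, here is a minimal, self-contained sketch of that step
# (an editor's example, not part of the original package): every channel of a
# few color spaces is segmented with Otsu thresholding, and a channel is kept
# only when it yields fewer than 10000 connected components and a detected
# area between 100 pixels and 0.75 times the image size. The helper name
# _screen_channels_sketch is hypothetical.
def _screen_channels_sketch(bgr_image):
    kept = []
    for name, code in (("bgr", None), ("hsv", cv2.COLOR_BGR2HSV), ("lab", cv2.COLOR_BGR2LAB)):
        converted = bgr_image if code is None else cv2.cvtColor(bgr_image, code)
        for channel in range(3):
            grey = converted[:, :, channel]
            # Otsu thresholding yields a 0/1 mask of the most contrasted pixels
            _, binary = cv2.threshold(grey, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            cc_nb, _ = cv2.connectedComponents(binary)
            area = int(binary.sum())
            if cc_nb < 10000 and 100 < area < 0.75 * binary.size:
                kept.append((name, channel, binary))
    return kept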


class OneImageAnalysis:
    """
    This class takes a 3D matrix (2 spatial dimensions and 1 color [BGR] dimension).
    Its methods allow image:
        - conversion to any bgr/hsv/lab channel
        - cropping
        - rotating
        - filtering using some of the most common techniques:
          Gaussian, Median, Bilateral, Laplacian, Mexican hat
        - segmenting using thresholds or kmeans
        - shape selection according to horizontal size or shape ('circle' vs 'quadrilateral')

    ps: a viewing method displays the image before and after the most recent modification made to the instance
    """

    def __init__(self, image):
        self.image = image
        if len(self.image.shape) == 2:
            self.already_greyscale = True
        else:
            self.already_greyscale = False
        self.image2 = None
        self.binary_image2 = None
        self.drift_correction_already_adjusted: bool = False
        # Create empty variables to fill in the following functions
        self.binary_image = np.zeros(self.image.shape[:2], dtype=np.uint8)
        self.previous_binary_image = None
        self.validated_shapes = np.zeros(self.image.shape[:2], dtype=np.uint8)
        self.centroids = 0
        self.shape_number = 0
        self.concomp_stats = 0
        self.y_boundaries = None
        self.x_boundaries = None
        self.crop_coord = None
        self.cropped: bool = False
        self.subtract_background = None
        self.subtract_background2 = None
        self.im_combinations = None
        self.bgr = image
        self.colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
        self.spot_shapes = None
        self.all_c_spaces = TDict()
        self.hsv = None
        self.hls = None
        self.lab = None
        self.luv = None
        self.yuv = None

    """
    I/ Image modification for segmentation through thresholding
    This part contains methods to convert, visualize, filter and threshold one image.
    """
    def convert_and_segment(self, c_space_dict, color_number=2, biomask=None,
                            backmask=None, subtract_background=None, subtract_background2=None, grid_segmentation=False,
                            lighter_background=None, side_length=20, step=5, int_variation_thresh=None, mask=None,
                            filter_spec=None):
        if self.already_greyscale:
            self.segmentation(logical='None', color_number=2, biomask=biomask, backmask=backmask,
                              grid_segmentation=grid_segmentation, lighter_background=lighter_background,
                              side_length=side_length, step=step, int_variation_thresh=int_variation_thresh, mask=mask,
                              filter_spec=filter_spec)
        else:
            if len(self.all_c_spaces) == 0:
                self.all_c_spaces = get_color_spaces(self.bgr)
            # if c_space_dict['logical'] != 'None':
            first_dict = TDict()
            second_dict = TDict()
            for k, v in c_space_dict.items():
                if k != 'logical' and v.sum() > 0:
                    if k[-1] != '2':
                        first_dict[k] = v
                    else:
                        second_dict[k[:-1]] = v
            logging.info(first_dict)
            self.image = combine_color_spaces(first_dict, self.all_c_spaces, subtract_background)
            if len(second_dict) > 0:
                self.image2 = combine_color_spaces(second_dict, self.all_c_spaces, subtract_background2)
                self.segmentation(logical=c_space_dict['logical'], color_number=color_number, biomask=biomask,
                                  backmask=backmask, grid_segmentation=grid_segmentation,
                                  lighter_background=lighter_background, side_length=side_length, step=step,
                                  int_variation_thresh=int_variation_thresh, mask=mask, filter_spec=filter_spec)
            else:
                self.segmentation(logical='None', color_number=color_number, biomask=biomask,
                                  backmask=backmask, grid_segmentation=grid_segmentation,
                                  lighter_background=lighter_background, side_length=side_length, step=step,
                                  int_variation_thresh=int_variation_thresh, mask=mask, filter_spec=filter_spec)
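
    # Editor's note (illustrative, not from the original source): the
    # c_space_dict consumed above maps color space names to channel weights,
    # optionally with a second combination whose keys carry a trailing "2" and
    # a 'logical' key ('Or', 'And', 'Xor' or 'None') telling segmentation()
    # how to merge the two binary images. For example (hypothetical values):
    #     c_space_dict = {"lab": np.array([1, 0, 0]),
    #                     "logical": "Or",
    #                     "hsv2": np.array([0, 0, 1])}
    # would segment the L channel of LAB and the V channel of HSV separately,
    # then OR the two masks together.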

    def segmentation(self, logical='None', color_number=2, biomask=None, backmask=None, bio_label=None, bio_label2=None, grid_segmentation=False, lighter_background=None, side_length=20, step=5, int_variation_thresh=None, mask=None, filter_spec=None):
        if filter_spec is not None and filter_spec["filter1_type"] != "":
            self.image = apply_filter(self.image, filter_spec["filter1_type"], filter_spec["filter1_param"])
        if color_number > 2:
            self.kmeans(color_number, biomask, backmask, logical, bio_label, bio_label2)
        elif grid_segmentation:
            if lighter_background is None:
                self.binary_image = otsu_thresholding(self.image)
                lighter_background = self.binary_image.sum() > (self.binary_image.size / 2)
            if int_variation_thresh is None:
                int_variation_thresh = 100 - (np.ptp(self.image) * 90 / 255)
            self.grid_segmentation(lighter_background, side_length, step, int_variation_thresh, mask)
        else:
            # logging.info("Segment the image using Otsu thresholding")
            self.binary_image = otsu_thresholding(self.image)
            if self.previous_binary_image is not None:
                if (self.binary_image * (1 - self.previous_binary_image)).sum() > (self.binary_image * self.previous_binary_image).sum():
                    # Ones of the binary image have more in common with the background than with the specimen
                    self.binary_image = 1 - self.binary_image
                    # self.binary_image = self.correct_with_previous_binary_image(self.binary_image.copy())
            if logical != 'None':
                # logging.info("Segment the image using Otsu thresholding")
                if filter_spec is not None and filter_spec["filter2_type"] != "":
                    self.image2 = apply_filter(self.image2, filter_spec["filter2_type"], filter_spec["filter2_param"])
                self.binary_image2 = otsu_thresholding(self.image2)
                if self.previous_binary_image is not None:
                    if (self.binary_image2 * (1 - self.previous_binary_image)).sum() > (
                            self.binary_image2 * self.previous_binary_image).sum():
                        self.binary_image2 = 1 - self.binary_image2
                        # self.binary_image2 = self.correct_with_previous_binary_image(self.binary_image2.copy())
        if logical != 'None':
            if logical == 'Or':
                self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
            elif logical == 'And':
                self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
            elif logical == 'Xor':
                self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
            self.binary_image = self.binary_image.astype(np.uint8)
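
    # Editor's worked example (not from the source): the default
    # int_variation_thresh above scales with the image's intensity range:
    #     np.ptp(image) == 255 (full range) -> 100 - 255 * 90 / 255 = 10
    #     np.ptp(image) == 0   (flat image)  -> 100 -   0 * 90 / 255 = 100
    # so high-contrast images get a low homogeneity threshold for
    # grid_segmentation, and low-contrast images a high one.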

    def correct_with_previous_binary_image(self, binary_image):
        # If the binary image is more than twenty times bigger or smaller than the previous binary image,
        # Otsu thresholding failed; use a fixed threshold of 127 instead.
        if binary_image.sum() > self.previous_binary_image.sum() * 20 or binary_image.sum() < self.previous_binary_image.sum() * 0.05:
            binary_adaptive = cv2.adaptiveThreshold(bracket_to_uint8_image_contrast(self.image), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)  # computed but currently unused
            # from skimage import filters
            # threshold_value = filters.threshold_li(self.image)
            # binary_image = self.image >= threshold_value
            binary_image = self.image >= 127
        # And again, make sure that these pixels are shared with the previous binary image
        if (binary_image * (1 - self.previous_binary_image)).sum() > (binary_image * self.previous_binary_image).sum():
            binary_image = 1 - binary_image
        return binary_image.astype(np.uint8)

    def get_largest_shape(self):
        shape_number, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
        sorted_area = np.sort(stats[1:, 4])
        self.validated_shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
        self.validated_shapes[np.nonzero(shapes == np.nonzero(stats[:, 4] == sorted_area[-1])[0])] = 1
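
    # Editor's note (illustrative sketch, not from the original source): since
    # stats[:, 4] holds component areas and row 0 is the background label, the
    # same selection can be written directly with argmax:
    #     largest_label = 1 + np.argmax(stats[1:, 4])
    #     self.validated_shapes = (shapes == largest_label).astype(np.uint8)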

    def generate_subtract_background(self, c_space_dict):
        logging.info("Generate background using the generate_subtract_background method of OneImageAnalysis class")
        if len(self.all_c_spaces) == 0 and not self.already_greyscale:
            self.all_c_spaces = get_color_spaces(self.bgr)
        self.convert_and_segment(c_space_dict, grid_segmentation=False)
        # self.image = generate_color_space_combination(c_space_dict, self.all_c_spaces)
        disk_size = int(np.floor(np.sqrt(np.min(self.bgr.shape[:2])) / 2))
        disk = np.uint8(Ellipse((disk_size, disk_size)).create())
        self.subtract_background = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, disk)
        if self.image2 is not None:
            self.subtract_background2 = cv2.morphologyEx(self.image2, cv2.MORPH_OPEN, disk)
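
    # Editor's note (not from the source): the morphological opening above
    # estimates the background with an elliptical structuring element whose
    # size grows with the image, so features smaller than the disk are erased
    # while slowly varying illumination is kept. Worked example: for an image
    # whose smallest side is 1000 pixels,
    #     disk_size = int(np.floor(np.sqrt(1000) / 2)) = 15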

    def check_if_image_border_attest_drift_correction(self):
        t = np.all(self.binary_image[0, :])
        b = np.all(self.binary_image[-1, :])
        l = np.all(self.binary_image[:, 0])
        r = np.all(self.binary_image[:, -1])
        if (t and b) or (t and r) or (t and l) or (b and l) or (b and r) or (l and r):
            cc_nb, shapes = cv2.connectedComponents(self.binary_image)
            if cc_nb == 2:
                return True
            else:
                return False
        else:
            return False
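
    # Editor's note (descriptive, not from the source): this returns True when
    # at least two image borders are entirely detected as foreground AND the
    # detection forms a single connected component (cc_nb == 2 because label 0
    # is the background). Such a frame-like detection is taken here as the
    # signature of borders left by drift correction rather than a real
    # specimen.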

    def adjust_to_drift_correction(self, logical):
        if not self.drift_correction_already_adjusted:
            self.drift_correction_already_adjusted = True

            mask = cv2.dilate(self.binary_image, kernel=cross_33)
            mask -= self.binary_image
            mask = np.nonzero(mask)

            drift_correction = np.mean(self.image[mask[0], mask[1]])
            self.image[np.nonzero(self.binary_image)] = drift_correction
            threshold = get_otsu_threshold(self.image)
            binary = (self.image > threshold)
            # while np.any(binary * self.binary_image) and threshold > 1:  # binary.sum() > self.binary_image.sum()
            #     threshold -= 1
            #     binary1 = (self.image > threshold)
            #     binary2 = np.logical_not(binary1)
            #     if binary1.sum() < binary2.sum():
            #         binary = binary1
            #     else:
            #         binary = binary2
            self.binary_image = binary.astype(np.uint8)

            if self.image2 is not None:
                drift_correction2 = np.mean(self.image2[mask[0], mask[1]])
                self.image2[np.nonzero(self.binary_image)] = drift_correction2
                threshold = get_otsu_threshold(self.image2)
                binary1 = (self.image2 > threshold)
                binary2 = np.logical_not(binary1)
                if binary1.sum() < binary2.sum():
                    binary = binary1
                else:
                    binary = binary2
                while np.any(binary * self.binary_image2) and threshold > 1:  # binary.sum() > self.binary_image.sum()
                    threshold -= 1
                    binary1 = (self.image2 > threshold)
                    binary2 = np.logical_not(binary1)
                    if binary1.sum() < binary2.sum():
                        binary = binary1
                    else:
                        binary = binary2
                self.binary_image2 = binary.astype(np.uint8)
                if logical == 'Or':
                    self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
                elif logical == 'And':
                    self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
                elif logical == 'Xor':
                    self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
                self.binary_image = self.binary_image.astype(np.uint8)

    def set_spot_shapes_and_size_confint(self, spot_shape):
        self.spot_size_confints = np.arange(0.75, 0.00, -0.05)  # np.concatenate((np.arange(0.75, 0.00, -0.05), np.arange(0.05, 0.00, -0.005)))
        if spot_shape is None:
            self.spot_shapes = np.tile(["circle", "rectangle"], len(self.spot_size_confints))
            self.spot_size_confints = np.repeat(self.spot_size_confints, 2)
        else:
            self.spot_shapes = np.repeat(spot_shape, len(self.spot_size_confints))
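
    # Editor's worked example (not from the source): np.arange(0.75, 0.00, -0.05)
    # yields 15 confidence levels (0.75, 0.70, ..., 0.05). When spot_shape is
    # None, both shapes are tried at every level, giving 30 (shape, confidence)
    # candidate pairs:
    #     spot_shapes        -> ["circle", "rectangle", "circle", ...]  (30 items)
    #     spot_size_confints -> [0.75, 0.75, 0.70, 0.70, ...]           (30 items)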

    def find_first_im_csc(self, sample_number=None, several_blob_per_arena=True, spot_shape=None, spot_size=None, kmeans_clust_nb=None, biomask=None, backmask=None, color_space_dictionaries=None, carefully=False):
        logging.info("Prepare color space lists, dictionaries and matrices")
        if len(self.all_c_spaces) == 0:
            self.all_c_spaces = get_color_spaces(self.bgr)
        if color_space_dictionaries is None:
            if carefully:
                colorspace_list = ["bgr", "lab", "hsv", "luv", "hls", "yuv"]
            else:
                colorspace_list = ["lab", "hsv"]
            color_space_dictionaries = TList()
            for c_space in colorspace_list:
                for i in np.arange(3):
                    channels = np.array((0, 0, 0), dtype=np.int8)
                    channels[i] = 1
                    csc_dict = TDict()
                    csc_dict[c_space] = channels
                    color_space_dictionaries.append(csc_dict)

        # if not several_blob_per_arena:
        self.set_spot_shapes_and_size_confint(spot_shape)

        self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 11), dtype=np.uint32)
        # ["c1", "c2", "c3", "unaltered_cc_nb", "concomp_nb", "total_area", "width_std", "height_std", "centrodist_std", "biosum", "backsum"]
        unaltered_cc_nb, cc_nb, area, width_std, height_std, area_std, biosum, backsum = 3, 4, 5, 6, 7, 8, 9, 10
        self.saved_images_list = TList()
        self.converted_images_list = TList()
        self.saved_color_space_list = list()
        self.saved_csc_nb = 0
        self.save_combination_thread = SaveCombinationThread(self)
        get_one_channel_result = True
        combine_channels = False

        for csc_dict in color_space_dictionaries:
            logging.info(f"Try detection with each color space channel, one by one. Currently analyzing {csc_dict}")
            list_args = [self, get_one_channel_result, combine_channels, csc_dict, several_blob_per_arena,
                         sample_number, spot_size, kmeans_clust_nb, biomask, backmask, None]
            ProcessFirstImage(list_args)
            # logging.info(csc_dict)

        if sample_number is not None and carefully:
            # tic = default_timer()
            # Try to add csc together
            # possibilities = np.arange(len(self.saved_color_space_list))
            possibilities = []
            if self.saved_csc_nb > 6:
                different_color_spaces = np.unique(self.saved_color_space_list)
                for color_space in different_color_spaces:
                    csc_idx = np.nonzero(np.isin(self.saved_color_space_list, color_space))[0]
                    possibilities.append(csc_idx[0] + np.argmin(self.combination_features[csc_idx, area_std]))
                if len(possibilities) < 6:
                    remaining_possibilities = np.arange(len(self.saved_color_space_list))
                    remaining_possibilities = remaining_possibilities[np.logical_not(np.isin(remaining_possibilities, possibilities))]
                    while len(possibilities) < 6:
                        new_possibility = np.argmin(self.combination_features[remaining_possibilities, area_std])
                        possibilities.append(new_possibility)
                        remaining_possibilities = remaining_possibilities[remaining_possibilities != new_possibility]

            pool = mp.ThreadPool(processes=os.cpu_count() - 1)
            get_one_channel_result = False
            combine_channels = True
            list_args = [[self, get_one_channel_result, combine_channels, i, several_blob_per_arena, sample_number,
                          spot_size, kmeans_clust_nb, biomask, backmask, possibilities] for i in possibilities]
            for process_i in pool.imap_unordered(ProcessFirstImage, list_args):
                pass

        # Get the most and the least covered images and the 2 best biomask and backmask scores,
        # to try combinations of those
        if self.saved_csc_nb > 1:
            coverage = np.argsort(self.combination_features[:self.saved_csc_nb, area])
            most1 = coverage[-1]; most2 = coverage[-2]
            least1 = coverage[0]; least2 = coverage[1]
            if biomask is not None:
                bio_sort = np.argsort(self.combination_features[:self.saved_csc_nb, biosum])
                bio1 = bio_sort[-1]; bio2 = bio_sort[-2]
            if backmask is not None:
                back_sort = np.argsort(self.combination_features[:self.saved_csc_nb, backsum])
                back1 = back_sort[-1]; back2 = back_sort[-2]

            # Try a logical And between the most covered images
            # Should only need one instantiation
            process_i = ProcessFirstImage(
                [self, False, False, None, several_blob_per_arena, sample_number, spot_size, kmeans_clust_nb, biomask, backmask, None])
            process_i.binary_image = np.logical_and(self.saved_images_list[most1], self.saved_images_list[most2]).astype(np.uint8)
            process_i.image = self.converted_images_list[most1]
            process_i.process_binary_image()
            process_i.csc_dict = {list(self.saved_color_space_list[most1].keys())[0]: self.combination_features[most1, :3],
                                  "logical": "And",
                                  list(self.saved_color_space_list[most2].keys())[0] + "2": self.combination_features[most2, :3]}
            process_i.unaltered_concomp_nb = np.min(self.combination_features[(most1, most2), unaltered_cc_nb])
            process_i.total_area = process_i.binary_image.sum()
            self.save_combination_features(process_i)
            # Try a logical Or between the least covered images
            process_i.image = self.converted_images_list[least1]
            process_i.binary_image = np.logical_or(self.saved_images_list[least1], self.saved_images_list[least2]).astype(np.uint8)
            process_i.process_binary_image()
            process_i.csc_dict = {list(self.saved_color_space_list[least1].keys())[0]: self.combination_features[least1, :3],
                                  "logical": "Or",
                                  list(self.saved_color_space_list[least2].keys())[0] + "2": self.combination_features[least2, :3]}
            process_i.unaltered_concomp_nb = np.max(self.combination_features[(least1, least2), unaltered_cc_nb])
            process_i.total_area = process_i.binary_image.sum()
            self.save_combination_features(process_i)

            # self.save_combination_features(csc_dict, unaltered_concomp_nb, self.binary_image.sum(), biomask, backmask)

            # If most images are very low in biosum or backsum, try to mix them together to improve that score
            # Do a logical And between the two best biomasks
            if biomask is not None:
                if not np.all(np.isin((bio1, bio2), (most1, most2))):
                    process_i.image = self.converted_images_list[bio1]
                    process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[bio2]).astype(
                        np.uint8)
                    process_i.process_binary_image()
                    process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
                                          "logical": "And",
                                          list(self.saved_color_space_list[bio2].keys())[0] + "2": self.combination_features[bio2, :3]}
                    process_i.unaltered_concomp_nb = np.min(self.combination_features[(bio1, bio2), unaltered_cc_nb])
                    process_i.total_area = process_i.binary_image.sum()
                    self.save_combination_features(process_i)

            # Do a logical And between the two best backmasks
            if backmask is not None:
                if not np.all(np.isin((back1, back2), (most1, most2))):
                    process_i.image = self.converted_images_list[back1]
                    process_i.binary_image = np.logical_and(self.saved_images_list[back1], self.saved_images_list[back2]).astype(
                        np.uint8)
                    process_i.process_binary_image()
                    process_i.csc_dict = {list(self.saved_color_space_list[back1].keys())[0]: self.combination_features[back1, :3],
                                          "logical": "And",
                                          list(self.saved_color_space_list[back2].keys())[0] + "2": self.combination_features[back2, :3]}
                    process_i.unaltered_concomp_nb = np.min(self.combination_features[(back1, back2), unaltered_cc_nb])
                    process_i.total_area = process_i.binary_image.sum()
                    self.save_combination_features(process_i)

            # Do a logical Or between the best biomask and the best backmask
            if biomask is not None and backmask is not None:
                if not np.all(np.isin((bio1, back1), (least1, least2))):
                    process_i.image = self.converted_images_list[bio1]
                    process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[back1]).astype(
                        np.uint8)
                    process_i.process_binary_image()
                    process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
                                          "logical": "Or",
                                          list(self.saved_color_space_list[back1].keys())[0] + "2": self.combination_features[back1, :3]}
                    process_i.unaltered_concomp_nb = np.max(self.combination_features[(bio1, back1), unaltered_cc_nb])
                    # self.save_combination_features(csc_dict, unaltered_concomp_nb, self.binary_image.sum(), biomask,
                    #                                backmask)
                    process_i.total_area = self.binary_image.sum()
                    self.save_combination_features(process_i)

            if self.save_combination_thread.is_alive():
                self.save_combination_thread.join()
            self.combination_features = self.combination_features[:self.saved_csc_nb, :]
            # Only keep the rows that fulfill the following conditions:
            # - their conncomp number is lower than 3 times the smallest conncomp number
            # - OR the minimal area variations
            # - OR the minimal width variations
            # - OR the minimal height variations
            # - AND/OR their segmentation fits the biomask and the backmask
            width_std_fit = self.combination_features[:, width_std] == np.min(self.combination_features[:, width_std])
            height_std_fit = self.combination_features[:, height_std] == np.min(self.combination_features[:, height_std])
            area_std_fit = self.combination_features[:, area_std] < np.min(self.combination_features[:, area_std]) * 10
            fit = np.logical_or(np.logical_or(width_std_fit, height_std_fit), area_std_fit)
            biomask_fit = np.ones(self.saved_csc_nb, dtype=bool)
            backmask_fit = np.ones(self.saved_csc_nb, dtype=bool)
            if biomask is not None or backmask is not None:
                if biomask is not None:
                    biomask_fit = self.combination_features[:, biosum] > 0.9 * len(biomask[0])
                if backmask is not None:
                    backmask_fit = self.combination_features[:, backsum] > 0.9 * len(backmask[0])
                # First test a logical OR between the precedent options and the mask fits.
                fit = np.logical_or(fit, np.logical_and(biomask_fit, backmask_fit))
                # If this is not stringent enough, use a logical AND and progressively increase the
                # proportion of pixels that must match the biomask and the backmask
                if np.sum(fit) > 5:
                    to_add = 0
                    while np.sum(fit) > 5 and to_add <= 0.25:
                        if biomask is not None:
                            biomask_fit = self.combination_features[:, biosum] > (0.75 + to_add) * len(biomask[0])
                        if backmask is not None:
                            backmask_fit = self.combination_features[:, backsum] > (0.75 + to_add) * len(backmask[0])
                        test_fit = np.logical_and(fit, np.logical_and(biomask_fit, backmask_fit))
                        if np.sum(test_fit) != 0:
                            fit = test_fit
                        to_add += 0.05
        else:
            self.combination_features = self.combination_features[:self.saved_csc_nb, :]
            fit = np.array([True])
        # If saved_csc_nb is too low, try bool operators to mix them together, to fill holes for instance
        # Order the table according to the number of shapes that have been removed by the filters
        # cc_efficiency_order = np.argsort(self.combination_features[:, unaltered_cc_nb] - self.combination_features[:, cc_nb])
        cc_efficiency_order = np.argsort(self.combination_features[:, area_std])
        # Save and return a dictionary containing the selected color space combinations
        # and their corresponding binary images

        # first_im_combinations = [i for i in np.arange(fit.sum())]
        self.im_combinations = []
        for saved_csc in cc_efficiency_order:
            if fit[saved_csc]:
                self.im_combinations.append({})
                # self.im_combinations[len(self.im_combinations) - 1]["csc"] = self.saved_color_space_list[saved_csc]
                self.im_combinations[len(self.im_combinations) - 1]["csc"] = {}
                self.im_combinations[len(self.im_combinations) - 1]["csc"]['logical'] = 'None'
                for k, v in self.saved_color_space_list[saved_csc].items():
                    self.im_combinations[len(self.im_combinations) - 1]["csc"][k] = v

                if backmask is not None:
                    shape_number, shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
                    if np.any(shapes[backmask]):
                        shapes[np.isin(shapes, np.unique(shapes[backmask]))] = 0
                        self.saved_images_list[saved_csc] = (shapes > 0).astype(np.uint8)
                if biomask is not None:
                    self.saved_images_list[saved_csc][biomask] = 1
                if backmask is not None or biomask is not None:
                    self.combination_features[saved_csc, cc_nb], shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
                    self.combination_features[saved_csc, cc_nb] -= 1
                self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = self.saved_images_list[saved_csc]
                self.im_combinations[len(self.im_combinations) - 1]["shape_number"] = self.combination_features[saved_csc, cc_nb]
                self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = self.converted_images_list[saved_csc]

        # logging.info(default_timer()-tic)
        self.saved_color_space_list = []
        self.saved_images_list = None
        self.converted_images_list = None
        self.combination_features = None
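
    # Editor's note (descriptive, not from the source): after this method
    # runs, self.im_combinations is a list of candidate segmentations, each a
    # dict with the keys "csc" (the color space combination, including its
    # 'logical' entry), "binary_image", "shape_number" and "converted_image",
    # ordered by increasing area standard deviation so the most homogeneous
    # candidate comes first; the retained candidates are then shown to the
    # user, as described in the module docstring.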

    # def save_combination_features(self, process_i):
    #     if self.save_combination_thread.is_alive():
    #         self.save_combination_thread.join()
    #     self.save_combination_thread = SaveCombinationThread(self)
    #     self.save_combination_thread.process_i = process_i
    #     self.save_combination_thread.start()

    def save_combination_features(self, process_i):
        self.saved_images_list.append(process_i.validated_shapes)
        self.converted_images_list.append(np.round(process_i.image).astype(np.uint8))
        self.saved_color_space_list.append(process_i.csc_dict)
        self.combination_features[self.saved_csc_nb, :3] = list(process_i.csc_dict.values())[0]
        self.combination_features[self.saved_csc_nb, 3] = process_i.unaltered_concomp_nb - 1  # unaltered_cc_nb
        self.combination_features[self.saved_csc_nb, 4] = process_i.shape_number  # cc_nb
        self.combination_features[self.saved_csc_nb, 5] = process_i.total_area  # area
        self.combination_features[self.saved_csc_nb, 6] = np.std(process_i.stats[1:, 2])  # width_std
        self.combination_features[self.saved_csc_nb, 7] = np.std(process_i.stats[1:, 3])  # height_std
        self.combination_features[self.saved_csc_nb, 8] = np.std(process_i.stats[1:, 4])  # area_std
        if process_i.biomask is not None:
            self.combination_features[self.saved_csc_nb, 9] = np.sum(
                process_i.validated_shapes[process_i.biomask[0], process_i.biomask[1]])
        if process_i.backmask is not None:
            self.combination_features[self.saved_csc_nb, 10] = np.sum(
                (1 - process_i.validated_shapes)[process_i.backmask[0], process_i.backmask[1]])
        self.saved_csc_nb += 1

    def update_current_images(self, current_combination_id):
        self.image = self.im_combinations[current_combination_id]["converted_image"]
        self.validated_shapes = self.im_combinations[current_combination_id]["binary_image"]

    def find_last_im_csc(self, concomp_nb, total_surfarea, max_shape_size, out_of_arenas=None, ref_image=None,
                         subtract_background=None, kmeans_clust_nb=None, biomask=None, backmask=None,
                         color_space_dictionaries=None, carefully=False):
        if len(self.all_c_spaces) == 0:
            self.all_c_spaces = get_color_spaces(self.bgr)
        if color_space_dictionaries is None:
            if carefully:
                colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
            else:
                colorspace_list = TList(("lab", "hsv"))
            color_space_dictionaries = TList()
            for c_space in colorspace_list:
                for i in np.arange(3):
                    channels = np.array((0, 0, 0), dtype=np.int8)
                    channels[i] = 1
                    csc_dict = TDict()
                    csc_dict[c_space] = channels
                    color_space_dictionaries.append(csc_dict)
        if ref_image is not None:
            ref_image = cv2.dilate(ref_image, cross_33)
        else:
            ref_image = np.ones(self.bgr.shape[:2], dtype=np.uint8)
        if out_of_arenas is not None:
            out_of_arenas_threshold = 0.01 * out_of_arenas.sum()
        else:
            out_of_arenas = np.zeros(self.bgr.shape[:2], dtype=np.uint8)
            out_of_arenas_threshold = 1
        self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 9), dtype=np.uint32)
        cc_nb_idx, area_idx, out_of_arenas_idx, surf_in_common_idx, biosum_idx, backsum_idx = 3, 4, 5, 6, 7, 8
        self.saved_images_list = TList()
        self.converted_images_list = TList()
        self.saved_color_space_list = list()
        self.saved_csc_nb = 0
        self.save_combination_thread = SaveCombinationThread(self)

        # One-channel processing
        potentials = TDict()
        for csc_dict in color_space_dictionaries:
            self.image = combine_color_spaces(csc_dict, self.all_c_spaces, subtract_background)
            # self.generate_color_space_combination(c_space_dict, subtract_background)
            if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
                self.kmeans(kmeans_clust_nb, biomask, backmask)
            else:
                self.binary_image = otsu_thresholding(self.image)
            surf = np.sum(self.binary_image)
            if surf < total_surfarea:
                nb, shapes = cv2.connectedComponents(self.binary_image)
                outside_pixels = np.sum(self.binary_image * out_of_arenas)
                if outside_pixels < out_of_arenas_threshold:
                    if (nb > concomp_nb[0]) and (nb < concomp_nb[1]):
                        in_common = np.sum(ref_image * self.binary_image)
                        if in_common > 0:
                            nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
                            nb -= 1
                            if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
                                # oia.viewing()
                                c_space = list(csc_dict.keys())[0]
                                self.converted_images_list.append(self.image)
                                self.saved_images_list.append(self.binary_image)
                                self.saved_color_space_list.append(csc_dict)
                                self.combination_features[self.saved_csc_nb, :3] = csc_dict[c_space]
                                self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
                                self.combination_features[self.saved_csc_nb, area_idx] = surf
                                self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
                                self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
                                if biomask is not None:
                                    self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
                                        self.binary_image[biomask[0], biomask[1]])
                                if backmask is not None:
                                    self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
                                        (1 - self.binary_image)[backmask[0], backmask[1]])
                                if np.isin(c_space, list(potentials.keys())):
                                    potentials[c_space] += csc_dict[c_space]
                                else:
                                    potentials[c_space] = csc_dict[c_space]
                                self.saved_csc_nb += 1
        if len(potentials) > 0:
            # All-combination processing

            # Add a combination of all selected channels:
            # all_potential_combinations.append(potentials)
            self.image = combine_color_spaces(potentials, self.all_c_spaces, subtract_background)
            # self.generate_color_space_combination(potentials, subtract_background)
            if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
                self.kmeans(kmeans_clust_nb, biomask, backmask)
            else:
                self.binary_image = otsu_thresholding(self.image)
                # self.thresholding()
            surf = self.binary_image.sum()
            nb, shapes = cv2.connectedComponents(self.binary_image)
            nb -= 1
            outside_pixels = np.sum(self.binary_image * out_of_arenas)
            in_common = np.sum(ref_image * self.binary_image)
            self.converted_images_list.append(self.image)
            self.saved_images_list.append(self.binary_image)
            self.saved_color_space_list.append(potentials)
            self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
            self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
            self.combination_features[self.saved_csc_nb, area_idx] = surf
            self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
            self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
            if biomask is not None:
                self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
                    self.binary_image[biomask[0], biomask[1]])
            if backmask is not None:
                self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
                    (1 - self.binary_image)[backmask[0], backmask[1]])
            self.saved_csc_nb += 1
            # current = {"total_area": surf, "concomp_nb": nb, "out_of_arenas": outside_pixels,
            #            "surf_in_common": in_common}
            # combination_features = combination_features.append(current, ignore_index=True)

            # All-combination processing: try to remove color spaces one by one;
            # the loop runs until one color space remains or the removal of one implies a strong enough area change
            i = 0
            original_length = len(potentials)
            while np.logical_and(len(potentials) > 1, i < original_length // 2):
                color_space_to_remove = TList()
                previous_c_space = list(potentials.keys())[-1]
                for c_space in potentials.keys():
                    try_potentials = potentials.copy()
                    try_potentials.pop(c_space)
                    if i > 0:
                        try_potentials.pop(previous_c_space)
                    self.image = combine_color_spaces(try_potentials, self.all_c_spaces, subtract_background)
                    # self.generate_color_space_combination(try_potentials, subtract_background)
                    if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
                        self.kmeans(kmeans_clust_nb, biomask, backmask)
                    else:
                        self.binary_image = otsu_thresholding(self.image)
                        # self.thresholding()
                    surf = np.sum(self.binary_image)
                    if surf < total_surfarea:
                        nb, shapes = cv2.connectedComponents(self.binary_image)
                        outside_pixels = np.sum(self.binary_image * out_of_arenas)
                        if outside_pixels < out_of_arenas_threshold:
                            if (nb > concomp_nb[0]) and (nb < concomp_nb[1]):
                                in_common = np.sum(ref_image * self.binary_image)
                                if in_common > 0:
                                    nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
                                    nb -= 1
                                    if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
                                        # If removing a color space still fits the requirements, we store the resulting values
                                        self.converted_images_list.append(self.image)
                                        self.saved_images_list.append(self.binary_image)
                                        self.saved_color_space_list.append(try_potentials)
                                        self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
                                        self.combination_features[self.saved_csc_nb, area_idx] = surf
                                        self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
                                        self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
                                        if biomask is not None:
                                            self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
                                                self.binary_image[biomask[0], biomask[1]])
                                        if backmask is not None:
                                            self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
                                                (1 - self.binary_image)[backmask[0], backmask[1]])
                                        self.saved_csc_nb += 1
                                        # all_potential_combinations.append(try_potentials)
                                        color_space_to_remove.append(c_space)
                                        if i > 0:
                                            color_space_to_remove.append(previous_c_space)
                    # If it did not pass every "if" layer, that color space is definitely kept
                    previous_c_space = c_space
                color_space_to_remove = np.unique(color_space_to_remove)
                for remove_col_space in color_space_to_remove:
                    potentials.pop(remove_col_space)
                i += 1
            if np.logical_and(len(potentials) > 0, i > 1):
                self.converted_images_list.append(self.image)
                self.saved_images_list.append(self.binary_image)
                self.saved_color_space_list.append(potentials)
                self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
                self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
                self.combination_features[self.saved_csc_nb, area_idx] = surf
                self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
                self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
                if biomask is not None:
                    self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
                        self.binary_image[biomask[0], biomask[1]])
                if backmask is not None:
                    self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
                        (1 - self.binary_image)[backmask[0], backmask[1]])
                self.saved_csc_nb += 1
                # all_potential_combinations.append(potentials)

        self.combination_features = self.combination_features[:self.saved_csc_nb, :]
        # Among all potentials, select the best one, according to criteria of decreasing importance
        # a = combination_features.sort_values(by=["surf_in_common"], ascending=False)
        # self.channel_combination = all_potential_combinations[a[:1].index[0]]
        cc_efficiency_order = np.argsort(self.combination_features[:, surf_in_common_idx])

        # Save and return a dictionary containing the selected color space combinations
        # and their corresponding binary images
        self.im_combinations = []
        for saved_csc in cc_efficiency_order:
            if len(self.saved_color_space_list[saved_csc]) > 0:
                self.im_combinations.append({})
                self.im_combinations[len(self.im_combinations) - 1]["csc"] = {}
                self.im_combinations[len(self.im_combinations) - 1]["csc"]['logical'] = 'None'
                for k, v in self.saved_color_space_list[saved_csc].items():
                    self.im_combinations[len(self.im_combinations) - 1]["csc"][k] = v
                self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = self.saved_images_list[saved_csc]
                self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = np.round(
                    self.converted_images_list[saved_csc]).astype(np.uint8)
        self.saved_color_space_list = []
        self.saved_images_list = None
        self.converted_images_list = None
        self.combination_features = None
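
    # Editor's note (descriptive, not from the source): unlike
    # find_first_im_csc, this method implements the optional refinement
    # described in the module docstring: it orders candidates by the surface
    # they share with the reference segmentation (surf_in_common), and its
    # im_combinations entries carry "csc", "binary_image" and
    # "converted_image" keys (no "shape_number").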

    """
    Thresholding is a very simple and fast segmentation method. Kmeans is implemented in a function below.
    """
    def thresholding(self, luminosity_threshold=None, lighter_background=None):
        if luminosity_threshold is not None:
            binarymg = np.zeros(self.image.shape, dtype=np.uint8)
            if lighter_background:
                binarymg[self.image < luminosity_threshold] = 1
            else:
                binarymg[self.image > luminosity_threshold] = 1
        else:
            ret, binarymg = cv2.threshold(self.image, 0, 1, cv2.THRESH_OTSU)
            # binarymg = binarymg - 1
            # Make sure that blobs are 1 and background is 0
            if np.sum(binarymg) > np.sum(1 - binarymg):
                binarymg = 1 - binarymg
        self.binary_image = binarymg
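
    # Editor's usage sketch (hypothetical values, not from the source): with a
    # lighter background, pixels darker than the threshold are the specimen:
    #     oia = OneImageAnalysis(grey_image)
    #     oia.thresholding(luminosity_threshold=120, lighter_background=True)
    #     # oia.binary_image now holds 1 wherever grey_image < 120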

    def kmeans(self, cluster_number, biomask=None, backmask=None, logical='None', bio_label=None, bio_label2=None):
        image = self.image.reshape((-1, 1))
        image = np.float32(image)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        compactness, label, center = cv2.kmeans(image, cluster_number, None, criteria, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
        kmeans_image = np.uint8(label.flatten().reshape(self.image.shape[:2]))
        sum_per_label = np.zeros(cluster_number)
        self.binary_image = np.zeros(self.image.shape[:2], np.uint8)
        if self.previous_binary_image is not None:
            binary_images = []
            image_scores = np.zeros(cluster_number, np.uint64)
            for i in range(cluster_number):
                binary_image_i = np.zeros(self.image.shape[:2], np.uint8)
                binary_image_i[np.nonzero(kmeans_image == i)] = 1
                image_scores[i] = (binary_image_i * self.previous_binary_image).sum()
                binary_images.append(binary_image_i)
            self.binary_image[np.nonzero(kmeans_image == np.argmax(image_scores))] = 1
        elif bio_label is not None:
            self.binary_image[np.nonzero(kmeans_image == bio_label)] = 1
            self.bio_label = bio_label
        else:
            if biomask is not None:
                all_labels = kmeans_image[biomask[0], biomask[1]]
                for i in range(cluster_number):
                    sum_per_label[i] = (all_labels == i).sum()
                self.bio_label = np.nonzero(sum_per_label == np.max(sum_per_label))
            elif backmask is not None:
                all_labels = kmeans_image[backmask[0], backmask[1]]
                for i in range(cluster_number):
                    sum_per_label[i] = (all_labels == i).sum()
                self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
            else:
                for i in range(cluster_number):
                    sum_per_label[i] = (kmeans_image == i).sum()
                self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
            self.binary_image[np.nonzero(kmeans_image == self.bio_label)] = 1

        if logical != 'None':
            image = self.image2.reshape((-1, 1))
            image = np.float32(image)
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
            compactness, label, center = cv2.kmeans(image, cluster_number, None, criteria, attempts=10,
                                                    flags=cv2.KMEANS_RANDOM_CENTERS)
            kmeans_image = np.uint8(label.flatten().reshape(self.image.shape[:2]))
            sum_per_label = np.zeros(cluster_number)
            self.binary_image2 = np.zeros(self.image.shape[:2], np.uint8)
            if self.previous_binary_image is not None:
                binary_images = []
                image_scores = np.zeros(cluster_number, np.uint64)
                for i in range(cluster_number):
                    binary_image_i = np.zeros(self.image.shape[:2], np.uint8)
                    binary_image_i[np.nonzero(kmeans_image == i)] = 1
                    image_scores[i] = (binary_image_i * self.previous_binary_image).sum()
                    binary_images.append(binary_image_i)
                self.binary_image2[np.nonzero(kmeans_image == np.argmax(image_scores))] = 1
            elif bio_label2 is not None:
                self.binary_image2[np.nonzero(kmeans_image == bio_label2)] = 1
                self.bio_label2 = bio_label2
            else:
                if biomask is not None:
                    all_labels = kmeans_image[biomask[0], biomask[1]]
                    for i in range(cluster_number):
                        sum_per_label[i] = (all_labels == i).sum()
                    self.bio_label2 = np.nonzero(sum_per_label == np.max(sum_per_label))
                elif backmask is not None:
                    all_labels = kmeans_image[backmask[0], backmask[1]]
                    for i in range(cluster_number):
                        sum_per_label[i] = (all_labels == i).sum()
                    self.bio_label2 = np.nonzero(sum_per_label == np.min(sum_per_label))
                else:
                    for i in range(cluster_number):
                        sum_per_label[i] = (kmeans_image == i).sum()
                    self.bio_label2 = np.nonzero(sum_per_label == np.min(sum_per_label))
                self.binary_image2[np.nonzero(kmeans_image == self.bio_label2)] = 1
|
|
852
|
+
|
|
853
|
+
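    # --- Editor's note: a minimal, hand-checkable sketch (not part of the
    # package) of the label-selection rule used above. When a biomask is given,
    # the k-means cluster most represented under the mask becomes the "bio"
    # label; `toy_labels` and `toy_biomask` are hypothetical names.
    # >>> import numpy as np
    # >>> toy_labels = np.array([[0, 0, 1, 1],
    # ...                        [0, 0, 1, 1],
    # ...                        [2, 2, 1, 1]])
    # >>> toy_biomask = np.nonzero(np.array([[0, 0, 1, 1],
    # ...                                    [0, 0, 1, 0],
    # ...                                    [0, 0, 0, 0]]))
    # >>> all_labels = toy_labels[toy_biomask[0], toy_biomask[1]]
    # >>> sum_per_label = np.array([(all_labels == i).sum() for i in range(3)])
    # >>> sum_per_label
    # array([0, 3, 0])
    # >>> int(np.argmax(sum_per_label))   # cluster 1 overlaps the biomask most
    # 1
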
    def binarize_k_means_product(self, grey_idx):
        # Keep only the pixels labelled `grey_idx` in the k-means product.
        binarization = np.zeros_like(self.binary_image)
        binarization[np.nonzero(self.binary_image == grey_idx)] = 1
        self.binary_image = binarization

    def grid_segmentation(self, lighter_background, side_length=8, step=2, int_variation_thresh=20, mask=None):
        """
        Segment small squares of the image to detect local intensity valleys.
        This method segments the image locally, applying Otsu thresholding on a rolling window
        (a usage sketch follows the method).
        :param lighter_background: True if the background is lighter than the cells
        :type lighter_background: bool
        :param side_length: the side length of the rolling window used to detect blobs
        :type side_length: uint8
        :param step: the offset between two successive window positions
        :type step: uint8
        :param int_variation_thresh: windows whose nonzero intensity range stays below this value are counted as homogeneous
        :type int_variation_thresh: uint8
        :param mask: optional mask restricting the analysis to a region of interest
        :return:
        """
        if len(self.image.shape) == 3:
            print("Image is not Grayscale")
        if mask is None:
            min_y = 0
            min_x = 0
            y_size = self.image.shape[0]
            x_size = self.image.shape[1]
            max_y = y_size + 1
            max_x = x_size + 1
            mask = np.ones_like(self.image)
        else:
            # Restrict the analysis to the bounding box of the mask, padded by 20 pixels.
            y, x = np.nonzero(mask)
            min_y = np.min(y)
            if (min_y - 20) >= 0:
                min_y -= 20
            else:
                min_y = 0
            max_y = np.max(y) + 1
            if (max_y + 20) < mask.shape[0]:
                max_y += 20
            else:
                max_y = mask.shape[0] - 1
            min_x = np.min(x)
            if (min_x - 20) >= 0:
                min_x -= 20
            else:
                min_x = 0
            max_x = np.max(x) + 1
            if (max_x + 20) < mask.shape[1]:
                max_x += 20
            else:
                max_x = mask.shape[1] - 1
        y_size = max_y - min_y
        x_size = max_x - min_x
        grid_image = np.zeros((y_size, x_size), np.uint64)
        homogeneities = np.zeros((y_size, x_size), np.uint64)
        cropped_mask = mask[min_y:max_y, min_x:max_x]
        cropped_image = self.image[min_y:max_y, min_x:max_x]
        # NOTE: this would be more efficient if it only looped over the region where mask == 1.
        for to_add in np.arange(0, side_length, step):
            y_windows = np.arange(0, y_size, side_length)
            x_windows = np.arange(0, x_size, side_length)
            y_windows += to_add
            x_windows += to_add
            for y_start in y_windows:
                if y_start < self.image.shape[0]:
                    y_end = y_start + side_length
                    if y_end < self.image.shape[0]:
                        for x_start in x_windows:
                            if x_start < self.image.shape[1]:
                                x_end = x_start + side_length
                                if x_end < self.image.shape[1]:
                                    if np.any(cropped_mask[y_start:y_end, x_start:x_end]):
                                        potential_detection = cropped_image[y_start:y_end, x_start:x_end]
                                        if np.any(potential_detection):
                                            if np.ptp(potential_detection[np.nonzero(potential_detection)]) < int_variation_thresh:
                                                homogeneities[y_start:y_end, x_start:x_end] += 1

                                            threshold = get_otsu_threshold(potential_detection)
                                            if lighter_background:
                                                net_coord = np.nonzero(potential_detection < threshold)
                                            else:
                                                net_coord = np.nonzero(potential_detection > threshold)
                                            grid_image[y_start + net_coord[0], x_start + net_coord[1]] += 1

        self.binary_image = np.zeros(self.image.shape, np.uint8)
        self.binary_image[min_y:max_y, min_x:max_x] = (grid_image >= (side_length // step)).astype(np.uint8)
        self.binary_image[min_y:max_y, min_x:max_x][homogeneities >= (((side_length // step) // 2) + 1)] = 0

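    # --- Editor's note: a hedged usage sketch (not part of the package) for
    # grid_segmentation; it assumes `oia` is a OneImageAnalysis instance whose
    # `image` attribute holds a 2D greyscale image over a darker background,
    # and the parameter values are only illustrative.
    # >>> oia.grid_segmentation(lighter_background=False, side_length=8, step=2,
    # ...                       int_variation_thresh=20)
    # Afterwards, oia.binary_image keeps the pixels that passed the local Otsu
    # threshold in at least side_length // step of the shifted windows, minus
    # those covered mostly by near-homogeneous windows.
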
"""
|
|
940
|
+
III/ Use validated shapes to exclude from analysis the image parts that are far from them
|
|
941
|
+
i.e. detect projected shape boundaries over both axis and determine crop coordinates
|
|
942
|
+
"""
|
|
943
|
+
    def get_crop_coordinates(self, are_zigzag=None):
        logging.info("Project the image on the y axis to detect rows of arenas")
        self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
        logging.info("Project the image on the x axis to detect columns of arenas")
        self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
        logging.info("Get crop coordinates using the get_crop_coordinates method of OneImageAnalysis class")
        row_number = len(np.nonzero(self.y_boundaries)[0]) // 2
        col_number = len(np.nonzero(self.x_boundaries)[0]) // 2
        # Automatically determine whether rows or columns of arenas are arranged in zigzag.
        if (x_max_sum / col_number) * 2 < (y_max_sum / row_number):
            are_zigzag = "columns"
        elif (x_max_sum / col_number) > (y_max_sum / row_number) * 2:
            are_zigzag = "rows"
        else:
            are_zigzag = None
        x_boundary_number = (self.x_boundaries == 1).sum()
        if x_boundary_number > 1:
            if x_boundary_number < 4:
                x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
            else:
                if are_zigzag == "columns":
                    x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0][::2]))) // 2
                else:
                    x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
            cx_min = np.where(self.x_boundaries == -1)[0][0] - x_interval.astype(int)
            cx_max = np.where(self.x_boundaries == 1)[0][col_number - 1] + x_interval.astype(int)
            if cx_min < 0: cx_min = 0
            if cx_max > len(self.x_boundaries): cx_max = len(self.x_boundaries) - 1
        else:
            cx_min = 0
            cx_max = len(self.x_boundaries) - 1

        y_boundary_number = (self.y_boundaries == 1).sum()
        if y_boundary_number > 1:
            if y_boundary_number < 4:
                y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
            else:
                if are_zigzag == "rows":
                    y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0][::2]))) // 2
                else:
                    y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
            cy_min = np.where(self.y_boundaries == -1)[0][0] - y_interval.astype(int)
            cy_max = np.where(self.y_boundaries == 1)[0][row_number - 1] + y_interval.astype(int)
            if cy_min < 0: cy_min = 0
            if cy_max > len(self.y_boundaries): cy_max = len(self.y_boundaries) - 1
        else:
            cy_min = 0
            cy_max = len(self.y_boundaries) - 1

        self.crop_coord = [cy_min, cy_max, cx_min, cx_max]
        return are_zigzag

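    # --- Editor's note: a toy, hand-checkable example (not part of the package)
    # of the crop logic above; `oia` stands for any OneImageAnalysis instance.
    # Four 4x4 blobs on a 20x20 grid, regularly spaced (so no zigzag layout):
    # >>> import numpy as np
    # >>> oia.validated_shapes = np.zeros((20, 20), np.uint8)
    # >>> for r in (2, 12):
    # ...     for c in (2, 12):
    # ...         oia.validated_shapes[r:r + 4, c:c + 4] = 1
    # >>> oia.get_crop_coordinates() is None
    # True
    # >>> oia.crop_coord   # [cy_min, cy_max, cx_min, cx_max], clamped to the image
    # [0, 19, 0, 19]
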
    def projection_to_get_peaks_boundaries(self, axis):
        # Project the validated shapes on one axis and mark, for each peak of the
        # projection, its rising edge with -1 and its falling edge with 1.
        sums = np.sum(self.validated_shapes, axis)
        slopes = np.greater(sums, 0)
        slopes = np.append(0, np.diff(slopes))
        coord = np.nonzero(slopes)[0]
        for ci in np.arange(len(coord)):
            if ci % 2 == 0:
                slopes[coord[ci]] = -1
        return slopes, sums.max()

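    # --- Editor's note: a toy example (not part of the package) showing the
    # -1/1 edge coding returned by projection_to_get_peaks_boundaries; `oia`
    # stands for any OneImageAnalysis instance.
    # >>> import numpy as np
    # >>> oia.validated_shapes = np.zeros((4, 10), np.uint8)
    # >>> oia.validated_shapes[:, 2:4] = 1   # a blob spanning columns 2-3
    # >>> oia.validated_shapes[:, 6:8] = 1   # a second blob, columns 6-7
    # >>> slopes, max_sum = oia.projection_to_get_peaks_boundaries(axis=0)
    # >>> slopes   # -1 marks each rising edge, 1 the column right after a blob
    # array([ 0,  0, -1,  0,  1,  0, -1,  0,  1,  0])
    # >>> int(max_sum)
    # 4
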
    def jackknife_cutting(self, changes):
        """
        Compare the mean and the median of the distances between the 1s of a binary vector.
        Since a few irregular intervals affect the median less than the mean,
        this method removes each 1, one at a time, to see whether its removal brings the mean
        close enough to the median. If the standard deviation of these differences
        (between the jackknifed means and the original median of the distances between 1s)
        is higher than 2, it flags for removal every point whose removal decreases that
        difference below half of the median of these differences.
        A toy example follows the method.
        """
        indices = np.nonzero(changes)[0]
        indices_to_remove = np.zeros(len(indices), dtype=bool)
        # To test the impact of a removal, changes must contain at least four 1s.
        if len(indices) > 3:
            jackknifed_mean = np.zeros(np.sum(changes == 1))
            for dot_i in np.arange(len(indices)):
                steep = changes == 1
                steep[indices[dot_i]] = False
                new_indices = np.where(steep == 1)[0]
                if dot_i != 0:
                    # Shift the following indices so the removed interval does not bias the mean.
                    new_indices[dot_i:] = indices[(dot_i + 1):] - (indices[dot_i] - indices[dot_i - 1])
                jackknifed_mean[dot_i] = np.mean(np.diff(new_indices))
            improving_cuts = np.absolute(jackknifed_mean - np.median(np.diff(indices)))
            if np.std(improving_cuts) > 2:
                improving_cuts = np.argwhere(improving_cuts < 0.5 * np.median(improving_cuts))
                indices_to_remove[improving_cuts] = 1
        return indices_to_remove

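    # --- Editor's note: a toy example (not part of the package) for
    # jackknife_cutting; `oia` stands for any OneImageAnalysis instance.
    # Four regularly spaced boundaries plus one spurious extra boundary at 310:
    # >>> import numpy as np
    # >>> changes = np.zeros(311, dtype=int)
    # >>> changes[[0, 100, 200, 300, 310]] = 1
    # >>> oia.jackknife_cutting(changes)
    # array([False, False, False, False,  True])
    # Only the fifth 1 is flagged: removing it brings the mean interval back to
    # the median interval (100), so it is treated as an irregular boundary.
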
    def automatically_crop(self, crop_coord):
        if not self.cropped:
            logging.info("Crop using the automatically_crop method of OneImageAnalysis class")
            self.cropped = True
            self.image = self.image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
            self.bgr = deepcopy(self.bgr[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...])
            if len(self.all_c_spaces) > 0:
                self.all_c_spaces = get_color_spaces(self.bgr)
            if self.im_combinations is not None:
                for i in np.arange(len(self.im_combinations)):
                    self.im_combinations[i]["binary_image"] = self.im_combinations[i]["binary_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
                    self.im_combinations[i]["converted_image"] = self.im_combinations[i]["converted_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
            self.binary_image = self.binary_image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
            if self.image2 is not None:
                self.image2 = self.image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
            if self.binary_image2 is not None:
                self.binary_image2 = self.binary_image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
            if self.subtract_background is not None:
                self.subtract_background = self.subtract_background[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
            if self.subtract_background2 is not None:
                self.subtract_background2 = self.subtract_background2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
            self.validated_shapes = self.validated_shapes[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]

            # Boundaries must be recomputed on the cropped shapes.
            self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
            self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)

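    # --- Editor's note: a hedged end-to-end sketch (not part of the package):
    # once shapes are validated, cropping typically chains the two methods above.
    # >>> are_zigzag = oia.get_crop_coordinates()   # fills oia.crop_coord
    # >>> oia.automatically_crop(oia.crop_coord)
    # The second call crops image, bgr, binary images and validated_shapes, and
    # recomputes the y/x boundaries on the cropped shapes.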