cellects 0.1.2__py3-none-any.whl → 0.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +390 -330
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +16 -33
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +866 -454
  14. cellects/gui/required_output.py +104 -57
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +333 -155
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -109
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.6.dist-info/LICENSE +675 -0
  29. cellects-0.2.6.dist-info/METADATA +829 -0
  30. cellects-0.2.6.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.2.dist-info/METADATA +0 -132
  35. cellects-0.1.2.dist-info/RECORD +0 -44
  36. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
cellects/core/one_image_analysis.py
@@ -1,1061 +1,1082 @@
- #!/usr/bin/env python3
- """
- This script contains the OneImageAnalysis class.
- OneImageAnalysis is a class containing many tools to analyze one image.
-
- An image can be coded in different color spaces, such as RGB, HSV, etc. These color spaces code the color of each pixel as three numbers, ranging from 0 to 255. Our aim is to find a combination of these three numbers that provides a single intensity value for each pixel and maximizes the contrast between the organism and the background. To increase the flexibility of our algorithm, we use more than one color space to look for these combinations. In particular, we use the RGB, LAB, HSV, LUV, HLS and YUV color spaces. What we call a color space combination is a transformation combining several channels of one or more color spaces.
- To find the optimal color space combination, Cellects uses one image (which we will call the "seed image"). By default, the software selects the first image of the sequence as the seed image, but the user can select a different image in which the cells are more visible.
- Cellects has a fully automatic algorithm to select a good color space combination, which proceeds in four steps:
-
- First, it screens every channel of every color space. For instance, it converts the image into grayscale using the second channel of the HSV color space, and segments that grayscale image using Otsu thresholding. Once a binary image has been computed from every channel, Cellects only keeps the channels for which the number of connected components is lower than 10000, and the total detected area is higher than 100 pixels but lower than 0.75 times the total size of the image. By doing so, we eliminate the channels that produce the most noise.
-
- In the second step, Cellects uses all the channels that pass the first filter and tests all possible pairwise combinations. Cellects combines channels by summing their intensities and re-scaling the result between 0 and 255. It then performs the segmentation on these combinations, and filters them with the same criteria as in the first step.
-
- The third step uses the previously selected channels and combinations that produce the highest and lowest detected surface to make logical operations between them. It applies the AND operator between the two results having the highest surface, and the OR operator between the two results having the lowest surface. It thus generates another two candidate segmentations, which are added to the ones obtained in the previous steps.
-
- In the fourth step, Cellects works under the assumption that the image contains multiple similar arenas holding a collection of objects of similar size and shape, and keeps the segmentations whose standard error of the area is smaller than ten times the smallest area standard error across all segmentations. To account for cases in which the experimental setup induces segmentation errors in one particular direction, Cellects also keeps the segmentation with minimal width standard error across all segmentations, and the one with minimal height standard error across all segmentations. All retained segmentations are shown to the user, who can then select the best one.
-
- As an optional step, Cellects can refine the choice of color space combination, using the last image of the sequence instead of the seed image. To increase the diversity of combinations explored, this optional analysis is performed differently than for the seed image. This refinement can also use information from the segmentation of the seed frame and from the geometry of the arenas to rank the quality of the segmentation emerging from each color space combination. To generate these combinations, Cellects follows four steps.
- The first step is identical to the first step of the automatic algorithm described above, and starts by screening every channel of every color space.
-
- The second step aims to find combinations that include many channels, rather than only one or two. To do that, it creates combinations consisting of the sum of all channels except one. It then filters these combinations in the same way as in the previous step. All surviving combinations are retained and undergo the same process, with one more channel excluded each time, until single-channel combinations are reached. This process thus creates new combinations that include any number of channels.
-
- The third step filters these segmentations, keeping those that fulfill the following criteria: (1) the number of connected components is higher than the number of arenas and lower than 10000; (2) the detected area covers less than 99% of the image; (3) less than 1% of the detected area falls outside the arenas; (4) each connected component of the detected area covers less than 75% of the image.
-
- Finally, the fourth step ranks the remaining segmentations using the following criteria: if the user labeled any areas as "cell", the ranking reflects the number of cell pixels the segmentation has in common with the user labels. If the user did not label any areas as cell but labeled areas as background, the ranking reflects the number of background pixels in common. Otherwise, the ranking reflects the number of pixels in common with the segmentation of the first image.
-
- """
-
- import logging
- import os
- from copy import deepcopy
- import numpy as np
- import cv2  # packaged as opencv-python
- import multiprocessing.pool as mp
- from numba.typed import List as TList
- from numba.typed import Dict as TDict
- from cellects.image_analysis.morphological_operations import cross_33, Ellipse
- from cellects.image_analysis.image_segmentation import get_color_spaces, combine_color_spaces, apply_filter, otsu_thresholding, get_otsu_threshold
- from cellects.image_analysis.one_image_analysis_threads import SaveCombinationThread, ProcessFirstImage
- from cellects.utils.formulas import bracket_to_uint8_image_contrast
-
-
- class OneImageAnalysis:
-     """
-     This class takes a 3D matrix (two spatial dimensions and one BGR color dimension).
-     Its methods allow image:
-     - conversion to any bgr/hsv/lab channel
-     - cropping
-     - rotating
-     - filtering, using some of the most common techniques:
-       Gaussian, Median, Bilateral, Laplacian, Mexican hat
-     - segmenting, using thresholds or kmeans
-     - shape selection, according to horizontal size or shape ('circle' vs 'quadrilateral')
-
-     NB: a viewing method displays the image before and after the latest modification made to the instance.
-     """
-     def __init__(self, image):
-         self.image = image
-         if len(self.image.shape) == 2:
-             self.already_greyscale = True
-         else:
-             self.already_greyscale = False
-         self.image2 = None
-         self.binary_image2 = None
-         self.drift_correction_already_adjusted: bool = False
-         # Create empty variables to fill in the following functions
-         self.binary_image = np.zeros(self.image.shape[:2], dtype=np.uint8)
-         self.previous_binary_image = None
-         self.validated_shapes = np.zeros(self.image.shape[:2], dtype=np.uint8)
-         self.centroids = 0
-         self.shape_number = 0
-         self.concomp_stats = 0
-         self.y_boundaries = None
-         self.x_boundaries = None
-         self.crop_coord = None
-         self.cropped: bool = False
-         self.subtract_background = None
-         self.subtract_background2 = None
-         self.im_combinations = None
-         self.bgr = image
-         self.colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
-         self.spot_shapes = None
-         self.all_c_spaces = TDict()
-         self.hsv = None
-         self.hls = None
-         self.lab = None
-         self.luv = None
-         self.yuv = None
-
-     """
-     I/ Image modification for segmentation through thresholding
-     This part contains methods to convert, visualize, filter and threshold one image.
-     """
-     def convert_and_segment(self, c_space_dict, color_number=2, biomask=None,
-                             backmask=None, subtract_background=None, subtract_background2=None, grid_segmentation=False,
-                             lighter_background=None, side_length=20, step=5, int_variation_thresh=None, mask=None,
-                             filter_spec=None):
-         if self.already_greyscale:
-             self.segmentation(logical='None', color_number=2, biomask=biomask, backmask=backmask,
-                               grid_segmentation=grid_segmentation, lighter_background=lighter_background,
-                               side_length=side_length, step=step, int_variation_thresh=int_variation_thresh, mask=mask,
-                               filter_spec=filter_spec)
-         else:
-             if len(self.all_c_spaces) == 0:
-                 self.all_c_spaces = get_color_spaces(self.bgr)
-             first_dict = TDict()
-             second_dict = TDict()
-             for k, v in c_space_dict.items():
-                 if k != 'logical' and v.sum() > 0:
-                     if k[-1] != '2':
-                         first_dict[k] = v
-                     else:
-                         second_dict[k[:-1]] = v
-             logging.info(first_dict)
-             self.image = combine_color_spaces(first_dict, self.all_c_spaces, subtract_background)
-             if len(second_dict) > 0:
-                 self.image2 = combine_color_spaces(second_dict, self.all_c_spaces, subtract_background2)
-                 self.segmentation(logical=c_space_dict['logical'], color_number=color_number, biomask=biomask,
-                                   backmask=backmask, grid_segmentation=grid_segmentation,
-                                   lighter_background=lighter_background, side_length=side_length, step=step,
-                                   int_variation_thresh=int_variation_thresh, mask=mask, filter_spec=filter_spec)
-             else:
-                 self.segmentation(logical='None', color_number=color_number, biomask=biomask,
-                                   backmask=backmask, grid_segmentation=grid_segmentation,
-                                   lighter_background=lighter_background, side_length=side_length, step=step,
-                                   int_variation_thresh=int_variation_thresh, mask=mask, filter_spec=filter_spec)
-
-     def segmentation(self, logical='None', color_number=2, biomask=None, backmask=None, bio_label=None, bio_label2=None, grid_segmentation=False, lighter_background=None, side_length=20, step=5, int_variation_thresh=None, mask=None, filter_spec=None):
-         if filter_spec is not None and filter_spec["filter1_type"] != "":
-             self.image = apply_filter(self.image, filter_spec["filter1_type"], filter_spec["filter1_param"])
-         if color_number > 2:
-             self.kmeans(color_number, biomask, backmask, logical, bio_label, bio_label2)
-         elif grid_segmentation:
-             if lighter_background is None:
-                 self.binary_image = otsu_thresholding(self.image)
-                 lighter_background = self.binary_image.sum() > (self.binary_image.size / 2)
-             if int_variation_thresh is None:
-                 int_variation_thresh = 100 - (np.ptp(self.image) * 90 / 255)
-             self.grid_segmentation(lighter_background, side_length, step, int_variation_thresh, mask)
-         else:
-             # Segment the image using Otsu thresholding
-             self.binary_image = otsu_thresholding(self.image)
-             if self.previous_binary_image is not None:
-                 if (self.binary_image * (1 - self.previous_binary_image)).sum() > (self.binary_image * self.previous_binary_image).sum():
-                     # The ones of the binary image have more in common with the background than with the specimen
-                     self.binary_image = 1 - self.binary_image
-             if logical != 'None':
-                 if filter_spec is not None and filter_spec["filter2_type"] != "":
-                     self.image2 = apply_filter(self.image2, filter_spec["filter2_type"], filter_spec["filter2_param"])
-                 self.binary_image2 = otsu_thresholding(self.image2)
-                 if self.previous_binary_image is not None:
-                     if (self.binary_image2 * (1 - self.previous_binary_image)).sum() > (
-                             self.binary_image2 * self.previous_binary_image).sum():
-                         self.binary_image2 = 1 - self.binary_image2
-         if logical != 'None':
-             if logical == 'Or':
-                 self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
-             elif logical == 'And':
-                 self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
-             elif logical == 'Xor':
-                 self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
-             self.binary_image = self.binary_image.astype(np.uint8)
-
-     def correct_with_previous_binary_image(self, binary_image):
-         # If the binary image is more than twenty times bigger or smaller than the previous binary image,
-         # Otsu thresholding failed: use a fixed threshold of 127 instead.
-         if binary_image.sum() > self.previous_binary_image.sum() * 20 or binary_image.sum() < self.previous_binary_image.sum() * 0.05:
-             binary_image = self.image >= 127
-             # And again, make sure that these pixels are shared with the previous binary image
-             if (binary_image * (1 - self.previous_binary_image)).sum() > (binary_image * self.previous_binary_image).sum():
-                 binary_image = 1 - binary_image
-         return binary_image.astype(np.uint8)
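The polarity check used in both methods above deserves spelling out: a fresh Otsu mask is inverted when its foreground overlaps the previous frame's background more than its foreground. A self-contained sketch of that test, assuming 0/1 uint8 masks:

```python
import numpy as np

def align_polarity(mask, previous_mask):
    # Overlap of the new foreground with the previous foreground vs. background.
    with_specimen = (mask * previous_mask).sum()
    with_background = (mask * (1 - previous_mask)).sum()
    # If the new "ones" agree more with the old background, flip the mask.
    return (1 - mask if with_background > with_specimen else mask).astype(np.uint8)

m_prev = np.array([[1, 1, 0, 0]], dtype=np.uint8)
m_new = np.array([[0, 0, 1, 1]], dtype=np.uint8)   # inverted polarity
print(align_polarity(m_new, m_prev))               # -> [[1 1 0 0]]
```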
-
-     def get_largest_shape(self):
-         shape_number, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
-         sorted_area = np.sort(stats[1:, 4])
-         self.validated_shapes = np.zeros(self.binary_image.shape, dtype=np.uint8)
-         self.validated_shapes[np.nonzero(shapes == np.nonzero(stats[:, 4] == sorted_area[-1])[0])] = 1
-
-     def generate_subtract_background(self, c_space_dict):
-         logging.info("Generate background using the generate_subtract_background method of OneImageAnalysis class")
-         if len(self.all_c_spaces) == 0 and not self.already_greyscale:
-             self.all_c_spaces = get_color_spaces(self.bgr)
-         self.convert_and_segment(c_space_dict, grid_segmentation=False)
-         disk_size = int(np.floor(np.sqrt(np.min(self.bgr.shape[:2])) / 2))
-         disk = np.uint8(Ellipse((disk_size, disk_size)).create())
-         self.subtract_background = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, disk)
-         if self.image2 is not None:
-             self.subtract_background2 = cv2.morphologyEx(self.image2, cv2.MORPH_OPEN, disk)
-
-     def check_if_image_border_attest_drift_correction(self):
-         t = np.all(self.binary_image[0, :])
-         b = np.all(self.binary_image[-1, :])
-         l = np.all(self.binary_image[:, 0])
-         r = np.all(self.binary_image[:, -1])
-         if (t and b) or (t and l) or (t and r) or (b and l) or (b and r) or (l and r):
-             cc_nb, shapes = cv2.connectedComponents(self.binary_image)
-             if cc_nb == 2:
-                 return True
-             else:
-                 return False
-         else:
-             return False
-
-     def adjust_to_drift_correction(self, logical):
-         if not self.drift_correction_already_adjusted:
-             self.drift_correction_already_adjusted = True
-
-             mask = cv2.dilate(self.binary_image, kernel=cross_33)
-             mask -= self.binary_image
-             mask = np.nonzero(mask)
-
-             drift_correction = np.mean(self.image[mask[0], mask[1]])
-             self.image[np.nonzero(self.binary_image)] = drift_correction
-             threshold = get_otsu_threshold(self.image)
-             binary = (self.image > threshold)
-             self.binary_image = binary.astype(np.uint8)
-
-             if self.image2 is not None:
-                 drift_correction2 = np.mean(self.image2[mask[0], mask[1]])
-                 self.image2[np.nonzero(self.binary_image)] = drift_correction2
-                 threshold = get_otsu_threshold(self.image2)
-                 binary1 = (self.image2 > threshold)
-                 binary2 = np.logical_not(binary1)
-                 if binary1.sum() < binary2.sum():
-                     binary = binary1
-                 else:
-                     binary = binary2
-                 while np.any(binary * self.binary_image2) and threshold > 1:
-                     threshold -= 1
-                     binary1 = (self.image2 > threshold)
-                     binary2 = np.logical_not(binary1)
-                     if binary1.sum() < binary2.sum():
-                         binary = binary1
-                     else:
-                         binary = binary2
-                 self.binary_image2 = binary.astype(np.uint8)
-                 if logical == 'Or':
-                     self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
-                 elif logical == 'And':
-                     self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
-                 elif logical == 'Xor':
-                     self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
-                 self.binary_image = self.binary_image.astype(np.uint8)
-
-     def set_spot_shapes_and_size_confint(self, spot_shape):
-         self.spot_size_confints = np.arange(0.75, 0.00, -0.05)
-         if spot_shape is None:
-             self.spot_shapes = np.tile(["circle", "rectangle"], len(self.spot_size_confints))
-             self.spot_size_confints = np.repeat(self.spot_size_confints, 2)
-         else:
-             self.spot_shapes = np.repeat(spot_shape, len(self.spot_size_confints))
-
-     def find_first_im_csc(self, sample_number=None, several_blob_per_arena=True, spot_shape=None, spot_size=None, kmeans_clust_nb=None, biomask=None, backmask=None, color_space_dictionaries=None, carefully=False):
-         logging.info("Prepare color space lists, dictionaries and matrices")
-         if len(self.all_c_spaces) == 0:
-             self.all_c_spaces = get_color_spaces(self.bgr)
-         if color_space_dictionaries is None:
-             if carefully:
-                 colorspace_list = ["bgr", "lab", "hsv", "luv", "hls", "yuv"]
-             else:
-                 colorspace_list = ["lab", "hsv"]
-             color_space_dictionaries = TList()
-             for c_space in colorspace_list:
-                 for i in np.arange(3):
-                     channels = np.array((0, 0, 0), dtype=np.int8)
-                     channels[i] = 1
-                     csc_dict = TDict()
-                     csc_dict[c_space] = channels
-                     color_space_dictionaries.append(csc_dict)
-
-         self.set_spot_shapes_and_size_confint(spot_shape)
-
-         self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 11), dtype=np.uint32)
-         # Columns: ["c1", "c2", "c3", "unaltered_cc_nb", "concomp_nb", "total_area", "width_std", "height_std", "centrodist_std", "biosum", "backsum"]
-         unaltered_cc_nb, cc_nb, area, width_std, height_std, area_std, biosum, backsum = 3, 4, 5, 6, 7, 8, 9, 10
-         self.saved_images_list = TList()
-         self.converted_images_list = TList()
-         self.saved_color_space_list = list()
-         self.saved_csc_nb = 0
-         self.save_combination_thread = SaveCombinationThread(self)
-         get_one_channel_result = True
-         combine_channels = False
-
-         for csc_dict in color_space_dictionaries:
-             logging.info(f"Try detection with each color space channel, one by one. Currently analyzing {csc_dict}")
-             list_args = [self, get_one_channel_result, combine_channels, csc_dict, several_blob_per_arena,
-                          sample_number, spot_size, kmeans_clust_nb, biomask, backmask, None]
-             ProcessFirstImage(list_args)
-
-         if sample_number is not None and carefully:
-             # Try to add color space combinations together
-             possibilities = []
-             if self.saved_csc_nb > 6:
-                 different_color_spaces = np.unique(self.saved_color_space_list)
-                 for color_space in different_color_spaces:
-                     csc_idx = np.nonzero(np.isin(self.saved_color_space_list, color_space))[0]
-                     possibilities.append(csc_idx[0] + np.argmin(self.combination_features[csc_idx, area_std]))
-             if len(possibilities) < 6:
-                 remaining_possibilities = np.arange(len(self.saved_color_space_list))
-                 remaining_possibilities = remaining_possibilities[np.logical_not(np.isin(remaining_possibilities, possibilities))]
-                 while len(possibilities) < 6:
-                     new_possibility = remaining_possibilities[np.argmin(self.combination_features[remaining_possibilities, area_std])]
-                     possibilities.append(new_possibility)
-                     remaining_possibilities = remaining_possibilities[remaining_possibilities != new_possibility]
-
-             pool = mp.ThreadPool(processes=os.cpu_count() - 1)
-             get_one_channel_result = False
-             combine_channels = True
-             list_args = [[self, get_one_channel_result, combine_channels, i, several_blob_per_arena, sample_number,
-                           spot_size, kmeans_clust_nb, biomask, backmask, possibilities] for i in possibilities]
-             for process_i in pool.imap_unordered(ProcessFirstImage, list_args):
-                 pass
-
-             # Get the most and the least covered images, and the 2 best biomask and backmask scores,
-             # to try combinations of those
-             if self.saved_csc_nb > 1:
-                 coverage = np.argsort(self.combination_features[:self.saved_csc_nb, area])
-                 most1 = coverage[-1]; most2 = coverage[-2]
-                 least1 = coverage[0]; least2 = coverage[1]
-                 if biomask is not None:
-                     bio_sort = np.argsort(self.combination_features[:self.saved_csc_nb, biosum])
-                     bio1 = bio_sort[-1]; bio2 = bio_sort[-2]
-                 if backmask is not None:
-                     back_sort = np.argsort(self.combination_features[:self.saved_csc_nb, backsum])
-                     back1 = back_sort[-1]; back2 = back_sort[-2]
-
-                 # Try a logical And between the two most covered images.
-                 # Should only need one instantiation
-                 process_i = ProcessFirstImage(
-                     [self, False, False, None, several_blob_per_arena, sample_number, spot_size, kmeans_clust_nb, biomask, backmask, None])
-                 process_i.binary_image = np.logical_and(self.saved_images_list[most1], self.saved_images_list[most2]).astype(np.uint8)
-                 process_i.image = self.converted_images_list[most1]
-                 process_i.process_binary_image()
-                 process_i.csc_dict = {list(self.saved_color_space_list[most1].keys())[0]: self.combination_features[most1, :3],
-                                       "logical": "And",
-                                       list(self.saved_color_space_list[most2].keys())[0] + "2": self.combination_features[most2, :3]}
-                 process_i.unaltered_concomp_nb = np.min(self.combination_features[(most1, most2), unaltered_cc_nb])
-                 process_i.total_area = process_i.binary_image.sum()
-                 self.save_combination_features(process_i)
-                 # Try a logical Or between the two least covered images
-                 process_i.image = self.converted_images_list[least1]
-                 process_i.binary_image = np.logical_or(self.saved_images_list[least1], self.saved_images_list[least2]).astype(np.uint8)
-                 process_i.process_binary_image()
-                 process_i.csc_dict = {list(self.saved_color_space_list[least1].keys())[0]: self.combination_features[least1, :3],
-                                       "logical": "Or",
-                                       list(self.saved_color_space_list[least2].keys())[0] + "2": self.combination_features[least2, :3]}
-                 process_i.unaltered_concomp_nb = np.max(self.combination_features[(least1, least2), unaltered_cc_nb])
-                 process_i.total_area = process_i.binary_image.sum()
-                 self.save_combination_features(process_i)
-
-                 # If most images are very low in biosum or backsum, try to mix them together to improve that score.
-                 # Do a logical And between the two best biomasks
-                 if biomask is not None:
-                     if not np.all(np.isin((bio1, bio2), (most1, most2))):
-                         process_i.image = self.converted_images_list[bio1]
-                         process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[bio2]).astype(
-                             np.uint8)
-                         process_i.process_binary_image()
-                         process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
-                                               "logical": "And",
-                                               list(self.saved_color_space_list[bio2].keys())[0] + "2": self.combination_features[bio2, :3]}
-                         process_i.unaltered_concomp_nb = np.min(self.combination_features[(bio1, bio2), unaltered_cc_nb])
-                         process_i.total_area = process_i.binary_image.sum()
-                         self.save_combination_features(process_i)
-
-                 # Do a logical And between the two best backmasks
-                 if backmask is not None:
-                     if not np.all(np.isin((back1, back2), (most1, most2))):
-                         process_i.image = self.converted_images_list[back1]
-                         process_i.binary_image = np.logical_and(self.saved_images_list[back1], self.saved_images_list[back2]).astype(
-                             np.uint8)
-                         process_i.process_binary_image()
-                         process_i.csc_dict = {list(self.saved_color_space_list[back1].keys())[0]: self.combination_features[back1, :3],
-                                               "logical": "And",
-                                               list(self.saved_color_space_list[back2].keys())[0] + "2": self.combination_features[back2, :3]}
-                         process_i.unaltered_concomp_nb = np.min(self.combination_features[(back1, back2), unaltered_cc_nb])
-                         process_i.total_area = process_i.binary_image.sum()
-                         self.save_combination_features(process_i)
-                 # Do a logical Or between the best biomask and the best backmask
-                 if biomask is not None and backmask is not None:
-                     if not np.all(np.isin((bio1, back1), (least1, least2))):
-                         process_i.image = self.converted_images_list[bio1]
-                         process_i.binary_image = np.logical_or(self.saved_images_list[bio1], self.saved_images_list[back1]).astype(
-                             np.uint8)
-                         process_i.process_binary_image()
-                         process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
-                                               "logical": "Or",
-                                               list(self.saved_color_space_list[back1].keys())[0] + "2": self.combination_features[back1, :3]}
-                         process_i.unaltered_concomp_nb = np.max(self.combination_features[(bio1, back1), unaltered_cc_nb])
-                         process_i.total_area = process_i.binary_image.sum()
-                         self.save_combination_features(process_i)
-
-             if self.save_combination_thread.is_alive():
-                 self.save_combination_thread.join()
-             self.combination_features = self.combination_features[:self.saved_csc_nb, :]
-             # Only keep the rows that fulfill the following conditions:
-             # - their area standard deviation is lower than 10 times the smallest one,
-             # - OR they have the minimal width standard deviation,
-             # - OR they have the minimal height standard deviation,
-             # - AND/OR their segmentation fits the biomask and the backmask
-             width_std_fit = self.combination_features[:, width_std] == np.min(self.combination_features[:, width_std])
-             height_std_fit = self.combination_features[:, height_std] == np.min(self.combination_features[:, height_std])
-             area_std_fit = self.combination_features[:, area_std] < np.min(self.combination_features[:, area_std]) * 10
-             fit = np.logical_or(np.logical_or(width_std_fit, height_std_fit), area_std_fit)
-             biomask_fit = np.ones(self.saved_csc_nb, dtype=bool)
-             backmask_fit = np.ones(self.saved_csc_nb, dtype=bool)
-             if biomask is not None or backmask is not None:
-                 if biomask is not None:
-                     biomask_fit = self.combination_features[:, biosum] > 0.9 * len(biomask[0])
-                 if backmask is not None:
-                     backmask_fit = self.combination_features[:, backsum] > 0.9 * len(backmask[0])
-                 # First test a logical OR between the previous options and the mask fits.
-                 fit = np.logical_or(fit, np.logical_and(biomask_fit, backmask_fit))
-                 # If this is not stringent enough, use a logical AND and progressively increase the
-                 # proportion of pixels that must match the biomask and the backmask
-                 if np.sum(fit) > 5:
-                     to_add = 0
-                     while np.sum(fit) > 5 and to_add <= 0.25:
-                         if biomask is not None:
-                             biomask_fit = self.combination_features[:, biosum] > (0.75 + to_add) * len(biomask[0])
-                         if backmask is not None:
-                             backmask_fit = self.combination_features[:, backsum] > (0.75 + to_add) * len(backmask[0])
-                         test_fit = np.logical_and(fit, np.logical_and(biomask_fit, backmask_fit))
-                         if np.sum(test_fit) != 0:
-                             fit = test_fit
-                         to_add += 0.05
-         else:
-             self.combination_features = self.combination_features[:self.saved_csc_nb, :]
-             fit = np.ones(self.saved_csc_nb, dtype=bool)
-         # Order the table according to the area standard deviation
-         cc_efficiency_order = np.argsort(self.combination_features[:, area_std])
-         # Save and return a dictionary containing the selected color space combinations
-         # and their corresponding binary images
-         self.im_combinations = []
-         for saved_csc in cc_efficiency_order:
-             if fit[saved_csc]:
-                 self.im_combinations.append({})
-                 self.im_combinations[-1]["csc"] = {}
-                 self.im_combinations[-1]["csc"]['logical'] = 'None'
-                 for k, v in self.saved_color_space_list[saved_csc].items():
-                     self.im_combinations[-1]["csc"][k] = v
-                 if backmask is not None:
-                     shape_number, shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
-                     if np.any(shapes[backmask]):
-                         shapes[np.isin(shapes, np.unique(shapes[backmask]))] = 0
-                         self.saved_images_list[saved_csc] = (shapes > 0).astype(np.uint8)
-                 if biomask is not None:
-                     self.saved_images_list[saved_csc][biomask] = 1
-                 if backmask is not None or biomask is not None:
-                     self.combination_features[saved_csc, cc_nb], shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
-                     self.combination_features[saved_csc, cc_nb] -= 1
-                 self.im_combinations[-1]["binary_image"] = self.saved_images_list[saved_csc]
-                 self.im_combinations[-1]["shape_number"] = self.combination_features[saved_csc, cc_nb]
-                 self.im_combinations[-1]["converted_image"] = self.converted_images_list[saved_csc]
-
-         self.saved_color_space_list = []
-         self.saved_images_list = None
-         self.converted_images_list = None
-         self.combination_features = None
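The retention rule coded above can be summarized compactly. A hedged restatement (the feature table layout is illustrative: one row per candidate, columns for width, height and area standard deviations):

```python
import numpy as np

def retained(features):
    # features: (n, 3) array with columns [width_std, height_std, area_std]
    width_std, height_std, area_std = features[:, 0], features[:, 1], features[:, 2]
    return (
        (area_std < 10 * area_std.min())        # near-minimal area dispersion
        | (width_std == width_std.min())        # or minimal width dispersion
        | (height_std == height_std.min())      # or minimal height dispersion
    )

feats = np.array([[5, 5, 12], [9, 9, 200], [9, 9, 400], [9, 9, 90]])
print(retained(feats))  # -> [ True False False  True]
```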
-
-     def save_combination_features(self, process_i):
-         self.saved_images_list.append(process_i.validated_shapes)
-         self.converted_images_list.append(np.round(process_i.image).astype(np.uint8))
-         self.saved_color_space_list.append(process_i.csc_dict)
-         self.combination_features[self.saved_csc_nb, :3] = list(process_i.csc_dict.values())[0]
-         self.combination_features[self.saved_csc_nb, 3] = process_i.unaltered_concomp_nb - 1  # unaltered_cc_nb
-         self.combination_features[self.saved_csc_nb, 4] = process_i.shape_number  # cc_nb
-         self.combination_features[self.saved_csc_nb, 5] = process_i.total_area  # area
-         self.combination_features[self.saved_csc_nb, 6] = np.std(process_i.stats[1:, 2])  # width_std
-         self.combination_features[self.saved_csc_nb, 7] = np.std(process_i.stats[1:, 3])  # height_std
-         self.combination_features[self.saved_csc_nb, 8] = np.std(process_i.stats[1:, 4])  # area_std
-         if process_i.biomask is not None:
-             self.combination_features[self.saved_csc_nb, 9] = np.sum(
-                 process_i.validated_shapes[process_i.biomask[0], process_i.biomask[1]])
-         if process_i.backmask is not None:
-             self.combination_features[self.saved_csc_nb, 10] = np.sum(
-                 (1 - process_i.validated_shapes)[process_i.backmask[0], process_i.backmask[1]])
-         self.saved_csc_nb += 1
-
-     def update_current_images(self, current_combination_id):
-         self.image = self.im_combinations[current_combination_id]["converted_image"]
-         self.validated_shapes = self.im_combinations[current_combination_id]["binary_image"]
-
-     def find_last_im_csc(self, concomp_nb, total_surfarea, max_shape_size, out_of_arenas=None, ref_image=None,
-                          subtract_background=None, kmeans_clust_nb=None, biomask=None, backmask=None,
-                          color_space_dictionaries=None, carefully=False):
-         if len(self.all_c_spaces) == 0:
-             self.all_c_spaces = get_color_spaces(self.bgr)
-         if color_space_dictionaries is None:
-             if carefully:
-                 colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
-             else:
-                 colorspace_list = TList(("lab", "hsv"))
-             color_space_dictionaries = TList()
-             for c_space in colorspace_list:
-                 for i in np.arange(3):
-                     channels = np.array((0, 0, 0), dtype=np.int8)
-                     channels[i] = 1
-                     csc_dict = TDict()
-                     csc_dict[c_space] = channels
-                     color_space_dictionaries.append(csc_dict)
-         if ref_image is not None:
-             ref_image = cv2.dilate(ref_image, cross_33)
-         else:
-             ref_image = np.ones(self.bgr.shape[:2], dtype=np.uint8)
-         if out_of_arenas is not None:
-             out_of_arenas_threshold = 0.01 * out_of_arenas.sum()
-         else:
-             out_of_arenas = np.zeros(self.bgr.shape[:2], dtype=np.uint8)
-             out_of_arenas_threshold = 1
-         self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 9), dtype=np.uint32)
-         cc_nb_idx, area_idx, out_of_arenas_idx, surf_in_common_idx, biosum_idx, backsum_idx = 3, 4, 5, 6, 7, 8
-         self.saved_images_list = TList()
-         self.converted_images_list = TList()
-         self.saved_color_space_list = list()
-         self.saved_csc_nb = 0
-         self.save_combination_thread = SaveCombinationThread(self)
-
-         # One channel processing
-         potentials = TDict()
-         for csc_dict in color_space_dictionaries:
-             self.image = combine_color_spaces(csc_dict, self.all_c_spaces, subtract_background)
-             if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
-                 self.kmeans(kmeans_clust_nb, biomask, backmask)
-             else:
-                 self.binary_image = otsu_thresholding(self.image)
-             surf = np.sum(self.binary_image)
-             if surf < total_surfarea:
-                 nb, shapes = cv2.connectedComponents(self.binary_image)
-                 outside_pixels = np.sum(self.binary_image * out_of_arenas)
-                 if outside_pixels < out_of_arenas_threshold:
-                     if (nb > concomp_nb[0]) and (nb < concomp_nb[1]):
-                         in_common = np.sum(ref_image * self.binary_image)
-                         if in_common > 0:
-                             nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
-                             nb -= 1
-                             if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
-                                 c_space = list(csc_dict.keys())[0]
-                                 self.converted_images_list.append(self.image)
-                                 self.saved_images_list.append(self.binary_image)
-                                 self.saved_color_space_list.append(csc_dict)
-                                 self.combination_features[self.saved_csc_nb, :3] = csc_dict[c_space]
-                                 self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
-                                 self.combination_features[self.saved_csc_nb, area_idx] = surf
-                                 self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
-                                 self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
-                                 if biomask is not None:
-                                     self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
-                                         self.binary_image[biomask[0], biomask[1]])
-                                 if backmask is not None:
-                                     self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
-                                         (1 - self.binary_image)[backmask[0], backmask[1]])
-                                 if np.isin(c_space, list(potentials.keys())):
-                                     potentials[c_space] += csc_dict[c_space]
-                                 else:
-                                     potentials[c_space] = csc_dict[c_space]
-                                 self.saved_csc_nb += 1
-         if len(potentials) > 0:
-             # All channels combination processing:
-             # add a combination of all the selected channels
-             self.image = combine_color_spaces(potentials, self.all_c_spaces, subtract_background)
-             if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
-                 self.kmeans(kmeans_clust_nb, biomask, backmask)
-             else:
-                 self.binary_image = otsu_thresholding(self.image)
-             surf = self.binary_image.sum()
-             nb, shapes = cv2.connectedComponents(self.binary_image)
-             nb -= 1
-             outside_pixels = np.sum(self.binary_image * out_of_arenas)
-             in_common = np.sum(ref_image * self.binary_image)
-             self.converted_images_list.append(self.image)
-             self.saved_images_list.append(self.binary_image)
-             self.saved_color_space_list.append(potentials)
-             self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
-             self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
-             self.combination_features[self.saved_csc_nb, area_idx] = surf
-             self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
-             self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
-             if biomask is not None:
-                 self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
-                     self.binary_image[biomask[0], biomask[1]])
-             if backmask is not None:
-                 self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
-                     (1 - self.binary_image)[backmask[0], backmask[1]])
-             self.saved_csc_nb += 1
-
-             # Try to remove color spaces one by one.
-             # Loop until one color space remains, or until removing one changes the detected area strongly enough
-             i = 0
-             original_length = len(potentials)
-             while np.logical_and(len(potentials) > 1, i < original_length // 2):
-                 color_space_to_remove = TList()
-                 previous_c_space = list(potentials.keys())[-1]
-                 for c_space in potentials.keys():
-                     try_potentials = potentials.copy()
-                     try_potentials.pop(c_space)
-                     if i > 0:
-                         try_potentials.pop(previous_c_space)
-                     self.image = combine_color_spaces(try_potentials, self.all_c_spaces, subtract_background)
-                     if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
-                         self.kmeans(kmeans_clust_nb, biomask, backmask)
-                     else:
-                         self.binary_image = otsu_thresholding(self.image)
-                     surf = np.sum(self.binary_image)
-                     if surf < total_surfarea:
-                         nb, shapes = cv2.connectedComponents(self.binary_image)
-                         outside_pixels = np.sum(self.binary_image * out_of_arenas)
-                         if outside_pixels < out_of_arenas_threshold:
-                             if (nb > concomp_nb[0]) and (nb < concomp_nb[1]):
-                                 in_common = np.sum(ref_image * self.binary_image)
-                                 if in_common > 0:
-                                     nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
-                                     nb -= 1
-                                     if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
-                                         # If removing this color space still fits the requirements, we store the values
-                                         self.converted_images_list.append(self.image)
-                                         self.saved_images_list.append(self.binary_image)
-                                         self.saved_color_space_list.append(try_potentials)
-                                         self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
-                                         self.combination_features[self.saved_csc_nb, area_idx] = surf
-                                         self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
-                                         self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
-                                         if biomask is not None:
-                                             self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
-                                                 self.binary_image[biomask[0], biomask[1]])
-                                         if backmask is not None:
-                                             self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
-                                                 (1 - self.binary_image)[backmask[0], backmask[1]])
-                                         self.saved_csc_nb += 1
-                                         color_space_to_remove.append(c_space)
-                                         if i > 0:
-                                             color_space_to_remove.append(previous_c_space)
-                     # If it does not (if it did not pass every "if" layer), we definitely remove that color space
-                     previous_c_space = c_space
-                 color_space_to_remove = np.unique(color_space_to_remove)
-                 for remove_col_space in color_space_to_remove:
-                     potentials.pop(remove_col_space)
-                 i += 1
-             if np.logical_and(len(potentials) > 0, i > 1):
-                 self.converted_images_list.append(self.image)
-                 self.saved_images_list.append(self.binary_image)
-                 self.saved_color_space_list.append(potentials)
-                 self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
-                 self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
-                 self.combination_features[self.saved_csc_nb, area_idx] = surf
-                 self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
-                 self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
-                 if biomask is not None:
-                     self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
-                         self.binary_image[biomask[0], biomask[1]])
-                 if backmask is not None:
-                     self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
-                         (1 - self.binary_image)[backmask[0], backmask[1]])
-                 self.saved_csc_nb += 1
-
-         self.combination_features = self.combination_features[:self.saved_csc_nb, :]
-         # Among all potentials, rank by the surface they have in common with the reference segmentation
-         cc_efficiency_order = np.argsort(self.combination_features[:, surf_in_common_idx])
-
-         # Save and return a dictionary containing the selected color space combinations
-         # and their corresponding binary images
-         self.im_combinations = []
-         for saved_csc in cc_efficiency_order:
-             if len(self.saved_color_space_list[saved_csc]) > 0:
-                 self.im_combinations.append({})
-                 self.im_combinations[-1]["csc"] = {}
-                 self.im_combinations[-1]["csc"]['logical'] = 'None'
-                 for k, v in self.saved_color_space_list[saved_csc].items():
-                     self.im_combinations[-1]["csc"][k] = v
-                 self.im_combinations[-1]["binary_image"] = self.saved_images_list[saved_csc]
-                 self.im_combinations[-1]["converted_image"] = np.round(self.converted_images_list[saved_csc]).astype(np.uint8)
-         self.saved_color_space_list = []
-         self.saved_images_list = None
-         self.converted_images_list = None
-         self.combination_features = None
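The cascade of `if` layers in this method implements the acceptance criteria listed in the module docstring. A compact, hedged restatement of those checks as a standalone function (names and thresholds follow the docstring's phrasing; the actual method expresses some of them slightly differently):

```python
import cv2
import numpy as np

def passes_filters(binary, out_of_arenas, n_arenas, image_size):
    # (1) connected component count between the arena count and 10000
    nb, _, stats, _ = cv2.connectedComponentsWithStats(binary)
    nb -= 1  # drop the background component
    if not (n_arenas < nb < 10000):
        return False
    area = binary.sum()
    # (2) the detected area covers less than 99% of the image
    if area >= 0.99 * image_size:
        return False
    # (3) less than 1% of the detected area falls outside the arenas
    if (binary * out_of_arenas).sum() >= 0.01 * area:
        return False
    # (4) every connected component covers less than 75% of the image
    return bool(np.all(stats[1:, 4] < 0.75 * image_size))
```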
-
-     """
-     Thresholding is a very simple and fast segmentation method. K-means is implemented in a function below.
-     """
-     def thresholding(self, luminosity_threshold=None, lighter_background=None):
-         if luminosity_threshold is not None:
-             binarymg = np.zeros(self.image.shape, dtype=np.uint8)
-             if lighter_background:
-                 binarymg[self.image < luminosity_threshold] = 1
-             else:
-                 binarymg[self.image > luminosity_threshold] = 1
-         else:
-             ret, binarymg = cv2.threshold(self.image, 0, 1, cv2.THRESH_OTSU)
-         # Make sure that blobs are 1 and background is 0
-         if np.sum(binarymg) > np.sum(1 - binarymg):
-             binarymg = 1 - binarymg
-         self.binary_image = binarymg
-
-     def kmeans(self, cluster_number, biomask=None, backmask=None, logical='None', bio_label=None, bio_label2=None):
-         image = self.image.reshape((-1, 1))
-         image = np.float32(image)
-         criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
-         compactness, label, center = cv2.kmeans(image, cluster_number, None, criteria, attempts=10, flags=cv2.KMEANS_RANDOM_CENTERS)
-         kmeans_image = np.uint8(label.flatten().reshape(self.image.shape[:2]))
-         sum_per_label = np.zeros(cluster_number)
-         self.binary_image = np.zeros(self.image.shape[:2], np.uint8)
-         if self.previous_binary_image is not None:
-             # Keep the cluster that overlaps the previous binary image the most
-             image_scores = np.zeros(cluster_number, np.uint64)
-             for i in range(cluster_number):
-                 binary_image_i = np.zeros(self.image.shape[:2], np.uint8)
-                 binary_image_i[np.nonzero(kmeans_image == i)] = 1
-                 image_scores[i] = (binary_image_i * self.previous_binary_image).sum()
-             self.binary_image[np.nonzero(kmeans_image == np.argmax(image_scores))] = 1
-         elif bio_label is not None:
-             self.binary_image[np.nonzero(kmeans_image == bio_label)] = 1
-             self.bio_label = bio_label
-         else:
-             if biomask is not None:
-                 # The cell cluster is the one most represented under the biomask
-                 all_labels = kmeans_image[biomask[0], biomask[1]]
-                 for i in range(cluster_number):
-                     sum_per_label[i] = (all_labels == i).sum()
-                 self.bio_label = np.nonzero(sum_per_label == np.max(sum_per_label))
-             elif backmask is not None:
-                 # The cell cluster is the one least represented under the backmask
-                 all_labels = kmeans_image[backmask[0], backmask[1]]
-                 for i in range(cluster_number):
-                     sum_per_label[i] = (all_labels == i).sum()
-                 self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
-             else:
-                 # By default, the cell cluster is the smallest one
-                 for i in range(cluster_number):
-                     sum_per_label[i] = (kmeans_image == i).sum()
-                 self.bio_label = np.nonzero(sum_per_label == np.min(sum_per_label))
-             self.binary_image[np.nonzero(kmeans_image == self.bio_label)] = 1
-
-         if logical != 'None':
-             image = self.image2.reshape((-1, 1))
-             image = np.float32(image)
-             criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
-             compactness, label, center = cv2.kmeans(image, cluster_number, None, criteria, attempts=10,
-                                                     flags=cv2.KMEANS_RANDOM_CENTERS)
-             kmeans_image = np.uint8(label.flatten().reshape(self.image.shape[:2]))
-             sum_per_label = np.zeros(cluster_number)
-             self.binary_image2 = np.zeros(self.image.shape[:2], np.uint8)
-             if self.previous_binary_image is not None:
-                 image_scores = np.zeros(cluster_number, np.uint64)
-                 for i in range(cluster_number):
-                     binary_image_i = np.zeros(self.image.shape[:2], np.uint8)
-                     binary_image_i[np.nonzero(kmeans_image == i)] = 1
-                     image_scores[i] = (binary_image_i * self.previous_binary_image).sum()
-                 self.binary_image2[np.nonzero(kmeans_image == np.argmax(image_scores))] = 1
-             elif bio_label2 is not None:
-                 self.binary_image2[np.nonzero(kmeans_image == bio_label2)] = 1
-                 self.bio_label2 = bio_label2
-             else:
-                 if biomask is not None:
-                     all_labels = kmeans_image[biomask[0], biomask[1]]
-                     for i in range(cluster_number):
-                         sum_per_label[i] = (all_labels == i).sum()
-                     self.bio_label2 = np.nonzero(sum_per_label == np.max(sum_per_label))
-                 elif backmask is not None:
-                     all_labels = kmeans_image[backmask[0], backmask[1]]
-                     for i in range(cluster_number):
-                         sum_per_label[i] = (all_labels == i).sum()
-                     self.bio_label2 = np.nonzero(sum_per_label == np.min(sum_per_label))
-                 else:
-                     for i in range(cluster_number):
-                         sum_per_label[i] = (kmeans_image == i).sum()
-                     self.bio_label2 = np.nonzero(sum_per_label == np.min(sum_per_label))
-                 self.binary_image2[np.nonzero(kmeans_image == self.bio_label2)] = 1
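A toy run of the same idea, hedged and self-contained: cluster grayscale intensities with cv2.kmeans, then keep the cluster that dominates under a user-labeled "cell" mask (all values below are made up for the demonstration):

```python
import cv2
import numpy as np

img = np.uint8([[10, 12, 200, 210], [11, 13, 205, 220]])      # toy grayscale image
data = np.float32(img.reshape(-1, 1))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, _ = cv2.kmeans(data, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
label_img = labels.reshape(img.shape)
biomask = (np.array([0]), np.array([3]))                       # one pixel labeled "cell"
counts = np.bincount(label_img[biomask].ravel(), minlength=2)
cell_label = np.argmax(counts)                                 # most represented cluster
print((label_img == cell_label).astype(np.uint8))              # bright pixels become 1
```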
-
-     def binarize_k_means_product(self, grey_idx):
-         binarization = np.zeros_like(self.binary_image)
-         binarization[np.nonzero(self.binary_image == grey_idx)] = 1
-         self.binary_image = binarization
-
-     def grid_segmentation(self, lighter_background, side_length=8, step=2, int_variation_thresh=20, mask=None):
-         """
-         Segment small squares of the image to detect local intensity valleys.
-         This method segments the image locally, using Otsu thresholding on a rolling window.
-         :param lighter_background: True when the background is lighter than the specimen
-         :param side_length: the side length of the rolling window used to detect the blobs
-         :type side_length: uint8
-         :param step: the offset between two successive positions of the rolling window
-         :type step: uint8
-         :return: None; the result is stored in self.binary_image
-         """
-         if len(self.image.shape) == 3:
-             print("Image is not Grayscale")
-         if mask is None:
-             min_y = 0
-             min_x = 0
-             y_size = self.image.shape[0]
-             x_size = self.image.shape[1]
-             max_y = y_size + 1
-             max_x = x_size + 1
-             mask = np.ones_like(self.image)
-         else:
-             y, x = np.nonzero(mask)
-             min_y = np.min(y)
-             if (min_y - 20) >= 0:
-                 min_y -= 20
-             else:
-                 min_y = 0
-             max_y = np.max(y) + 1
-             if (max_y + 20) < mask.shape[0]:
-                 max_y += 20
-             else:
-                 max_y = mask.shape[0] - 1
-             min_x = np.min(x)
-             if (min_x - 20) >= 0:
-                 min_x -= 20
-             else:
-                 min_x = 0
-             max_x = np.max(x) + 1
-             if (max_x + 20) < mask.shape[1]:
-                 max_x += 20
-             else:
-                 max_x = mask.shape[1] - 1
-             y_size = max_y - min_y
-             x_size = max_x - min_x
-         grid_image = np.zeros((y_size, x_size), np.uint64)
-         homogeneities = np.zeros((y_size, x_size), np.uint64)
-         cropped_mask = mask[min_y:max_y, min_x:max_x]
-         cropped_image = self.image[min_y:max_y, min_x:max_x]
-         # This would be more efficient if it only looped over the region where mask == 1
-         for to_add in np.arange(0, side_length, step):
-             y_windows = np.arange(0, y_size, side_length)
-             x_windows = np.arange(0, x_size, side_length)
-             y_windows += to_add
-             x_windows += to_add
-             for y_start in y_windows:
-                 if y_start < self.image.shape[0]:
-                     y_end = y_start + side_length
-                     if y_end < self.image.shape[0]:
-                         for x_start in x_windows:
-                             if x_start < self.image.shape[1]:
-                                 x_end = x_start + side_length
-                                 if x_end < self.image.shape[1]:
-                                     if np.any(cropped_mask[y_start:y_end, x_start:x_end]):
-                                         potential_detection = cropped_image[y_start:y_end, x_start:x_end]
-                                         if np.any(potential_detection):
-                                             if np.ptp(potential_detection[np.nonzero(potential_detection)]) < int_variation_thresh:
-                                                 homogeneities[y_start:y_end, x_start:x_end] += 1
-                                             threshold = get_otsu_threshold(potential_detection)
-                                             if lighter_background:
-                                                 net_coord = np.nonzero(potential_detection < threshold)
-                                             else:
-                                                 net_coord = np.nonzero(potential_detection > threshold)
-                                             grid_image[y_start + net_coord[0], x_start + net_coord[1]] += 1
-
-         self.binary_image = np.zeros(self.image.shape, np.uint8)
-         self.binary_image[min_y:max_y, min_x:max_x] = (grid_image >= (side_length // step)).astype(np.uint8)
-         self.binary_image[min_y:max_y, min_x:max_x][homogeneities >= (((side_length // step) // 2) + 1)] = 0
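Stripped of the masking and bookkeeping, the core of this method is a vote map: each shifted window placement casts one vote per pixel it segments as foreground, and a pixel is kept when every placement agrees. A hedged miniature of that idea (assumes a uint8 grayscale input; window sizes are illustrative):

```python
import cv2
import numpy as np

def voting_grid_segmentation(img, side=8, step=2, lighter_background=False):
    votes = np.zeros(img.shape, np.uint16)
    for shift in range(0, side, step):
        for y in range(shift, img.shape[0] - side, side):
            for x in range(shift, img.shape[1] - side, side):
                win = img[y:y + side, x:x + side]
                # Local Otsu threshold for this window
                t, _ = cv2.threshold(win, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                fg = win < t if lighter_background else win > t
                votes[y:y + side, x:x + side] += fg
    # Keep pixels for which every shifted placement voted
    # (cf. the >= side_length // step rule above)
    return (votes >= side // step).astype(np.uint8)
```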
-
-
-     """
-     III/ Use validated shapes to exclude from the analysis the image parts that are far from them,
-     i.e. detect the projected shape boundaries over both axes and determine crop coordinates
-     """
-     def get_crop_coordinates(self, are_zigzag=None):
-         logging.info("Project the image on the y axis to detect rows of arenas")
-         self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
-         logging.info("Project the image on the x axis to detect columns of arenas")
-         self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
-         logging.info("Get crop coordinates using the get_crop_coordinates method of OneImageAnalysis class")
-         row_number = len(np.nonzero(self.y_boundaries)[0]) // 2
-         col_number = len(np.nonzero(self.x_boundaries)[0]) // 2
-         # Automatically determine whether arenas are laid out in a zigzag pattern
-         if (x_max_sum / col_number) * 2 < (y_max_sum / row_number):
-             are_zigzag = "columns"
-         elif (x_max_sum / col_number) > (y_max_sum / row_number) * 2:
-             are_zigzag = "rows"
-         else:
-             are_zigzag = None
-         x_boundary_number = (self.x_boundaries == 1).sum()
-         if x_boundary_number > 1:
-             if x_boundary_number < 4:
-                 x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
-             else:
-                 if are_zigzag == "columns":
-                     x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0][::2]))) // 2
-                 else:
-                     x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
-             cx_min = np.where(self.x_boundaries == - 1)[0][0] - x_interval.astype(int)
-             cx_max = np.where(self.x_boundaries == 1)[0][col_number - 1] + x_interval.astype(int)
-             if cx_min < 0: cx_min = 0
-             if cx_max > len(self.x_boundaries): cx_max = len(self.x_boundaries) - 1
-         else:
-             cx_min = 0
-             cx_max = len(self.x_boundaries) - 1
-
-         y_boundary_number = (self.y_boundaries == 1).sum()
-         if y_boundary_number > 1:
-             if y_boundary_number < 4:
-                 y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
-             else:
-                 if are_zigzag == "rows":
-                     y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0][::2]))) // 2
-                 else:
-                     y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
-             cy_min = np.where(self.y_boundaries == - 1)[0][0] - y_interval.astype(int)
-             cy_max = np.where(self.y_boundaries == 1)[0][row_number - 1] + y_interval.astype(int)
-             if cy_min < 0: cy_min = 0
-             if cy_max > len(self.y_boundaries): cy_max = len(self.y_boundaries) - 1
-         else:
-             cy_min = 0
-             cy_max = len(self.y_boundaries) - 1
-
-         self.crop_coord = [cy_min, cy_max, cx_min, cx_max]
-         return are_zigzag
-
-     def projection_to_get_peaks_boundaries(self, axis):
-         sums = np.sum(self.validated_shapes, axis)
-         slopes = np.greater(sums, 0)
-         slopes = np.append(0, np.diff(slopes))
-         coord = np.nonzero(slopes)[0]
-         for ci in np.arange(len(coord)):
-             if ci % 2 == 0:
-                 slopes[coord[ci]] = - 1
-         return slopes, sums.max()
1007
-
1008
- def jackknife_cutting(self, changes):
1009
- """
1010
-        This function compares the mean distance between the 1s in a vector of 0s.
1011
-        Since a few irregular intervals affect the median less than the mean,
1012
-        it tries to remove each 1, one by one, to see whether the removal sufficiently reduces the difference between mean and median.
1013
-        If the standard error of that difference is higher than 2,
1014
-        we remove each point whose removal decreases that difference by half of the median of these differences,
1015
-        i.e. the differences between the jackknifed means and the original median of the distances between 1s.
1016
- """
1017
- indices = np.nonzero(changes)[0]
1018
- indices_to_remove = np.zeros(len(indices), dtype=bool)
1019
-        # To test the impact of a removal, changes must contain at least four 1s.
1020
- if len(indices) > 3:
1021
- jackknifed_mean = np.zeros(np.sum(changes == 1))
1022
- for dot_i in np.arange(len(indices)):
1023
- steep = changes == 1
1024
- steep[indices[dot_i]] = False
1025
- new_indices = np.where(steep == 1)[0]
1026
- if dot_i != 0:
1027
- new_indices[dot_i:] = indices[(dot_i + 1):] - (indices[dot_i] - indices[dot_i - 1])
1028
- jackknifed_mean[dot_i] = np.mean(np.diff(new_indices))
1029
- improving_cuts = np.absolute(jackknifed_mean - np.median(np.diff(indices)))
1030
- if np.std(improving_cuts) > 2:
1031
- improving_cuts = np.argwhere(improving_cuts < 0.5 * np.median(improving_cuts))
1032
- indices_to_remove[improving_cuts] = 1
1033
- return indices_to_remove
1034
-
1035
- def automatically_crop(self, crop_coord):
1036
- if not self.cropped:
1037
- logging.info("Crop using the automatically_crop method of OneImageAnalysis class")
1038
- self.cropped = True
1039
- self.image = self.image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
1040
- self.bgr = deepcopy(self.bgr[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...])
1041
- if len(self.all_c_spaces) > 0:
1042
- self.all_c_spaces = get_color_spaces(self.bgr)
1043
- if self.im_combinations is not None:
1044
- for i in np.arange(len(self.im_combinations)):
1045
- self.im_combinations[i]["binary_image"] = self.im_combinations[i]["binary_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
1046
- self.im_combinations[i]["converted_image"] = self.im_combinations[i]["converted_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
1047
- self.binary_image = self.binary_image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
1048
- if self.image2 is not None:
1049
- self.image2 = self.image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
1050
- if self.binary_image2 is not None:
1051
- self.binary_image2 = self.binary_image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
1052
- if self.subtract_background is not None:
1053
- self.subtract_background = self.subtract_background[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
1054
- if self.subtract_background2 is not None:
1055
- self.subtract_background2 = self.subtract_background2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
1056
- self.validated_shapes = self.validated_shapes[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
1057
-
1058
- self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
1059
- self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
1060
-
1061
-
1
+ #!/usr/bin/env python3
2
+ """
3
+ Module providing tools for single-image color space analysis and segmentation.
4
+
5
+ The OneImageAnalysis class offers comprehensive image processing capabilities including
6
+ color space conversion (RGB, HSV, LAB, LUV, HLS, YUV), filtering (Gaussian, median, bilateral),
7
+ segmentation (Otsu thresholding, k-means clustering), and shape-based validation. It supports
8
+ multi-step optimization of color channel combinations to maximize contrast between organisms
9
+ and background through automated selection workflows involving logical operations on segmented regions.
10
+
11
+ Classes
12
+ OneImageAnalysis : Analyze images using multiple color spaces for optimal segmentation
13
+
14
+ Notes
15
+ Uses background threads (SaveCombinationThread, ProcessFirstImage) during combination processing.
16
+ """
17
+
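For intuition, a color space combination reduces to a weighted sum of channels drawn from several OpenCV conversions of the same BGR image. A minimal sketch, assuming uint8 BGR input and a `{space: weights}` dictionary like the ones built below (the real `generate_color_space_combination` also handles second combinations and background subtraction):

    import numpy as np
    import cv2

    def combine_channels(bgr, csc_dict):
        """Weighted channel sum, e.g. csc_dict = {'lab': (0, 0, 1), 'hsv': (0, 1, 0)}."""
        spaces = {
            'bgr': bgr,
            'lab': cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB),
            'hsv': cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV),
            'luv': cv2.cvtColor(bgr, cv2.COLOR_BGR2LUV),
            'hls': cv2.cvtColor(bgr, cv2.COLOR_BGR2HLS),
            'yuv': cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV),
        }
        out = np.zeros(bgr.shape[:2], np.float64)
        for space, weights in csc_dict.items():
            for channel, weight in enumerate(weights):
                if weight:
                    out += weight * spaces[space][:, :, channel].astype(np.float64)
        # Rescale to uint8 so downstream thresholding behaves consistently.
        out = 255 * (out - out.min()) / max(float(np.ptp(out)), 1.0)
        return out.astype(np.uint8)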
18
+ import logging
19
+ import os
20
+ from copy import deepcopy
21
+ import numpy as np
22
+ import cv2 # named opencv-python
23
+ import multiprocessing.pool as mp
24
+ from numba.typed import List as TList
25
+ from numba.typed import Dict as TDict
26
+ from numpy.typing import NDArray
27
+ from typing import Tuple
28
+ from skimage.measure import perimeter
29
+ from cellects.image_analysis.morphological_operations import cross_33, create_ellipse, spot_size_coefficients
30
+ from cellects.image_analysis.image_segmentation import generate_color_space_combination, get_color_spaces, extract_first_pc, combine_color_spaces, apply_filter, otsu_thresholding, get_otsu_threshold, kmeans, windowed_thresholding
31
+ from cellects.image_analysis.one_image_analysis_threads import SaveCombinationThread, ProcessFirstImage
32
+ from cellects.image_analysis.network_functions import NetworkDetection
33
+ from cellects.utils.formulas import bracket_to_uint8_image_contrast
34
+ from cellects.utils.utilitarian import split_dict, translate_dict
35
+
36
+
37
+ class OneImageAnalysis:
38
+ """
39
+    This class takes a 3D matrix (two spatial dimensions and one color [BGR] dimension).
40
+ Its methods allow image
41
+    - conversion to any bgr/lab/hsv/luv/hls/yuv channel
42
+    - cropping
43
+ - rotating
44
+    - filtering using some of the most common techniques:
45
+ - Gaussian, Median, Bilateral, Laplacian, Mexican hat
46
+ - segmenting using thresholds or kmeans
47
+ - shape selection according to horizontal size or shape ('circle' vs 'quadrilateral')
48
+
49
+    PS: a viewing method displays the image before and after the most recent modification made to the instance
50
+ """
51
+ def __init__(self, image, shape_number=0):
52
+ self.image = image
53
+ if len(self.image.shape) == 2:
54
+ self.already_greyscale = True
55
+ else:
56
+ self.already_greyscale = False
57
+ self.image2 = None
58
+ self.binary_image2 = None
59
+ self.drift_correction_already_adjusted: bool = False
60
+ # Create empty variables to fill in the following functions
61
+ self.binary_image = np.zeros(self.image.shape[:2], dtype=np.uint8)
62
+ self.previous_binary_image = None
63
+ self.validated_shapes = np.zeros(self.image.shape[:2], dtype=np.uint8)
64
+ self.centroids = 0
65
+ self.shape_number = shape_number
66
+ self.concomp_stats = 0
67
+ self.y_boundaries = None
68
+ self.x_boundaries = None
69
+ self.crop_coord = None
70
+ self.cropped: bool = False
71
+ self.subtract_background = None
72
+ self.subtract_background2 = None
73
+ self.im_combinations = None
74
+ self.bgr = image
75
+ self.colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
76
+ self.spot_shapes = None
77
+ self.all_c_spaces = TDict()
78
+ self.hsv = None
79
+ self.hls = None
80
+ self.lab = None
81
+ self.luv = None
82
+ self.yuv = None
83
+ self.greyscale = None
84
+ self.greyscale2 = None
85
+ self.first_pc_vector = None
86
+ self.drift_mask_coord = None
87
+ self.saved_csc_nb = 0
88
+
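A minimal usage sketch of the class (the image path is hypothetical; `find_first_im_csc` and the `im_combinations` keys appear further down in this file):

    import cv2
    from cellects.core.one_image_analysis import OneImageAnalysis

    bgr = cv2.imread("first_image.png")           # hypothetical seed image
    analysis = OneImageAnalysis(bgr)
    analysis.find_first_im_csc(sample_number=6)   # rank color space combinations
    best = analysis.im_combinations[0]            # best-scoring combination
    print(best["csc"], best["shape_number"])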
89
+ def convert_and_segment(self, c_space_dict: dict, color_number=2, biomask: NDArray[np.uint8]=None,
90
+ backmask: NDArray[np.uint8]=None, subtract_background: NDArray=None,
91
+ subtract_background2: NDArray=None, rolling_window_segmentation: dict=None,
92
+ lighter_background: bool=None,
93
+ allowed_window: NDArray=None, filter_spec: dict=None):
94
+ """
95
+ Convert an image to grayscale and segment it based on specified parameters.
96
+
97
+ This method converts the given color space dictionary into grayscale
98
+ images, combines them with existing color spaces and performs segmentation.
99
+ It has special handling for images that are already in grayscale.
100
+
101
+ **Args:**
102
+
103
+ - `c_space_dict` (dict): Dictionary containing color spaces.
104
+ - `color_number` (int, optional): Number of colors to use in segmentation. Defaults to 2.
105
+ - `biomask` (NDArray[np.uint8], optional): Biomask for segmentation. Defaults to None.
106
+ - `backmask` (NDArray[np.uint8], optional): Backmask for segmentation. Defaults to None.
107
+ - `subtract_background` (NDArray, optional): Background to subtract. Defaults to None.
108
+ - `subtract_background2` (NDArray, optional): Second background to subtract. Defaults to None.
109
+ - `rolling_window_segmentation` (dict, optional): Parameters for rolling-window segmentation ('do', 'side_len', 'step', 'min_int_var'). Defaults to None.
110
+ - `lighter_background` (bool, optional): Flag for lighter background. Defaults to None.
111
+ - `allowed_window` (NDArray, optional): (min_y, max_y, min_x, max_x) window restricting the pixels used for segmentation. Defaults to None.
112
+ - `filter_spec` (dict, optional): Filter specifications. Defaults to None.
113
+
114
+ **Attributes:**
115
+
116
+ - `self.already_greyscale` (bool): Indicates whether the image is already greyscale.
117
+ - `self.all_c_spaces` (dict): Dictionary mapping color space names to converted images.
118
+
119
+ """
120
+ if not self.already_greyscale:
121
+ first_dict, second_dict, c_spaces = split_dict(c_space_dict)
122
+ self.image, self.image2, all_c_spaces, self.first_pc_vector = generate_color_space_combination(self.bgr, c_spaces, first_dict, second_dict, subtract_background, subtract_background2)
123
+ if len(all_c_spaces) > len(self.all_c_spaces):
124
+ self.all_c_spaces = all_c_spaces
125
+
126
+ self.segmentation(logical=c_space_dict['logical'], color_number=color_number, biomask=biomask,
127
+ backmask=backmask, rolling_window_segmentation=rolling_window_segmentation,
128
+ lighter_background=lighter_background, allowed_window=allowed_window, filter_spec=filter_spec)
129
+
130
+
131
+ def segmentation(self, logical: str='None', color_number: int=2, biomask: NDArray[np.uint8]=None,
132
+ backmask: NDArray[np.uint8]=None, bio_label=None, bio_label2=None,
133
+ rolling_window_segmentation: dict=None, lighter_background: bool=None, allowed_window: Tuple=None,
134
+ filter_spec: dict=None):
135
+ """
136
+ Implement segmentation on the image using various methods and parameters.
137
+
138
+ Args:
139
+ logical (str): Logical operation to perform between two binary images.
140
+ Options are 'Or', 'And', 'Xor'. Default is 'None'.
141
+ color_number (int): Number of colors to use in segmentation. Must be greater than 2
142
+ for kmeans clustering. Default is 2.
143
+ biomask (NDArray[np.uint8]): Binary mask for biological areas. Default is None.
144
+ backmask (NDArray[np.uint8]): Binary mask for background areas. Default is None.
145
+ bio_label (Any): Label for biological features. Default is None.
146
+ bio_label2 (Any): Secondary label for biological features. Default is None.
147
+ rolling_window_segmentation (dict): Parameters for rolling-window segmentation ('do', 'side_len', 'step', 'min_int_var'). Default is None.
148
+ lighter_background (bool): Indicates if the background is lighter than objects.
149
+ Default is None.
150
+ allowed_window (Tuple): (min_y, max_y, min_x, max_x) window restricting segmentation. Default is None.
151
+ filter_spec (dict): Dictionary of filters to apply on the image before segmentation.
152
+
153
+ """
154
+ # 1. Check valid pixels for segmentation (e.g. when there is a drift correction)
155
+ if allowed_window is None:
156
+ min_y, max_y, min_x, max_x = 0, self.image.shape[0] + 1, 0, self.image.shape[1] + 1
157
+ else:
158
+ min_y, max_y, min_x, max_x = allowed_window
159
+ greyscale = self.image[min_y:max_y, min_x:max_x].copy()
160
+ # 2. Apply filter on the greyscale images
161
+ if filter_spec is not None and filter_spec["filter1_type"] != "":
162
+ greyscale = apply_filter(greyscale, filter_spec["filter1_type"], filter_spec["filter1_param"])
163
+
164
+ greyscale2 = None
165
+ if logical != 'None':
166
+ greyscale2 = self.image2[min_y:max_y, min_x:max_x].copy()
167
+ if filter_spec is not None and filter_spec["filter2_type"] != "":
168
+ greyscale2 = apply_filter(greyscale2, filter_spec["filter2_type"], filter_spec["filter2_param"])
169
+
170
+ # 3. Do one of the three segmentation algorithms: kmeans, otsu, windowed
171
+ if color_number > 2:
172
+ binary_image, binary_image2, self.bio_label, self.bio_label2 = kmeans(greyscale, greyscale2, color_number, biomask, backmask, logical, bio_label, bio_label2)
173
+ elif rolling_window_segmentation is not None and rolling_window_segmentation['do']:
174
+ binary_image = windowed_thresholding(greyscale, lighter_background, rolling_window_segmentation['side_len'],
175
+ rolling_window_segmentation['step'], rolling_window_segmentation['min_int_var'])
176
+ else:
177
+ binary_image = otsu_thresholding(greyscale)
178
+ if logical != 'None' and color_number == 2:
179
+ if rolling_window_segmentation is not None and rolling_window_segmentation['do']:
180
+ binary_image2 = windowed_thresholding(greyscale2, lighter_background, rolling_window_segmentation['side_len'],
181
+ rolling_window_segmentation['step'], rolling_window_segmentation['min_int_var'])
182
+ else:
183
+ binary_image2 = otsu_thresholding(greyscale2)
184
+
185
+ # 4. Use previous_binary_image to make sure that the specimens are labelled with ones and the background with zeros
186
+ if self.previous_binary_image is not None:
187
+ previous_binary_image = self.previous_binary_image[min_y:max_y, min_x:max_x]
188
+ if not (binary_image * previous_binary_image).any() or (binary_image[0, :].all() and binary_image[-1, :].all() and binary_image[:, 0].all() and binary_image[:, -1].all()):
189
+ # if (binary_image * (1 - previous_binary_image)).sum() > (binary_image * previous_binary_image).sum() + perimeter(binary_image):
190
+ # The 1-pixels of the binary image have more in common with the background than with the specimen
191
+ binary_image = 1 - binary_image
192
+ if logical != 'None':
193
+ if (binary_image2 * (1 - previous_binary_image)).sum() > (binary_image2 * previous_binary_image).sum():
194
+ binary_image2 = 1 - binary_image2
195
+
196
+ # 5. Give back the image their original size and combine binary images (optional)
197
+ self.binary_image = np.zeros(self.image.shape, dtype=np.uint8)
198
+ self.binary_image[min_y:max_y, min_x:max_x] = binary_image
199
+ self.greyscale = np.zeros(self.image.shape, dtype=np.uint8)
200
+ self.greyscale[min_y:max_y, min_x:max_x] = greyscale
201
+ if logical != 'None':
202
+ self.binary_image2 = np.zeros(self.image.shape, dtype=np.uint8)
203
+ self.binary_image2[min_y:max_y, min_x:max_x] = binary_image2
204
+ self.greyscale2 = np.zeros(self.image.shape, dtype=np.uint8)
205
+ self.greyscale2[min_y:max_y, min_x:max_x] = greyscale2
206
+ if logical != 'None':
207
+ if logical == 'Or':
208
+ self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
209
+ elif logical == 'And':
210
+ self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
211
+ elif logical == 'Xor':
212
+ self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
213
+ self.binary_image = self.binary_image.astype(np.uint8)
214
+
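Stripped of masks, filters and k-means, the two-combination path of `segmentation` amounts to Otsu-thresholding each greyscale image and merging the masks with the requested operator; a sketch assuming two uint8 greyscale arrays:

    import numpy as np
    import cv2

    def segment_two(grey1, grey2, logical='Or'):
        # Otsu on each channel combination, then a pixelwise boolean merge.
        _, bin1 = cv2.threshold(grey1, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        _, bin2 = cv2.threshold(grey2, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        op = {'Or': np.logical_or, 'And': np.logical_and, 'Xor': np.logical_xor}[logical]
        return op(bin1, bin2).astype(np.uint8)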
215
+ def _get_all_color_spaces(self):
216
+ """Generate and store all supported color spaces for the image."""
217
+ if len(self.all_c_spaces) < 6 and not self.already_greyscale:
218
+ self.all_c_spaces = get_color_spaces(self.bgr)
219
+
220
+ def generate_subtract_background(self, c_space_dict: dict, drift_corrected: bool=False):
221
+ """
222
+ Generate a background-subtracted image using specified color space dictionary.
223
+
224
+ This method first checks if color spaces have already been generated or
225
+ if the image is greyscale. If not, it generates color spaces from the BGR
226
+ image. It then converts and segments the image using the provided color space
227
+ dictionary without grid segmentation. A disk-shaped structuring element is
228
+ created and used to perform a morphological opening operation on the image,
229
+ resulting in a background-subtracted version. If there is a second image
230
+ (image2), the same operation is performed on it.
231
+
232
+ Args:
233
+ c_space_dict (dict): Dictionary containing color space specifications
234
+ for the segmentation process.
235
+
236
+ Attributes:
237
+ disk_size: Radius of the disk-shaped structuring element
238
+ used for morphological operations, calculated based on image dimensions.
239
+ subtract_background: Background-subtracted version of `image` obtained
240
+ after morphological operations with the disk-shaped structuring element.
241
+ subtract_background2: Background-subtracted version of `image2` obtained
242
+ after morphological operations with the disk-shaped structuring element,
243
+ if `image2` is present."""
244
+ logging.info("Generate background using the generate_subtract_background method of OneImageAnalysis class")
245
+ self._get_all_color_spaces()
246
+ if drift_corrected:
247
+ # self.adjust_to_drift_correction(c_space_dict['logical'])
248
+ self.check_if_image_border_attest_drift_correction()
249
+ self.convert_and_segment(c_space_dict, rolling_window_segmentation=None, allowed_window=self.drift_mask_coord)
250
+ disk_size = np.max((3, int(np.floor(np.sqrt(np.min(self.bgr.shape[:2])) / 2))))
251
+ disk = create_ellipse(disk_size, disk_size).astype(np.uint8)
252
+ self.subtract_background = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, disk)
253
+ if self.image2 is not None:
254
+ self.subtract_background2 = cv2.morphologyEx(self.image2, cv2.MORPH_OPEN, disk)
255
+
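The background estimate relies on a standard morphological opening: structures smaller than the disk disappear, leaving the large-scale illumination field. A hedged equivalent using OpenCV's built-in structuring element instead of `create_ellipse`:

    import numpy as np
    import cv2

    def estimate_background(grey):
        # Radius grows with image size, mirroring the disk_size heuristic above.
        radius = max(3, int(np.floor(np.sqrt(min(grey.shape[:2])) / 2)))
        disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * radius + 1, 2 * radius + 1))
        # Opening removes bright structures smaller than the disk.
        return cv2.morphologyEx(grey, cv2.MORPH_OPEN, disk)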
256
+ def check_if_image_border_attest_drift_correction(self) -> bool:
257
+ """
258
+ Check whether the binary image's borders indicate a drift-correction fill.
259
+
260
+ To determine whether a correction is needed, this function
261
+ evaluates the borders of the binary image. If any two borders are entirely foreground,
262
+ it assumes that a drift-correction fill surrounds the usable window and stores that window's coordinates.
263
+
264
+ Returns:
265
+ bool: True if a drift-correction fill was detected, False otherwise.
266
+
267
+ """
268
+ t = np.all(self.binary_image[0, :])
269
+ b = np.all(self.binary_image[-1, :])
270
+ l = np.all(self.binary_image[:, 0])
271
+ r = np.all(self.binary_image[:, -1])
272
+ self.drift_mask_coord = None
273
+ if (t and b) or (t and l) or (t and r) or (b and l) or (b and r) or (l and r):
274
+ cc_nb, shapes = cv2.connectedComponents(self.binary_image)
275
+ if cc_nb > 1:
276
+ if cc_nb == 2:
277
+ drift_mask_coord = np.nonzero(1 - self.binary_image)
278
+ else:
279
+ back = np.unique(np.concatenate((shapes[0, :], shapes[-1, :], shapes[:, 0], shapes[:, -1]), axis=0))
280
+ drift_mask_coord = np.nonzero(np.logical_or(1 - self.binary_image, 1 - np.isin(shapes, back[back != 0])))
281
+ drift_mask_coord = (np.min(drift_mask_coord[0]), np.max(drift_mask_coord[0]) + 1,
282
+ np.min(drift_mask_coord[1]), np.max(drift_mask_coord[1]) + 1)
283
+ self.drift_mask_coord = drift_mask_coord
284
+ return True
285
+ else:
286
+ return False
287
+ else:
288
+ return False
289
+
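Condensed, the border test asks whether at least two image borders are entirely foreground, the signature of a drift-correction fill surrounding the usable window; a sketch:

    import numpy as np

    def borders_suggest_drift_mask(binary):
        # True when at least two borders are fully foreground.
        borders = (binary[0, :], binary[-1, :], binary[:, 0], binary[:, -1])
        return sum(bool(np.all(b)) for b in borders) >= 2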
290
+ def adjust_to_drift_correction(self, logical: str):
291
+ """
292
+ Adjust the image and binary image to correct for drift.
293
+
294
+ This method applies a drift correction by dilating the binary image, calculating
295
+ the mean value of the drifted region and applying it back to the image. After this,
296
+ it applies Otsu's thresholding method to determine a new binary image and adjusts
297
+ the second image if present. The logical operation specified is then applied to the
298
+ binary images.
299
+
300
+ Args:
301
+ logical (str): Logical operation ('Or', 'And', 'Xor') to apply to the binary
302
+ images."""
303
+ if not self.drift_correction_already_adjusted:
304
+ self.drift_correction_already_adjusted = True
305
+
306
+ mask = cv2.dilate(self.binary_image, kernel=cross_33)
307
+ mask -= self.binary_image
308
+ mask = np.nonzero(mask)
309
+ drift_correction = np.mean(self.image[mask[0], mask[1]])
310
+ self.image[np.nonzero(self.binary_image)] = drift_correction
311
+ threshold = get_otsu_threshold(self.image)
312
+ binary = (self.image > threshold)
313
+ self.binary_image = binary.astype(np.uint8)
314
+
315
+ if self.image2 is not None:
316
+ drift_correction2 = np.mean(self.image2[mask[0], mask[1]])
317
+ self.image2[np.nonzero(self.binary_image)] = drift_correction2
318
+ threshold = get_otsu_threshold(self.image2)
319
+ binary1 = (self.image2 > threshold)
320
+ binary2 = np.logical_not(binary1)
321
+ if binary1.sum() < binary2.sum():
322
+ binary = binary1
323
+ else:
324
+ binary = binary2
325
+ while np.any(binary * self.binary_image2) and threshold > 1:
326
+ threshold -= 1
327
+ binary1 = (self.image2 > threshold)
328
+ binary2 = np.logical_not(binary1)
329
+ if binary1.sum() < binary2.sum():
330
+ binary = binary1
331
+ else:
332
+ binary = binary2
333
+ self.binary_image2 = binary.astype(np.uint8)
334
+ if logical == 'Or':
335
+ self.binary_image = np.logical_or(self.binary_image, self.binary_image2)
336
+ elif logical == 'And':
337
+ self.binary_image = np.logical_and(self.binary_image, self.binary_image2)
338
+ elif logical == 'Xor':
339
+ self.binary_image = np.logical_xor(self.binary_image, self.binary_image2)
340
+ self.binary_image = self.binary_image.astype(np.uint8)
341
+
342
+ def find_first_im_csc(self, sample_number: int=None, several_blob_per_arena:bool=True, spot_shape: str=None,
343
+ spot_size=None, kmeans_clust_nb: int=None, biomask: NDArray[np.uint8]=None,
344
+ backmask: NDArray[np.uint8]=None, color_space_dictionaries: TList=None, basic: bool=True):
345
+ """
346
+ Prepare color space lists, dictionaries and matrices.
347
+
348
+ Args:
349
+ sample_number: An integer representing the sample number. Defaults to None.
350
+ several_blob_per_arena: A boolean indicating whether there are several blobs per arena. Defaults to True.
351
+ spot_shape: A string representing the shape of the spot. Defaults to None.
352
+ spot_size: An integer representing the size of the spot. Defaults to None.
353
+ kmeans_clust_nb: An integer representing the number of clusters for K-means. Defaults to None.
354
+ biomask: A 2D numpy array of type np.uint8 representing the bio mask. Defaults to None.
355
+ backmask: A 2D numpy array of type np.uint8 representing the background mask. Defaults to None.
356
+ color_space_dictionaries: A list of dictionaries containing color space information. Defaults to None.
357
+ basic: A boolean indicating whether to explore the full list of color spaces. Defaults to True.
358
+
359
+ Note:
360
+ This method searches for color space combinations that segment the seed (first) image well, using various color spaces and masks.
361
+
362
+ """
363
+ logging.info(f"Start automatic detection of the first image")
364
+ self.im_combinations = []
365
+ self.saved_images_list = TList()
366
+ self.converted_images_list = TList()
367
+ self.saved_color_space_list = list()
368
+ self.saved_csc_nb = 0
369
+
370
+ if self.image.any():
371
+ self._get_all_color_spaces()
372
+ if color_space_dictionaries is None:
373
+ if basic:
374
+ colorspace_list = ["bgr", "lab", "hsv", "luv", "hls", "yuv"]
375
+ else:
376
+ colorspace_list = ["bgr"]
377
+ color_space_dictionaries = TList()
378
+ for c_space in colorspace_list:
379
+ for i in np.arange(3):
380
+ channels = np.array((0, 0, 0), dtype=np.int8)
381
+ channels[i] = 1
382
+ csc_dict = TDict()
383
+ csc_dict[c_space] = channels
384
+ color_space_dictionaries.append(csc_dict)
385
+
386
+ self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 11), dtype=np.uint32)
387
+ unaltered_cc_nb, cc_nb, area, width_std, height_std, area_std, biosum, backsum = 3, 4, 5, 6, 7, 8, 9, 10
388
+ self.save_combination_thread = SaveCombinationThread(self)
389
+ get_one_channel_result = True
390
+ combine_channels = False
391
+ logging.info(f"Try detection with each available color space channel, one by one.")
392
+ for csc_dict in color_space_dictionaries:
393
+ list_args = [self, get_one_channel_result, combine_channels, csc_dict, several_blob_per_arena,
394
+ sample_number, spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, None]
395
+ ProcessFirstImage(list_args)
396
+
397
+ if sample_number is not None and basic:
398
+ # Try to add csc together
399
+ possibilities = []
400
+ if self.saved_csc_nb > 6:
401
+ different_color_spaces = np.unique(self.saved_color_space_list)
402
+ for color_space in different_color_spaces:
403
+ csc_idx = np.nonzero(np.isin(self.saved_color_space_list, color_space))[0]
404
+ possibilities.append(csc_idx[0] + np.argmin(self.combination_features[csc_idx, area_std]))
405
+ if len(possibilities) <= 6:
406
+ remaining_possibilities = np.arange(len(self.saved_color_space_list))
407
+ remaining_possibilities = remaining_possibilities[np.logical_not(np.isin(remaining_possibilities, possibilities))]
408
+ while len(possibilities) <= 6:
409
+ new_possibility = remaining_possibilities[np.argmin(self.combination_features[remaining_possibilities, area_std])]
410
+ possibilities.append(new_possibility)
411
+ remaining_possibilities = remaining_possibilities[remaining_possibilities != new_possibility]
412
+
413
+
414
+ pool = mp.ThreadPool(processes=os.cpu_count() - 1)
415
+ get_one_channel_result = False
416
+ combine_channels = True
417
+ list_args = [[self, get_one_channel_result, combine_channels, i, several_blob_per_arena, sample_number,
418
+ spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, possibilities] for i in possibilities]
419
+ for process_i in pool.imap_unordered(ProcessFirstImage, list_args):
420
+ pass
421
+
422
+ # Get the most and the least covered images and the 2 best biomask and backmask scores
423
+ # To try combinations of those
424
+ if self.saved_csc_nb <= 1:
425
+ csc_dict = {'bgr': np.array((1, 1, 1))}
426
+ list_args = [self, False, False, csc_dict, several_blob_per_arena,
427
+ sample_number, spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, None]
428
+ process_i = ProcessFirstImage(list_args)
429
+ process_i.image = self.bgr.mean(axis=-1)
430
+ process_i.binary_image = otsu_thresholding(process_i.image)
431
+ process_i.csc_dict = csc_dict
432
+ process_i.total_area = process_i.binary_image.sum()
433
+ process_i.process_binary_image()
434
+ process_i.unaltered_concomp_nb, shapes = cv2.connectedComponents(process_i.validated_shapes)
435
+ self.save_combination_features(process_i)
436
+ self.combination_features = self.combination_features[:self.saved_csc_nb, :]
437
+ fit = np.array([True])
438
+ else:
439
+ coverage = np.argsort(self.combination_features[:self.saved_csc_nb, area])
440
+ most1 = coverage[-1]; most2 = coverage[-2]
441
+ least1 = coverage[0]; least2 = coverage[1]
442
+ if biomask is not None:
443
+ bio_sort = np.argsort(self.combination_features[:self.saved_csc_nb, biosum])
444
+ bio1 = bio_sort[-1]; bio2 = bio_sort[-2]
445
+ if backmask is not None:
446
+ back_sort = np.argsort(self.combination_features[:self.saved_csc_nb, backsum])
447
+ back1 = back_sort[-1]; back2 = back_sort[-2]
448
+
449
+ # Try a logical And between the most covered images
450
+ # Should only need one instantiation
451
+ process_i = ProcessFirstImage(
452
+ [self, False, False, None, several_blob_per_arena, sample_number, spot_size, spot_shape, kmeans_clust_nb, biomask, backmask, None])
453
+ process_i.binary_image = np.logical_and(self.saved_images_list[most1], self.saved_images_list[most2]).astype(np.uint8)
454
+ process_i.image = self.converted_images_list[most1]
455
+ process_i.process_binary_image()
456
+ process_i.csc_dict = {list(self.saved_color_space_list[most1].keys())[0]: self.combination_features[most1, :3],
457
+ "logical": "And",
458
+ list(self.saved_color_space_list[most2].keys())[0] + "2": self.combination_features[most2, :3]}
459
+ process_i.unaltered_concomp_nb = np.min(self.combination_features[(most1, most2), unaltered_cc_nb])
460
+ process_i.total_area = process_i.binary_image.sum()
461
+ self.save_combination_features(process_i)
462
+ process_i.image = self.converted_images_list[least1]
463
+ process_i.binary_image = np.logical_or(self.saved_images_list[least1], self.saved_images_list[least2]).astype(np.uint8)
464
+ process_i.process_binary_image()
465
+ process_i.csc_dict = {list(self.saved_color_space_list[least1].keys())[0]: self.combination_features[least1, :3],
466
+ "logical": "Or",
467
+ list(self.saved_color_space_list[least2].keys())[0] + "2": self.combination_features[least2, :3]}
468
+ process_i.unaltered_concomp_nb = np.max(self.combination_features[(least1, least2), unaltered_cc_nb])
469
+ process_i.total_area = process_i.binary_image.sum()
470
+ self.save_combination_features(process_i)
471
+
472
+ # self.save_combination_features(csc_dict, unaltered_concomp_nb, self.binary_image.sum(), biomask, backmask)
473
+
474
+ # If most images are very low in biosum or backsum, try to mix them together to improve that score
475
+ # Do a logical And between the two best biomasks
476
+ if biomask is not None:
477
+ if not np.all(np.isin((bio1, bio2), (most1, most2))):
478
+ process_i.image = self.converted_images_list[bio1]
479
+ process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[bio2]).astype(
480
+ np.uint8)
481
+ process_i.process_binary_image()
482
+ process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
483
+ "logical": "And",
484
+ list(self.saved_color_space_list[bio2].keys())[0] + "2": self.combination_features[bio2,:3]}
485
+ process_i.unaltered_concomp_nb = np.min(self.combination_features[(bio1, bio2), unaltered_cc_nb])
486
+ process_i.total_area = process_i.binary_image.sum()
487
+
488
+ self.save_combination_features(process_i)
489
+
490
+ # Do a logical And between the two best backmask
491
+ if backmask is not None:
492
+ if not np.all(np.isin((back1, back2), (most1, most2))):
493
+ process_i.image = self.converted_images_list[back1]
494
+ process_i.binary_image = np.logical_and(self.saved_images_list[back1], self.saved_images_list[back2]).astype(
495
+ np.uint8)
496
+ process_i.process_binary_image()
497
+ process_i.csc_dict = {list(self.saved_color_space_list[back1].keys())[0]: self.combination_features[back1, :3],
498
+ "logical": "And",
499
+ list(self.saved_color_space_list[back2].keys())[0] + "2": self.combination_features[back2,:3]}
500
+ process_i.unaltered_concomp_nb = np.min(self.combination_features[(back1, back2), unaltered_cc_nb])
501
+ process_i.total_area = process_i.binary_image.sum()
502
+ self.save_combination_features(process_i)
503
+ # Do a logical Or between the best biomask and the best backmask
504
+ if biomask is not None and backmask is not None:
505
+ if not np.all(np.isin((bio1, back1), (least1, least2))):
506
+ process_i.image = self.converted_images_list[bio1]
507
+ process_i.binary_image = np.logical_and(self.saved_images_list[bio1], self.saved_images_list[back1]).astype(
508
+ np.uint8)
509
+ process_i.process_binary_image()
510
+ process_i.csc_dict = {list(self.saved_color_space_list[bio1].keys())[0]: self.combination_features[bio1, :3],
511
+ "logical": "Or",
512
+ list(self.saved_color_space_list[back1].keys())[0] + "2": self.combination_features[back1, :3]}
513
+ process_i.unaltered_concomp_nb = np.max(self.combination_features[(bio1, back1), unaltered_cc_nb])
514
+ # self.save_combination_features(csc_dict, unaltered_concomp_nb, self.binary_image.sum(), biomask,
515
+ # backmask)
516
+ process_i.total_area = process_i.binary_image.sum()
517
+ self.save_combination_features(process_i)
518
+
519
+ if self.save_combination_thread.is_alive():
520
+ self.save_combination_thread.join()
521
+ self.combination_features = self.combination_features[:self.saved_csc_nb, :]
522
+ # Only keep the rows that fulfil the conditions
523
+ # Save all combinations if they fulfill the following conditions:
524
+ # - Their conncomp number is lower than 3 times the smaller conncomp number.
525
+ # - OR The minimal area variations
526
+ # - OR The minimal width variations
527
+ # - OR The minimal height variations
528
+ # - AND/OR their segmentation fits with biomask and backmask
529
+ width_std_fit = self.combination_features[:, width_std] == np.min(self.combination_features[:, width_std])
530
+ height_std_fit = self.combination_features[:, height_std] == np.min(self.combination_features[:, height_std])
531
+ area_std_fit = self.combination_features[:, area_std] < np.min(self.combination_features[:, area_std]) * 10
532
+ fit = np.logical_or(np.logical_or(width_std_fit, height_std_fit), area_std_fit)
533
+ biomask_fit = np.ones(self.saved_csc_nb, dtype=bool)
534
+ backmask_fit = np.ones(self.saved_csc_nb, dtype=bool)
535
+ if biomask is not None or backmask is not None:
536
+ if biomask is not None:
537
+ biomask_fit = self.combination_features[:, biosum] > 0.9 * len(biomask[0])
538
+ if backmask is not None:
539
+ backmask_fit = self.combination_features[:, backsum] > 0.9 * len(backmask[0])
540
+ # First test a logical OR between the previous options and the mask fits.
541
+ fit = np.logical_or(fit, np.logical_and(biomask_fit, backmask_fit))
542
+ # If this is not stringent enough, use a logical AND and progressively increase the proportion of pixels that
543
+ # must match the biomask and the backmask
544
+ if np.sum(fit) > 5:
545
+ to_add = 0
546
+ while np.sum(fit) > 5 and to_add <= 0.25:
547
+ if biomask is not None:
548
+ biomask_fit = self.combination_features[:, biosum] > (0.75 + to_add) * len(biomask[0])
549
+ if backmask is not None:
550
+ backmask_fit = self.combination_features[:, backsum] > (0.75 + to_add) * len(backmask[0])
551
+ test_fit = np.logical_and(fit, np.logical_and(biomask_fit, backmask_fit))
552
+ if np.sum(test_fit) != 0:
553
+ fit = test_fit
554
+ to_add += 0.05
555
+ # If saved_csc_nb is too low, try boolean operators to mix combinations together (e.g. to fill holes)
556
+ # Order the table according to the number of shapes that have been removed by filters
557
+ # cc_efficiency_order = np.argsort(self.combination_features[:, unaltered_cc_nb] - self.combination_features[:, cc_nb])
558
+ cc_efficiency_order = np.argsort(self.combination_features[:, area_std])
559
+ # Save and return a dictionary containing the selected color space combinations
560
+ # and their corresponding binary images
561
+
562
+ for saved_csc in cc_efficiency_order:
563
+ if fit[saved_csc]:
564
+ self.im_combinations.append({})
565
+ # self.im_combinations.append({})
566
+ # self.im_combinations[len(self.im_combinations) - 1]["csc"] = self.saved_color_space_list[saved_csc]
567
+ self.im_combinations[len(self.im_combinations) - 1]["csc"] = {}
568
+ self.im_combinations[len(self.im_combinations) - 1]["csc"]['logical'] = 'None'
569
+ for k, v in self.saved_color_space_list[saved_csc].items():
570
+ self.im_combinations[len(self.im_combinations) - 1]["csc"][k] = v
571
+ if backmask is not None:
572
+ shape_number, shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
573
+ if np.any(shapes[backmask]):
574
+ shapes[np.isin(shapes, np.unique(shapes[backmask]))] = 0
575
+ self.saved_images_list[saved_csc] = (shapes > 0).astype(np.uint8)
576
+ if biomask is not None:
577
+ self.saved_images_list[saved_csc][biomask] = 1
578
+ if backmask is not None or biomask is not None:
579
+ self.combination_features[saved_csc, cc_nb], shapes = cv2.connectedComponents(self.saved_images_list[saved_csc], connectivity=8)
580
+ self.combination_features[saved_csc, cc_nb] -= 1
581
+ self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = self.saved_images_list[saved_csc]
582
+ self.im_combinations[len(self.im_combinations) - 1]["shape_number"] = self.combination_features[saved_csc, cc_nb]
583
+ self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = self.converted_images_list[saved_csc]
584
+
585
+ self.saved_color_space_list = []
586
+ self.saved_images_list = None
587
+ self.converted_images_list = None
588
+ self.combination_features = None
589
+
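The fit criteria applied above can be summarized in a few lines; a simplified sketch of the ranking, assuming the feature-column indices used in this method:

    import numpy as np

    def select_combinations(features, width_std=6, height_std=7, area_std=8):
        # Keep rows with minimal width/height variability, or area std below 10x the best.
        width_fit = features[:, width_std] == features[:, width_std].min()
        height_fit = features[:, height_std] == features[:, height_std].min()
        area_fit = features[:, area_std] < features[:, area_std].min() * 10
        fit = width_fit | height_fit | area_fit
        # Rank surviving rows by area std, best first.
        order = np.argsort(features[:, area_std])
        return [int(i) for i in order if fit[i]]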
590
+ def save_combination_features(self, process_i: object):
591
+ """
592
+ Saves the combination features of a given processed image.
593
+
594
+ Args:
595
+ process_i (object): The processed image object containing various attributes
596
+ such as validated_shapes, image, csc_dict, unaltered_concomp_nb,
597
+ shape_number, total_area, stats, biomask, and backmask.
598
+
599
+ Attributes (read from the processed image object):
600
601
+ validated_shapes (array-like): The validated shapes of the processed image.
602
+ image (array-like): The image data.
603
+ csc_dict (dict): Color space conversion dictionary
604
+ """
605
+ if process_i.validated_shapes.any():
606
+ self.saved_images_list.append(process_i.validated_shapes)
607
+ self.converted_images_list.append(np.round(process_i.image).astype(np.uint8))
608
+ self.saved_color_space_list.append(process_i.csc_dict)
609
+ self.combination_features[self.saved_csc_nb, :3] = list(process_i.csc_dict.values())[0]
610
+ self.combination_features[
611
+ self.saved_csc_nb, 3] = process_i.unaltered_concomp_nb - 1 # unaltered_cc_nb
612
+ self.combination_features[self.saved_csc_nb, 4] = process_i.shape_number # cc_nb
613
+ self.combination_features[self.saved_csc_nb, 5] = process_i.total_area # area
614
+ self.combination_features[self.saved_csc_nb, 6] = np.std(process_i.stats[1:, 2]) # width_std
615
+ self.combination_features[self.saved_csc_nb, 7] = np.std(process_i.stats[1:, 3]) # height_std
616
+ self.combination_features[self.saved_csc_nb, 8] = np.std(process_i.stats[1:, 4]) # area_std
617
+ if process_i.biomask is not None:
618
+ self.combination_features[self.saved_csc_nb, 9] = np.sum(
619
+ process_i.validated_shapes[process_i.biomask[0], process_i.biomask[1]])
620
+ if process_i.backmask is not None:
621
+ self.combination_features[self.saved_csc_nb, 10] = np.sum(
622
+ (1 - process_i.validated_shapes)[process_i.backmask[0], process_i.backmask[1]])
623
+ self.saved_csc_nb += 1
624
+
625
+ def update_current_images(self, current_combination_id: int):
626
+ """
627
+ Update the current images based on a given combination ID.
628
+
629
+ This method updates two attributes of the instance: `image` and
630
+ `validated_shapes`. The `image` attribute is set to the value of the key
631
+ "converted_image" from a dictionary in `im_combinations` which is
632
+ indexed by the provided `current_combination_id`. Similarly, the
633
+ `validated_shapes` attribute is set to the value of the key "binary_image"
634
+ from the same dictionary.
635
+
636
+ Args:
637
+ current_combination_id (int): The ID of the combination whose
638
+ images should be set as the current ones.
639
+
640
+ """
641
+ self.image = self.im_combinations[current_combination_id]["converted_image"]
642
+ self.validated_shapes = self.im_combinations[current_combination_id]["binary_image"]
643
+
644
+ def find_last_im_csc(self, concomp_nb: int, total_surfarea: int, max_shape_size: int, arenas_mask: NDArray=None,
645
+ ref_image: NDArray=None, subtract_background: NDArray=None, kmeans_clust_nb: int=None,
646
+ biomask: NDArray[np.uint8]=None, backmask: NDArray[np.uint8]=None,
647
+ color_space_dictionaries: dict=None, basic: bool=True):
648
+ """
649
+ Find the color space configurations for the last image that meet the given criteria.
650
+
651
+ Args:
652
+ concomp_nb (Tuple[int, int]): A tuple of two integers representing the minimum and maximum number of connected components.
653
+ total_surfarea (int): The total surface area required for the image.
654
+ max_shape_size (int): The maximum shape size allowed in the image.
655
+ arenas_mask (NDArray, optional): A numpy array representing areas inside the field of interest.
656
+ ref_image (NDArray, optional): A reference image for comparison.
657
+ subtract_background (NDArray, optional): A numpy array representing the background to be subtracted.
658
+ kmeans_clust_nb (int, optional): The number of clusters for k-means clustering.
659
+ biomask (NDArray[np.uint8], optional): A binary mask for biological structures.
660
+ backmask (NDArray[np.uint8], optional): A binary mask for background areas.
661
+ color_space_dictionaries (dict, optional): Dictionaries of color space configurations.
662
+ basic (bool, optional): A flag indicating whether to explore the full list of color spaces.
663
+
664
+ """
665
+ logging.info(f"Start automatic detection of the last image")
666
+ self.im_combinations = []
667
+ self.saved_images_list = TList()
668
+ self.converted_images_list = TList()
669
+ self.saved_color_space_list = list()
670
+ self.saved_csc_nb = 0
671
+
672
+ if self.image.any():
673
+ if arenas_mask is None:
674
+ arenas_mask = np.ones_like(self.binary_image)
675
+ out_of_arenas = 1 - arenas_mask
676
+ self._get_all_color_spaces()
677
+ if color_space_dictionaries is None:
678
+ if basic:
679
+ colorspace_list = TList(("bgr", "lab", "hsv", "luv", "hls", "yuv"))
680
+ else:
681
+ colorspace_list = TList(("lab", "hsv"))
682
+ color_space_dictionaries = TList()
683
+ channels = np.array((1, 1, 1), dtype=np.int8)
684
+ csc_dict = TDict()
685
+ csc_dict["bgr"] = channels
686
+ color_space_dictionaries.append(csc_dict)
687
+ for c_space in colorspace_list:
688
+ for i in np.arange(3):
689
+ channels = np.array((0, 0, 0), dtype=np.int8)
690
+ channels[i] = 1
691
+ csc_dict = TDict()
692
+ csc_dict[c_space] = channels
693
+ color_space_dictionaries.append(csc_dict)
694
+ if ref_image is not None:
695
+ ref_image = cv2.dilate(ref_image, cross_33)
696
+ else:
697
+ ref_image = np.ones(self.bgr.shape[:2], dtype=np.uint8)
698
+ out_of_arenas_threshold = 0.01 * out_of_arenas.sum()
699
+ self.combination_features = np.zeros((len(color_space_dictionaries) + 50, 10), dtype=np.uint32)
700
+ cc_nb_idx, area_idx, out_of_arenas_idx, in_arena_idx, surf_in_common_idx, biosum_idx, backsum_idx = 3, 4, 5, 6, 7, 8, 9
701
+ self.save_combination_thread = SaveCombinationThread(self)
702
+
703
+ # Start with a PCA:
704
+ pca_dict = TDict()
705
+ pca_dict['PCA'] = np.array([1, 1, 1], dtype=np.int8)
706
+ self.image, explained_variance_ratio, first_pc_vector = extract_first_pc(self.bgr)
707
+ self.binary_image = otsu_thresholding(self.image)
708
+ nb, shapes = cv2.connectedComponents(self.binary_image)
709
+ nb -= 1
710
+ surf = self.binary_image.sum()
711
+ outside_pixels = np.sum(self.binary_image * out_of_arenas)
712
+ inside_pixels = np.sum(self.binary_image * arenas_mask)
713
+ in_common = np.sum(ref_image * self.binary_image)
714
+ self.converted_images_list.append(self.image)
715
+ self.saved_images_list.append(self.binary_image)
716
+ self.saved_color_space_list.append(pca_dict)
717
+ self.combination_features[self.saved_csc_nb, :3] = list(pca_dict.values())[0]
718
+ self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
719
+ self.combination_features[self.saved_csc_nb, area_idx] = surf
720
+ self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
721
+ self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
722
+ self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
723
+ if biomask is not None:
724
+ self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
725
+ self.binary_image[biomask[0], biomask[1]])
726
+ if backmask is not None:
727
+ self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
728
+ (1 - self.binary_image)[backmask[0], backmask[1]])
729
+ self.saved_csc_nb += 1
730
+
731
+ potentials = TDict()
732
+ # One channel processing
733
+ for csc_dict in color_space_dictionaries:
734
+ self.image = combine_color_spaces(csc_dict, self.all_c_spaces, subtract_background)
735
+ if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
736
+ self.binary_image, self.binary_image2, self.bio_label, self.bio_label2 = kmeans(self.image, self.image2, kmeans_clust_nb, biomask, backmask)
737
+ else:
738
+ self.binary_image = otsu_thresholding(self.image)
739
+ surf = np.sum(self.binary_image)
740
+ if surf < total_surfarea:
741
+ nb, shapes = cv2.connectedComponents(self.binary_image)
742
+ outside_pixels = np.sum(self.binary_image * out_of_arenas)
743
+ inside_pixels = np.sum(self.binary_image * arenas_mask)
744
+ if outside_pixels < inside_pixels:
745
+ if (nb > concomp_nb[0] - 1) and (nb < concomp_nb[1]):
746
+ in_common = np.sum(ref_image * self.binary_image)
747
+ if in_common > 0:
748
+ nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
749
+ nb -= 1
750
+ if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
751
+ c_space = list(csc_dict.keys())[0]
752
+ self.converted_images_list.append(self.image)
753
+ self.saved_images_list.append(self.binary_image)
754
+ self.saved_color_space_list.append(csc_dict)
755
+ self.combination_features[self.saved_csc_nb, :3] = csc_dict[c_space]
756
+ self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
757
+ self.combination_features[self.saved_csc_nb, area_idx] = surf
758
+ self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
759
+ self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
760
+ self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
761
+ if biomask is not None:
762
+ self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
763
+ self.binary_image[biomask[0], biomask[1]])
764
+ if backmask is not None:
765
+ self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
766
+ (1 - self.binary_image)[backmask[0], backmask[1]])
767
+ if np.isin(c_space, list(potentials.keys())):
768
+ potentials[c_space] += csc_dict[c_space]
769
+ else:
770
+ potentials[c_space] = csc_dict[c_space]
771
+ self.saved_csc_nb += 1
772
+ if len(potentials) > 0:
773
+ # All combination processing
774
+
775
+ # Add a combination of all selected channels:
776
+ self.saved_color_space_list.append(potentials)
777
+ self.image = combine_color_spaces(potentials, self.all_c_spaces, subtract_background)
778
+ if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
779
+ self.binary_image, self.binary_image2, self.bio_label, self.bio_label2 = kmeans(self.image, kmeans_clust_nb=kmeans_clust_nb, biomask=biomask, backmask=backmask)
780
+ else:
781
+ self.binary_image = otsu_thresholding(self.image)
782
+ surf = self.binary_image.sum()
783
+ nb, shapes = cv2.connectedComponents(self.binary_image)
784
+ nb -= 1
785
+ outside_pixels = np.sum(self.binary_image * out_of_arenas)
786
+ inside_pixels = np.sum(self.binary_image * arenas_mask)
787
+ in_common = np.sum(ref_image * self.binary_image)
788
+ self.converted_images_list.append(self.image)
789
+ self.saved_images_list.append(self.binary_image)
790
+ self.saved_color_space_list.append(potentials)
791
+ self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
792
+ self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
793
+ self.combination_features[self.saved_csc_nb, area_idx] = surf
794
+ self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
795
+ self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
796
+ self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
797
+ if biomask is not None:
798
+ self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
799
+ self.binary_image[biomask[0], biomask[1]])
800
+ if backmask is not None:
801
+ self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
802
+ (1 - self.binary_image)[backmask[0], backmask[1]])
803
+ self.saved_csc_nb += 1
804
+ # All combination processing
805
+ # Try to remove color spaces one by one
806
+ i = 0
807
+ original_length = len(potentials)
808
+ while len(potentials) > 1 and i < original_length // 2:
809
+ color_space_to_remove = TList()
810
+ # Loop until only one color space remains or removing one causes a strong enough area change
811
+ previous_c_space = list(potentials.keys())[-1]
812
+ for c_space in potentials.keys():
813
+ try_potentials = potentials.copy()
814
+ try_potentials.pop(c_space)
815
+ if i > 0:
816
+ try_potentials.pop(previous_c_space)
817
+ self.image = combine_color_spaces(try_potentials, self.all_c_spaces, subtract_background)
818
+ if kmeans_clust_nb is not None and (biomask is not None or backmask is not None):
819
+ self.binary_image, self.binary_image2, self.bio_label, self.bio_label2 = kmeans(self.image, kmeans_clust_nb=kmeans_clust_nb, biomask=biomask, backmask=backmask)
820
+ else:
821
+ self.binary_image = otsu_thresholding(self.image)
822
+ surf = np.sum(self.binary_image)
823
+ if surf < total_surfarea:
824
+ nb, shapes = cv2.connectedComponents(self.binary_image)
825
+ outside_pixels = np.sum(self.binary_image * out_of_arenas)
826
+ inside_pixels = np.sum(self.binary_image * arenas_mask)
827
+ if outside_pixels < inside_pixels:
828
+ if (nb > concomp_nb[0] - 1) and (nb < concomp_nb[1]):
829
+ in_common = np.sum(ref_image * self.binary_image)
830
+ if in_common > 0:
831
+ nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.binary_image)
832
+ nb -= 1
833
+ if np.all(np.sort(stats[:, 4])[:-1] < max_shape_size):
834
+ # If removing a color space still fits the requirements, we store the resulting values
835
+ self.converted_images_list.append(self.image)
836
+ self.saved_images_list.append(self.binary_image)
837
+ self.saved_color_space_list.append(try_potentials)
838
+ self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
839
+ self.combination_features[self.saved_csc_nb, area_idx] = surf
840
+ self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
841
+ self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
842
+ self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
843
+ if biomask is not None:
844
+ self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
845
+ self.binary_image[biomask[0], biomask[1]])
846
+ if backmask is not None:
847
+ self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
848
+ (1 - self.binary_image)[backmask[0], backmask[1]])
849
+ self.saved_csc_nb += 1
850
+ color_space_to_remove.append(c_space)
851
+ if i > 0:
852
+ color_space_to_remove.append(previous_c_space)
853
+ # If it does not (i.e. it failed one of the "if" checks above), we definitely remove that color space
854
+ previous_c_space = c_space
855
+ color_space_to_remove = np.unique(color_space_to_remove)
856
+ for remove_col_space in color_space_to_remove:
857
+ potentials.pop(remove_col_space)
858
+ i += 1
859
+ if len(potentials) > 0 and i > 1:
860
+ self.converted_images_list.append(self.image)
861
+ self.saved_images_list.append(self.binary_image)
862
+ self.saved_color_space_list.append(potentials)
863
+ self.combination_features[self.saved_csc_nb, :3] = list(potentials.values())[0]
864
+ self.combination_features[self.saved_csc_nb, cc_nb_idx] = nb
865
+ self.combination_features[self.saved_csc_nb, area_idx] = surf
866
+ self.combination_features[self.saved_csc_nb, out_of_arenas_idx] = outside_pixels
867
+ self.combination_features[self.saved_csc_nb, in_arena_idx] = inside_pixels
868
+ self.combination_features[self.saved_csc_nb, surf_in_common_idx] = in_common
869
+ if biomask is not None:
870
+ self.combination_features[self.saved_csc_nb, biosum_idx] = np.sum(
871
+ self.binary_image[biomask[0], biomask[1]])
872
+ if backmask is not None:
873
+ self.combination_features[self.saved_csc_nb, backsum_idx] = np.sum(
874
+ (1 - self.binary_image)[backmask[0], backmask[1]])
875
+ self.saved_csc_nb += 1
876
+
877
+ self.combination_features = self.combination_features[:self.saved_csc_nb, :]
878
+ # Among all potentials, select the best one according to criteria of decreasing importance
879
+ cc_efficiency_order = np.argsort(self.combination_features[:, surf_in_common_idx] + self.combination_features[:, in_arena_idx] - self.combination_features[:, out_of_arenas_idx])
880
+
881
+ # Save and return a dictionary containing the selected color space combinations
882
+ # and their corresponding binary images
883
+ self.im_combinations = []
884
+ for saved_csc in cc_efficiency_order:
885
+ if len(self.saved_color_space_list[saved_csc]) > 0:
886
+ self.im_combinations.append({})
887
+ self.im_combinations[len(self.im_combinations) - 1]["csc"] = {}
888
+ self.im_combinations[len(self.im_combinations) - 1]["csc"]['logical'] = 'None'
889
+ for k, v in self.saved_color_space_list[saved_csc].items():
890
+ self.im_combinations[len(self.im_combinations) - 1]["csc"][k] = v
891
+ self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = self.saved_images_list[saved_csc]
892
+ self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = np.round(self.converted_images_list[
893
+ saved_csc]).astype(np.uint8)
894
+ self.saved_color_space_list = []
895
+ self.saved_images_list = None
896
+ self.converted_images_list = None
897
+ self.combination_features = None
898
+
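`extract_first_pc` projects each pixel's BGR triplet onto the first principal component of the pixel cloud, which often maximizes contrast in one shot. A NumPy sketch of that idea (not the package's exact routine):

    import numpy as np

    def first_pc_greyscale(bgr):
        # Flatten pixels to an (N, 3) cloud and centre it.
        pixels = bgr.reshape(-1, 3).astype(np.float64)
        centered = pixels - pixels.mean(axis=0)
        # First principal component = covariance eigenvector with the largest eigenvalue.
        eigvals, eigvecs = np.linalg.eigh(np.cov(centered, rowvar=False))
        first_pc = eigvecs[:, np.argmax(eigvals)]
        projected = centered @ first_pc
        # Rescale the projection to a uint8 greyscale image.
        projected = 255 * (projected - projected.min()) / max(float(np.ptp(projected)), 1.0)
        return projected.reshape(bgr.shape[:2]).astype(np.uint8), first_pc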
899
+ def network_detection(self, arenas_mask: NDArray=None, pseudopod_min_size: int=50, csc_dict: dict=None, biomask=None, backmask=None):
900
+ """
901
+ Network Detection Function
902
+
903
+ Perform network detection and pseudopod analysis on an image.
904
+
905
+ Parameters
906
+ ----------
907
+ arenas_mask : NDArray, optional
908
+ The mask indicating the arena regions in the image.
909
+ pseudopod_min_size : int, optional
910
+ The minimum size for pseudopods to be detected.
911
+ csc_dict : dict, optional
912
+ A dictionary containing color space conversion parameters. If None,
913
+ defaults to {'bgr': np.array((1, 1, 1), np.int8), 'logical': 'None'}
914
+ biomask : NDArray, optional
915
+ The mask for biological objects in the image.
916
+ backmask : NDArray, optional
917
+ The background mask.
918
+
919
+ Notes
920
+ -----
921
+ This function modifies the object's state by setting `self.im_combinations`
922
+ with the results of network detection and pseudopod analysis.
923
+ """
924
+ logging.info(f"Start automatic detection of network(s) in the last image")
925
+ if len(self.bgr.shape) == 3:
926
+ if csc_dict is None:
927
+ csc_dict = {'bgr': np.array((1, 1, 1), np.int8), 'logical': 'None'}
928
+ self._get_all_color_spaces()
929
+ # csc_dict = translate_dict(csc_dict)
930
+ # self.image = combine_color_spaces(csc_dict, self.all_c_spaces)
931
+ first_dict, second_dict, c_spaces = split_dict(csc_dict)
932
+ self.image, _, _, first_pc_vector = generate_color_space_combination(self.bgr, c_spaces, first_dict, second_dict, all_c_spaces=self.all_c_spaces)
933
+ # if first_pc_vector is not None:
934
+ # csc_dict = {"bgr": first_pc_vector, "logical": 'None'}
935
+ greyscale = self.image
936
+ NetDet = NetworkDetection(greyscale, possibly_filled_pixels=arenas_mask)
937
+ NetDet.get_best_network_detection_method()
938
+ lighter_background = NetDet.greyscale_image[arenas_mask > 0].mean() < NetDet.greyscale_image[arenas_mask == 0].mean()
939
+ NetDet.detect_pseudopods(lighter_background, pseudopod_min_size=pseudopod_min_size, only_one_connected_component=False)
940
+ NetDet.merge_network_with_pseudopods()
941
+ cc_efficiency_order = np.argsort(NetDet.quality_metrics)
942
+ self.im_combinations = []
943
+ for _i in cc_efficiency_order:
944
+ res_i = NetDet.all_results[_i]
945
+ self.im_combinations.append({})
946
+ self.im_combinations[len(self.im_combinations) - 1]["csc"] = csc_dict
947
+ self.im_combinations[len(self.im_combinations) - 1]["converted_image"] = bracket_to_uint8_image_contrast(res_i['filtered'])
948
+ self.im_combinations[len(self.im_combinations) - 1]["binary_image"] = res_i['binary']
949
+ self.im_combinations[len(self.im_combinations) - 1]['filter_spec']= {'filter1_type': res_i['filter'], 'filter1_param': [np.min(res_i['sigmas']), np.max(res_i['sigmas'])], 'filter2_type': "", 'filter2_param': [1., 1.]}
950
+ self.im_combinations[len(self.im_combinations) - 1]['rolling_window']= res_i['rolling_window']
951
+
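+    # For reference, each 'filter_spec' built above has the shape (values illustrative):
+    #     {'filter1_type': res_i['filter'], 'filter1_param': [min_sigma, max_sigma],
+    #      'filter2_type': "", 'filter2_param': [1., 1.]}
+    # i.e. only the first filter slot is filled; the second is left as a no-op default.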
+    def get_crop_coordinates(self):
+        """
+        Get the crop coordinates for image processing.
+
+        This method projects the validated shapes on both the x and y axes to
+        detect rows and columns of arenas, determines whether the arenas are
+        arranged in a zigzag pattern, and computes the cropping boundaries,
+        stored in `self.crop_coord` as [cy_min, cy_max, cx_min, cx_max].
+
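+        Example:
+            A worked sketch of the margin computation (hypothetical numbers):
+            if columns of arenas start at x = 40, 140, 240 (where
+            `x_boundaries == -1`) and end at x = 80, 180, 280 (where
+            `x_boundaries == 1`), then x_interval = max(diff of ends) // 2 = 50,
+            so cx_min = 40 - 50, clamped to 0, and cx_max = 280 + 50 = 330.
+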
+        """
+        logging.info("Project the image on the y axis to detect rows of arenas")
+        self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
+        logging.info("Project the image on the x axis to detect columns of arenas")
+        self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
+        logging.info("Get crop coordinates using the get_crop_coordinates method of OneImageAnalysis class")
+        row_number = len(np.nonzero(self.y_boundaries)[0]) // 2
+        col_number = len(np.nonzero(self.x_boundaries)[0]) // 2
+        # Automatically determine whether rows or columns of arenas are staggered (zigzag)
+        are_zigzag = None
+        if col_number > 0 and row_number > 0:
+            if (x_max_sum / col_number) * 2 < (y_max_sum / row_number):
+                are_zigzag = "columns"
+            elif (x_max_sum / col_number) > (y_max_sum / row_number) * 2:
+                are_zigzag = "rows"
+        # Add a margin of half the largest inter-arena spacing on each side
+        x_boundary_number = (self.x_boundaries == 1).sum()
+        if x_boundary_number > 1:
+            if x_boundary_number < 4:
+                x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
+            else:
+                if are_zigzag == "columns":
+                    x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0][::2]))) // 2
+                else:
+                    x_interval = np.absolute(np.max(np.diff(np.where(self.x_boundaries == 1)[0]))) // 2
+            cx_min = np.where(self.x_boundaries == -1)[0][0] - x_interval.astype(int)
+            cx_max = np.where(self.x_boundaries == 1)[0][col_number - 1] + x_interval.astype(int)
+            if cx_min < 0:
+                cx_min = 0
+            if cx_max > len(self.x_boundaries):
+                cx_max = len(self.x_boundaries) - 1
+        else:
+            cx_min = 0
+            cx_max = len(self.x_boundaries)
+
+        y_boundary_number = (self.y_boundaries == 1).sum()
+        if y_boundary_number > 1:
+            if y_boundary_number < 4:
+                y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
+            else:
+                if are_zigzag == "rows":
+                    y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0][::2]))) // 2
+                else:
+                    y_interval = np.absolute(np.max(np.diff(np.where(self.y_boundaries == 1)[0]))) // 2
+            cy_min = np.where(self.y_boundaries == -1)[0][0] - y_interval.astype(int)
+            cy_max = np.where(self.y_boundaries == 1)[0][row_number - 1] + y_interval.astype(int)
+            if cy_min < 0:
+                cy_min = 0
+            if cy_max > len(self.y_boundaries):
+                cy_max = len(self.y_boundaries) - 1
+        else:
+            cy_min = 0
+            cy_max = len(self.y_boundaries)
+
+        self.crop_coord = [cy_min, cy_max, cx_min, cx_max]
+
+    def projection_to_get_peaks_boundaries(self, axis: int) -> Tuple[NDArray, int]:
+        """
+        Projection to get peaks' boundaries.
+
+        Calculate the projection of the validated shapes along a specified axis
+        and identify the boundaries of its non-zero peaks.
+
+        Args:
+            axis: int,
+                The axis along which to calculate the projection and identify
+                peaks' boundaries.
+
+        Returns:
+            Tuple[NDArray, int]:
+                A tuple containing two elements: an array marking the peaks'
+                boundaries (-1 at the first index of each peak, 1 at the first
+                index after it) and an integer giving the maximum projected sum
+                along the specified axis.
+
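+        Example:
+            A minimal sketch: if the projection along the chosen axis were
+            [0, 0, 3, 5, 0, 2, 0], the returned array would be
+            [0, 0, -1, 0, 1, -1, 1] (peaks spanning indices 2-3 and 5) and the
+            returned maximum would be 5.
+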
+        """
+        sums = np.sum(self.validated_shapes, axis)
+        # Flag the positions where the projection switches between zero and non-zero
+        slopes = np.greater(sums, 0)
+        slopes = np.append(0, np.diff(slopes))
+        coord = np.nonzero(slopes)[0]
+        # Mark each peak's opening boundary with -1 (its closing boundary stays 1)
+        for ci in range(len(coord)):
+            if ci % 2 == 0:
+                slopes[coord[ci]] = -1
+        return slopes, sums.max()
+
+    def automatically_crop(self, crop_coord):
+        """
+        Automatically crop the image using the given crop coordinates.
+
+        This method crops the main image, the binary images, the color spaces
+        and every other image attribute, then updates the internal state
+        related to cropping.
+
+        Args:
+            crop_coord (tuple): The coordinates for cropping, in the format
+                (start_y, end_y, start_x, end_x), representing the bounding box
+                region to crop from the image.
+
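+        Example:
+            A hypothetical call, keeping rows 10 to 500 and columns 20 to 700:
+
+            >>> oia.automatically_crop((10, 500, 20, 700))  # doctest: +SKIP
+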
+        """
+        if not self.cropped and crop_coord is not None:
+            logging.info("Crop using the automatically_crop method of OneImageAnalysis class")
+            self.cropped = True
+            self.image = self.image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            self.bgr = deepcopy(self.bgr[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...])
+            self._get_all_color_spaces()
+            if self.im_combinations is not None:
+                for i in np.arange(len(self.im_combinations)):
+                    self.im_combinations[i]["binary_image"] = self.im_combinations[i]["binary_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+                    self.im_combinations[i]["converted_image"] = self.im_combinations[i]["converted_image"][crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+            self.binary_image = self.binary_image[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+            if self.greyscale is not None:
+                self.greyscale = self.greyscale[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.greyscale2 is not None:
+                self.greyscale2 = self.greyscale2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.image2 is not None:
+                self.image2 = self.image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.binary_image2 is not None:
+                self.binary_image2 = self.binary_image2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.subtract_background is not None:
+                self.subtract_background = self.subtract_background[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            if self.subtract_background2 is not None:
+                self.subtract_background2 = self.subtract_background2[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3], ...]
+            self.validated_shapes = self.validated_shapes[crop_coord[0]:crop_coord[1], crop_coord[2]:crop_coord[3]]
+
+            # Recompute the arena row/column boundaries on the cropped shapes
+            self.y_boundaries, y_max_sum = self.projection_to_get_peaks_boundaries(axis=1)
+            self.x_boundaries, x_max_sum = self.projection_to_get_peaks_boundaries(axis=0)
+