cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
cellects/image_analysis/image_segmentation.py
@@ -0,0 +1,272 @@
+ #!/usr/bin/env python3
+ """
+ Functions to convert BGR images into grayscale and grayscale images into binary.
+ """
+ import numpy as np
+ import cv2
+ from tqdm import tqdm
+ from numba.typed import Dict as TDict
+ from numba import njit
+ from cellects.utils.utilitarian import less_along_first_axis, greater_along_first_axis, translate_dict
+ from cellects.utils.formulas import bracket_to_uint8_image_contrast
+ from cellects.image_analysis.morphological_operations import get_largest_connected_component
+ from skimage.measure import perimeter
+ from scipy.optimize import minimize
+ from skimage.filters import frangi, sato, threshold_otsu
+ 
+ 
+ def get_color_spaces(bgr_image, space_names=""):
+     """
+     Create a typed dictionary containing the bgr image converted into:
+     lab, hsv, luv, hls and yuv
+     :param bgr_image: 3D matrix of a bgr image; the first two dims are coordinates, the last is color.
+     :param space_names: list of the color space names to convert to; if empty, convert to all of them.
+     :return: TDict mapping each color space name to a float64 array
+     """
+     if 'logical' in space_names:
+         space_names.pop(np.nonzero(np.array(space_names, dtype=str) == 'logical')[0][0])
+     c_spaces = TDict()
+     c_spaces['bgr'] = bgr_image.astype(np.float64)
+     if len(space_names) == 0:
+         c_spaces['lab'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2LAB).astype(np.float64)
+         c_spaces['hsv'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV).astype(np.float64)
+         c_spaces['luv'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2LUV).astype(np.float64)
+         c_spaces['hls'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HLS).astype(np.float64)
+         c_spaces['yuv'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2YUV).astype(np.float64)
+     else:
+         if np.isin('lab', space_names):
+             c_spaces['lab'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2LAB).astype(np.float64)
+         if np.isin('hsv', space_names):
+             c_spaces['hsv'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV).astype(np.float64)
+         if np.isin('luv', space_names):
+             c_spaces['luv'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2LUV).astype(np.float64)
+         if np.isin('hls', space_names):
+             c_spaces['hls'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HLS).astype(np.float64)
+         if np.isin('yuv', space_names):
+             c_spaces['yuv'] = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2YUV).astype(np.float64)
+     return c_spaces
+ 
+ 
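As a quick orientation for the dictionary this function returns, here is a minimal usage sketch (the image path is hypothetical, and the module is assumed importable as cellects.image_analysis.image_segmentation): OpenCV loads images in BGR order, and every converted space keeps the (height, width, 3) float64 layout.

    import cv2
    from cellects.image_analysis.image_segmentation import get_color_spaces

    bgr = cv2.imread("arena.jpg")                      # hypothetical input image, loaded as BGR
    spaces = get_color_spaces(bgr)                     # empty space_names: convert to all five spaces
    print(sorted(spaces.keys()))                       # ['bgr', 'hls', 'hsv', 'lab', 'luv', 'yuv']
    print(spaces['hsv'].shape, spaces['hsv'].dtype)    # (height, width, 3) float64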
+ @njit()
+ def combine_color_spaces(c_space_dict, all_c_spaces, subtract_background=None):
+     """
+     Compute a linear combination of some channels of some color spaces.
+     Subtract the background if needed.
+     Standardize the values in the image so that they range between 0 and 255.
+ 
+     :param c_space_dict: the linear combination of channels to compute
+     :param all_c_spaces: all converted versions of the image
+     :type all_c_spaces: TDict[str, np.float64 array]
+     :param subtract_background: array of the background to subtract
+     :return: the grayscale image resulting from the linear combination of the selected channels
+     """
+     image = np.zeros((all_c_spaces['bgr'].shape[0], all_c_spaces['bgr'].shape[1]), dtype=np.float64)
+     for space, channels in c_space_dict.items():
+         image += channels[0] * all_c_spaces[space][:, :, 0] + channels[1] * \
+                  all_c_spaces[space][:, :, 1] + channels[2] * all_c_spaces[space][:, :, 2]
+     if subtract_background is not None:
+         # Shift the whole matrix by its smallest value so that its minimum becomes 0
+         image -= np.min(image)
+         # Bracket the values between 0 and 255 before comparing with the background
+         max_im = np.max(image)
+         if max_im != 0:
+             image = 255 * (image / max_im)
+         if image.sum() > subtract_background.sum():
+             image -= subtract_background
+         else:
+             image = subtract_background - image
+     # Shift the whole matrix by its smallest value so that its minimum becomes 0
+     image -= np.min(image)
+     # Bracket the values between 0 and 255
+     max_im = np.max(image)
+     if max_im != 0:
+         image = 255 * (image / max_im)
+     return image
+     # c_space_dict=first_dict; all_c_spaces=self.all_c_spaces; subtract_background=background
+ 
+ 
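For illustration, a hedged sketch of calling this jitted function directly: the channel weights below are arbitrary, and the dict is built as a numba typed Dict (a plain dict would need translate_dict first), mirroring the TDict construction used above.

    import cv2
    import numpy as np
    from numba.typed import Dict as TDict
    from cellects.image_analysis.image_segmentation import get_color_spaces, combine_color_spaces

    bgr = cv2.imread("arena.jpg")                  # hypothetical input image
    spaces = get_color_spaces(bgr, ["lab"])        # only convert what the combination needs
    weights = TDict()
    weights["lab"] = np.array([0.0, 0.0, 1.0])     # illustrative: keep only the b* channel of CIELAB
    grey = combine_color_spaces(weights, spaces)   # float64 image rescaled to [0, 255]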
+ def generate_color_space_combination(bgr_image, c_spaces, first_dict, second_dict={}, background=None, background2=None, convert_to_uint8=False):
+     """
+     Convert a bgr image into one (or two) grayscale image(s), each defined by a linear
+     combination of color space channels, optionally subtracting a background.
+     """
+     all_c_spaces = get_color_spaces(bgr_image, c_spaces)
+     try:
+         greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
+     except Exception:
+         # combine_color_spaces is jitted: make sure the dict is numba-typed before retrying
+         first_dict = translate_dict(first_dict)
+         greyscale_image = combine_color_spaces(first_dict, all_c_spaces, background)
+     if convert_to_uint8:
+         greyscale_image = bracket_to_uint8_image_contrast(greyscale_image)
+     greyscale_image2 = None
+     if len(second_dict) > 0:
+         greyscale_image2 = combine_color_spaces(second_dict, all_c_spaces, background2)
+         if convert_to_uint8:
+             greyscale_image2 = bracket_to_uint8_image_contrast(greyscale_image2)
+     return greyscale_image, greyscale_image2
+ 
+ 
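A hedged sketch of the wrapper, reusing the bgr image from the sketches above: one call converts the needed color spaces, applies the first channel combination, and optionally a second one. The weights are illustrative, and both dicts are numba-typed here because only the first dict gets the translate_dict fallback.

    import numpy as np
    from numba.typed import Dict as TDict

    first = TDict()
    first["lab"] = np.array([0.0, 0.0, 1.0])       # illustrative weights
    second = TDict()
    second["hsv"] = np.array([0.0, 1.0, 0.0])
    grey1, grey2 = generate_color_space_combination(
        bgr, ["lab", "hsv"], first, second_dict=second, convert_to_uint8=True)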
+ def filter_mexican_hat(image):
+     """
+     Apply a 5x5 mexican hat filter to an image.
+     Other common filters are:
+     - Gaussian blur: cv2.GaussianBlur(image, (size, size), 0)
+     - Median blur: cv2.medianBlur(image, ksize=size)
+     - Sharpening: cv2.filter2D(image, -1, np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]))
+     - Edge-preserving blur: cv2.bilateralFilter(image, d=size, sigmaColor=sigma, sigmaSpace=sigma)  # e.g. 5, 75, 75 or 9, 150, 150
+     - Edge extraction: cv2.Laplacian(image, ddepth=depth, ksize=size)
+     :param image: the image to filter
+     :type image: uint8
+     :return: the filtered image
+     :rtype: uint8
+     """
+     return cv2.filter2D(image, -1, np.array(
+         [[0, 0, -1, 0, 0], [0, -1, -2, -1, 0], [-1, -2, 16, -2, -1], [0, -1, -2, -1, 0], [0, 0, -1, 0, 0]]))
+ 
+ 
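Note that the kernel coefficients sum to zero, so uniform regions map to zero while small bright blobs are amplified — a quick check:

    import numpy as np

    kernel = np.array([[0, 0, -1, 0, 0],
                       [0, -1, -2, -1, 0],
                       [-1, -2, 16, -2, -1],
                       [0, -1, -2, -1, 0],
                       [0, 0, -1, 0, 0]])
    assert kernel.sum() == 0   # flat areas produce 0 after filtering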
+ @njit()
+ def get_otsu_threshold(image):
+     """
+     Compute the Otsu threshold of an image. Function from Anastasia, see:
+     https://github.com/spmallick/learnopencv/blob/master/otsu-method/otsu_implementation.py
+ 
+     :param image: grayscale image (any float or integer type)
+     :return: the threshold maximizing the inter-class variance
+     """
+     # Set the total number of bins in the histogram
+     bins_num = 256
+ 
+     # Get the image histogram
+     hist, bin_edges = np.histogram(image, bins=bins_num)
+ 
+     # Calculate centers of bins
+     bin_mids = (bin_edges[:-1] + bin_edges[1:]) / 2.
+ 
+     # Iterate over all thresholds (indices) and get the class probabilities w1(t), w2(t)
+     weight1 = np.cumsum(hist)
+     weight2 = np.cumsum(hist[::-1])[::-1]
+ 
+     # Get the class means mu0(t)
+     mean1 = np.cumsum(hist * bin_mids) / weight1
+     # Get the class means mu1(t)
+     mean2 = (np.cumsum((hist * bin_mids)[::-1]) / weight2[::-1])[::-1]
+ 
+     inter_class_variance = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
+ 
+     # Maximize the inter_class_variance function
+     index_of_max_val = np.argmax(inter_class_variance)
+ 
+     threshold = bin_mids[:-1][index_of_max_val]
+     return threshold
+ 
+ 
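Since this mirrors the histogram-based derivation used by skimage, both implementations should agree closely on a clearly bimodal input — an illustrative check with synthetic data:

    import numpy as np
    from skimage.filters import threshold_otsu

    rng = np.random.default_rng(0)
    img = np.concatenate([rng.normal(60, 10, 5000), rng.normal(180, 10, 5000)])
    print(get_otsu_threshold(img), threshold_otsu(img))   # both should land near 120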
+ @njit()
+ def otsu_thresholding(image):
+     """
+     Segment a grayscale image into a binary image using Otsu thresholding.
+ 
+     Contrary to cv2.threshold(image, 0, 1, cv2.THRESH_OTSU),
+     this method does not require the image to be uint8.
+     Hence, it does not require any rounding if the image has been converted
+     to np.float64. Consequently, the binary image obtained contains less noise.
+ 
+     :param image: image of any type and any dimension
+     :return: a uint8 binary image in which 1s are less numerous than 0s
+         --> a usual assumption for segmentation, especially for the first
+         image of the time lapse of a growing cell.
+     """
+     threshold = get_otsu_threshold(image)
+     binary_image = (image > threshold)
+     binary_image2 = np.logical_not(binary_image)
+     if binary_image.sum() < binary_image2.sum():
+         return binary_image.astype(np.uint8)
+     else:
+         return binary_image2.astype(np.uint8)
+ 
+ 
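A short usage sketch, with grey being the float64 image from the combination sketch above: the output is a uint8 mask whose foreground (1) is the minority class by construction.

    binary = otsu_thresholding(grey)      # grey: any float or integer image
    print(binary.dtype, binary.mean())    # uint8; mean below 0.5 since 1 is the minority class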
+ @njit()
+ def segment_with_lum_value(converted_video, basic_bckgrnd_values, l_threshold, lighter_background):
+     """
+     Use a luminosity value as a threshold to segment each frame of a video into two categories, 0 and 1.
+ 
+     :param converted_video: a 3D matrix with time, y_coord, x_coord
+     :param basic_bckgrnd_values: a vector of typical background values of each frame (t)
+     :param l_threshold: an average luminosity threshold
+     :param lighter_background: True if the background of the image is lighter than the shape to detect
+     :type lighter_background: bool
+     :return: the segmented video and a vector of the luminosity threshold of each frame (t)
+     """
+     if lighter_background:
+         l_threshold_over_time = l_threshold - (basic_bckgrnd_values[-1] - basic_bckgrnd_values)
+         if np.all(np.logical_and(0 <= l_threshold_over_time, l_threshold_over_time <= 255)):
+             segmentation = less_along_first_axis(converted_video, l_threshold_over_time)
+         else:
+             # Fall back on a constant threshold when drift pushes it out of the uint8 range
+             segmentation = np.zeros_like(converted_video)
+             if l_threshold > 255:
+                 l_threshold = 255
+             segmentation += converted_video < l_threshold
+     else:
+         l_threshold_over_time = l_threshold - (basic_bckgrnd_values[-1] - basic_bckgrnd_values)
+         if np.all(np.logical_and(0 <= l_threshold_over_time, l_threshold_over_time <= 255)):
+             segmentation = greater_along_first_axis(converted_video, l_threshold_over_time)
+         else:
+             # Fall back on a constant threshold when drift pushes it out of the uint8 range
+             segmentation = np.zeros_like(converted_video)
+             if l_threshold > 255:
+                 l_threshold = 255
+             segmentation += converted_video > l_threshold
+     return segmentation, l_threshold_over_time
+ 
+ 
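An illustrative call on a synthetic video (the arrays are hypothetical, and the per-frame comparison is assumed to follow the docstring of less_along_first_axis above): the background estimates shift the threshold over time to compensate for global luminosity drift.

    import numpy as np

    rng = np.random.default_rng(0)
    video = rng.integers(0, 256, (10, 64, 64)).astype(np.uint8)   # (t, y, x) grayscale video
    bckgrnd = np.linspace(100.0, 110.0, 10)                       # per-frame background estimate
    masks, thresholds = segment_with_lum_value(video, bckgrnd, 120.0, lighter_background=True)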
+ def _network_perimeter(threshold, img):
+     # Objective function: minimizing the negative perimeter maximizes the perimeter
+     # of the binary network obtained with this threshold
+     binary_img = img > threshold
+     return -perimeter(binary_img)
+ 
+ 
+ def rolling_window_segmentation(greyscale_image, possibly_filled_pixels, patch_size=(80, 80)):
+     """
+     Segment a grayscale image patch by patch: threshold each (overlapping) patch
+     independently with Otsu's method, then average the binary patches so that each
+     pixel holds the fraction of patches that classified it as network.
+     """
+     # Regularly spaced patch centers, half a patch apart, along each dimension
+     patch_centers = [
+         np.floor(np.linspace(
+             p // 2, s - p // 2, int(np.ceil(s / (p // 2))) - 1
+         )).astype(int)
+         for s, p in zip(greyscale_image.shape, patch_size)
+     ]
+     patch_centers = np.transpose(np.meshgrid(*patch_centers), (1, 2, 0)).reshape((-1, 2))
+ 
+     patch_slices = [
+         tuple(slice(c - p // 2, c + p // 2, 1)
+               for c, p in zip(p_c, patch_size)) for p_c in patch_centers
+     ]
+     maximize_parameter = False
+ 
+     network_patches = []
+     patch_thresholds = []
+     for patch in tqdm(patch_slices):
+         v = greyscale_image[patch] * possibly_filled_pixels[patch]
+         if v.max() > 0 and np.ptp(v) > 0.5:
+             t = threshold_otsu(v)
+ 
+             if maximize_parameter:
+                 # Refine the Otsu threshold by maximizing the network perimeter
+                 res = minimize(_network_perimeter, x0=t, args=(v,), method='Nelder-Mead')
+                 t = res.x[0]
+ 
+             network_patches.append(v > t)
+             patch_thresholds.append(t)
+         else:
+             # Patch too flat or empty: no network detected
+             network_patches.append(np.zeros_like(v))
+             patch_thresholds.append(0)
+ 
+     # Average the overlapping binary patches into a per-pixel vote
+     network_img = np.zeros_like(greyscale_image)
+     count_img = np.zeros_like(greyscale_image)
+     for patch, network_patch, t in zip(patch_slices, network_patches, patch_thresholds):
+         network_img[patch] += network_patch
+         count_img[patch] += np.ones_like(network_patch)
+     network_img /= count_img
+     return network_img
+ 
+ 
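A usage sketch, assuming grey is a float64 grayscale image and every pixel is a candidate: each returned pixel holds the fraction of overlapping patches that classified it as network, so thresholding at 0.5 implements a majority vote.

    import numpy as np

    mask = np.ones_like(grey)
    vote_img = rolling_window_segmentation(grey, mask, patch_size=(80, 80))
    binary_network = vote_img > 0.5    # keep pixels detected in most overlapping patches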
+ def binary_quality_index(binary_img):
+     """
+     Return a quality index for a binary image: the squared perimeter of its largest
+     connected component divided by the total number of foreground pixels (0. if empty).
+     """
+     from cellects.image_analysis.shape_descriptors import ShapeDescriptors
+ 
+     if np.any(binary_img):
+         # SD = ShapeDescriptors(binary_img, ["euler_number"])
+         # index = - SD.descriptors['euler_number']
+         nb, largest_cc = get_largest_connected_component(binary_img)
+         index = np.square(perimeter(largest_cc)) / binary_img.sum()
+         # index = (largest_cc.sum() * perimeter(largest_cc)) / binary_img.sum()
+     else:
+         index = 0.
+     return index
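The index is dimensionless (squared perimeter over foreground area), so candidate segmentations of the same image can be ranked with it — a small illustrative check:

    import numpy as np

    blob = np.zeros((100, 100), dtype=np.uint8)
    blob[40:60, 40:60] = 1                                        # one compact square component
    print(binary_quality_index(blob))                             # low for a compact shape
    print(binary_quality_index(np.zeros((100, 100), np.uint8)))   # 0.0 for an empty mask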