cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,679 @@
+ """
+ This class uses the first image (and, if more accuracy is required, other images) to detect the contours of the arenas of
+ one experiment, and uses that information to create videos of smaller size (to reduce RAM usage), saved as
+ .npy files on the hard drive. Along the way, Cellects checks whether there is enough RAM, splits the work when needed,
+ and warns the user through a thread message displayed in the interface.
+
+ This class contains methods to automatically detect arenas from specimens detected in an image at the beginning of an experiment.
+ Arenas can be delimited automatically or manually. Cellects includes two automatic algorithms: a fast one, to be used when arenas are symmetric around the initial position of the specimens or sufficiently far from each other, and a slower one, to be used otherwise. These automatic algorithms work even if the arenas are not detectable in the images, but only when there is a single individual in each arena. In the case of manual delimitation, the user draws each arena by holding down the mouse button. The following paragraphs describe the two automatic algorithms.
+ The fast algorithm computes each arena's coordinates using the distances between the components detected in the seed image after step 1. For each component, Cellects finds its nearest neighbor and uses that distance as the side of a square, centered on the component, giving the x and y limits of the arena.
+ If the initial positions of the cells do not provide good estimates of the center of each arena, Cellects can use the slower algorithm to find them. Because Cellects is intended to be very general, it cannot use specific characteristics of a particular arena to find its edges. Instead, it uses the motion and/or growth of the cells to infer the position of each arena. To do so, Cellects segments a sample of 5 images (equally spaced in time) using the same algorithm as for the seed image. Even if this segmentation is not accurate, the following algorithm finds the arenas robustly. First, it finds a rough estimate of the expected position of each cell. To do this, it dilates the cell in the first frame until the edge of the dilated image is closer to the nearest centroid of the other cells than to its own centroid. Then it moves to the second image and dilates it as well, in order to link together disconnected components that may result from an inaccurate segmentation. It then performs an AND operation between these two dilated images and dilates the result so that a single component remains per arena. Doing this for all cells yields an estimate of their shapes in the second frame, from which their centroids can be computed. This procedure is then repeated for each pair of consecutive frames. Finally, Cellects computes, for each arena, the bounding box containing the cells detected in the 5 frames, and uses it to estimate the arena's coordinates.
+ In some experiments, all cells are located at one edge of the arena and move roughly in the same direction. Cellects includes an option to take advantage of this regularity and improve the accuracy of arena detection: once the centroids of a frame have been estimated (as described above), Cellects finds the centroid with the highest displacement with respect to the previous frame and applies the same displacement to all centroids.
+
+ It also contains methods to write the videos (as .npy files of numpy arrays) corresponding to the pixels delimited by these arenas.
+ """
+
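+ # Illustrative sketch (not part of the Cellects pipeline): the fast algorithm
+ # described above boils down to taking, for each component, half the distance to
+ # its nearest neighbor as the margin added around its bounding box. A minimal
+ # standalone version, assuming `centroids` is an (n, 2) array of component centers:
+ #
+ #     import numpy as np
+ #     diffs = centroids[:, None, :] - centroids[None, :, :]
+ #     dists = np.sqrt((diffs ** 2).sum(axis=-1))   # pairwise distances, (n, n)
+ #     np.fill_diagonal(dists, np.inf)              # ignore self-distances
+ #     half_sides = dists.min(axis=1) / 2           # margin around each component
+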
+ import os
+ import logging
+ from copy import deepcopy
+ import numpy as np
+ import cv2
+ import psutil
+
+ # OneImageAnalysis and sum_of_abs_differences are used below but were missing
+ # from the original imports; the module paths are assumed from the package layout.
+ from cellects.core.one_image_analysis import OneImageAnalysis
+ from cellects.utils.formulas import sum_of_abs_differences
+ from cellects.image_analysis.morphological_operations import cross_33, Ellipse, get_minimal_distance_between_2_shapes, get_every_coord_between_2_points, rank_from_top_to_bottom_from_left_to_right, expand_until_neighbor_center_gets_nearer_than_own
+ from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
+
+
+ class OneVideoPerBlob:
+     """
+     This class finds the bounding box containing all pixels covered by one blob over time
+     and creates a video from it.
+     It does that for each blob, taking a few pieces of information into account.
+     """
+
+     def __init__(self, first_image, starting_blob_hsize_in_pixels, raw_images):
+         """
+         Store the inputs and derive the kernel size used for the dilations below.
+         """
+         # Initialize all variables used in the following methods
+         self.first_image = first_image
+         self.original_shape_hsize = starting_blob_hsize_in_pixels
+         self.raw_images = raw_images
+         if self.original_shape_hsize is not None:
+             self.k_size = int(((self.original_shape_hsize // 5) * 2) + 1)
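+             # e.g. with starting_blob_hsize_in_pixels = 100 (as in the __main__
+             # example below), k_size = int(((100 // 5) * 2) + 1) = 41: an odd
+             # kernel size proportional to the specimen size.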
+
+         # Create the required empty containers, especially for the bounding box coordinates of each video
+         self.ordered_first_image = None
+         self.motion_list = list()
+         self.shapes_to_remove = None
+         self.not_analyzed_individuals = None
+
+     def get_bounding_boxes(self, are_gravity_centers_moving, img_list, color_space_combination, color_number=2, sample_size=5, all_specimens_have_same_direction=True, display=False):
+         logging.info("Get the coordinates of all arenas using the get_bounding_boxes method of the OneVideoPerBlob class")
+         # are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1; img_list=self.data_list; color_space_combination=self.vars['convert_for_origin']; color_number=self.vars["color_number"]; sample_size=5
+
+         self.big_kernel = Ellipse((self.k_size, self.k_size)).create()
+         self.big_kernel = self.big_kernel.astype(np.uint8)
+         self.small_kernel = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=np.uint8)
+         self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+             self.first_image.validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+         self.unchanged_ordered_fimg = deepcopy(self.ordered_first_image)
+         self.modif_validated_shapes = deepcopy(self.first_image.validated_shapes)
+         self.standard = -1
+         counter = 0
+         while np.any(np.less(self.standard, 0)) and counter < 20:
+             counter += 1
+             self.left = np.zeros(self.first_image.shape_number, dtype=np.int64)
+             self.right = np.repeat(self.modif_validated_shapes.shape[1], self.first_image.shape_number)
+             self.top = np.zeros(self.first_image.shape_number, dtype=np.int64)
+             self.bot = np.repeat(self.modif_validated_shapes.shape[0], self.first_image.shape_number)
+             if are_gravity_centers_moving:
+                 self.get_bb_with_moving_centers(img_list, color_space_combination, color_number, sample_size, all_specimens_have_same_direction, display)
+                 # Reconnect, for each specimen, the newly detected areas with its initial shape
+                 new_ordered_first_image = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                 for i in np.arange(1, self.first_image.shape_number + 1):
+                     previous_shape = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                     previous_shape[np.nonzero(self.unchanged_ordered_fimg == i)] = 1
+                     new_potentials = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                     new_potentials[np.nonzero(self.ordered_first_image == i)] = 1
+                     new_potentials[np.nonzero(self.unchanged_ordered_fimg == i)] = 0
+
+                     pads = ProgressivelyAddDistantShapes(new_potentials, previous_shape, max_distance=2)
+                     pads.consider_shapes_sizes(min_shape_size=10)
+                     pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=False)
+                     new_ordered_first_image[np.nonzero(pads.expanded_shape)] = i
+                 self.ordered_first_image = new_ordered_first_image
+                 self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                 self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
+                 self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                     self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+                 self.get_quick_bb()
+                 # self.print_bounding_boxes()
+             else:
+                 self.get_quick_bb()
+             self.standardize_video_sizes()
+         if counter == 20:
+             # Safety exit: clamp the coordinates that are still out of the picture
+             self.top[self.top < 0] = 1
+             self.bot[self.bot >= self.ordered_first_image.shape[0] - 1] = self.ordered_first_image.shape[0] - 2
+             self.left[self.left < 0] = 1
+             self.right[self.right >= self.ordered_first_image.shape[1] - 1] = self.ordered_first_image.shape[1] - 2
+
+     def get_quick_bb(self):
+         """
+         Compute the euclidean distances between cells to get each arena's bounding box.
+         To save computation time:
+         1) We use triu_indices to consider each pairwise distance only once
+         2) We only compute distances when the x and y distances are small enough
+            (i.e. less than twice the minimal distance found so far)
+
+         :return:
+         """
+         from timeit import default_timer
+         tic = default_timer()
+         shapes = deepcopy(self.modif_validated_shapes)
+         eroded_shapes = cv2.erode(self.modif_validated_shapes, cross_33)
+         shapes = shapes - eroded_shapes
+         x_min = self.ordered_stats[:, 0]
+         y_min = self.ordered_stats[:, 1]
+         x_max = self.ordered_stats[:, 0] + self.ordered_stats[:, 2]
+         y_max = self.ordered_stats[:, 1] + self.ordered_stats[:, 3]
+         x_min_dist = shapes.shape[1]
+         y_min_dist = shapes.shape[0]
+
+         shapes *= self.ordered_first_image
+         shape_nb = len(np.unique(shapes)) - 1
+         i = 0
+         a_indices, b_indices = np.triu_indices(shape_nb, 1)
+         a_indices, b_indices = a_indices + 1, b_indices + 1
+         all_distances = np.zeros((len(a_indices), 3), dtype=float)
+         # For every pair of components, find the minimal distance
+         for (a, b) in zip(a_indices, b_indices):
+             x_dist = np.absolute(x_max[a - 1] - x_min[b - 1])
+             y_dist = np.absolute(y_max[a - 1] - y_min[b - 1])
+             if x_dist < 2 * x_min_dist and y_dist < 2 * y_min_dist:
+                 sub_shapes = np.logical_or(shapes == a, shapes == b) * shapes
+                 sub_shapes = sub_shapes[np.min((y_min[a - 1], y_min[b - 1])):np.max((y_max[a - 1], y_max[b - 1])),
+                                         np.min((x_min[a - 1], x_min[b - 1])):np.max((x_max[a - 1], x_max[b - 1]))]
+                 sub_shapes[sub_shapes == a] = 1
+                 sub_shapes[sub_shapes == b] = 2
+                 if np.any(sub_shapes == 1) and np.any(sub_shapes == 2):
+                     all_distances[i, :] = a, b, get_minimal_distance_between_2_shapes(sub_shapes, False)
+
+                 if x_dist > y_dist:
+                     x_min_dist = np.min((x_min_dist, x_dist))
+                 else:
+                     y_min_dist = np.min((y_min_dist, y_dist))
+             i += 1
+         for shape_i in np.arange(1, shape_nb + 1):
+             # Get the rows of the pairwise comparisons involving shape i
+             idx = np.nonzero(np.logical_or(all_distances[:, 0] == shape_i, all_distances[:, 1] == shape_i))[0]
+             # Compute the minimal distance related to shape i and divide it by 2
+             # (guarding against an empty selection, which would make min() fail)
+             if idx.size > 0:
+                 dist = int(all_distances[idx, 2].min() // 2)
+             else:
+                 dist = 1
+             # Save the coordinates of the arena around shape i
+             self.left[shape_i - 1] = x_min[shape_i - 1] - dist
+             self.right[shape_i - 1] = x_max[shape_i - 1] + dist
+             self.top[shape_i - 1] = y_min[shape_i - 1] - dist
+             self.bot[shape_i - 1] = y_max[shape_i - 1] + dist
+         print(default_timer() - tic)
+
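+     # A quick reminder of the pairwise pattern used above: np.triu_indices(n, 1)
+     # enumerates each unordered pair exactly once. For instance, with n = 4:
+     #
+     #     >>> import numpy as np
+     #     >>> np.triu_indices(4, 1)
+     #     (array([0, 0, 0, 1, 1, 2]), array([1, 2, 3, 2, 3, 3]))
+     #
+     # which, after the "+ 1" shift, yields the component pairs (1,2), (1,3),
+     # (1,4), (2,3), (2,4) and (3,4).
+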
+     def standardize_video_sizes(self):
+         distance_threshold_to_consider_an_arena_out_of_the_picture = None  # in pixels, worked nicely with -50
+
+         # The modifications allowing to skip setups that are out of view do not work for moving centers
+         y_diffs = self.bot - self.top
+         x_diffs = self.right - self.left
+         add_to_y = (np.max(y_diffs) - y_diffs) / 2
+         add_to_x = (np.max(x_diffs) - x_diffs) / 2
+         self.standard = np.zeros((len(self.top), 4), dtype=np.int64)
+         # Use a signed integer for the padding: the original uint8 cast would
+         # wrap around for paddings larger than 255 pixels
+         self.standard[:, 0] = self.top - np.int64(np.floor(add_to_y))
+         self.standard[:, 1] = self.bot + np.int64(np.ceil(add_to_y))
+         self.standard[:, 2] = self.left - np.int64(np.floor(add_to_x))
+         self.standard[:, 3] = self.right + np.int64(np.ceil(add_to_x))
+
+         # Monitor whether one bounding box gets out of the picture
+         out_of_pic = deepcopy(self.standard)
+         out_of_pic[:, 1] = self.ordered_first_image.shape[0] - out_of_pic[:, 1] - 1
+         out_of_pic[:, 3] = self.ordered_first_image.shape[1] - out_of_pic[:, 3] - 1
+
+         if distance_threshold_to_consider_an_arena_out_of_the_picture is None:
+             distance_threshold_to_consider_an_arena_out_of_the_picture = np.min(out_of_pic) - 1
+
+         # If it occurs at least once, apply a correction; otherwise, continue and write videos.
+         # If the overflow is strong, remove the corresponding individuals and redo the bounding box search.
+         if np.any(np.less(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)):
+             # Remove shapes
+             self.standard = -1
+             self.shapes_to_remove = np.nonzero(np.less(out_of_pic, -20))[0]
+             for shape_i in self.shapes_to_remove:
+                 self.ordered_first_image[self.ordered_first_image == (shape_i + 1)] = 0
+             self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+             self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
+             self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                 self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+
+             self.first_image.shape_number = self.first_image.shape_number - len(self.shapes_to_remove)
+             self.not_analyzed_individuals = np.unique(self.unchanged_ordered_fimg -
+                                                       (self.unchanged_ordered_fimg * self.modif_validated_shapes))[1:]
+         else:
+             # Reduce all box sizes if necessary and proceed
+             if np.any(np.less(out_of_pic, 0)):
+                 # When the overflow is weak, redo the standardization with lower "add_to_y" and "add_to_x"
+                 overflow = np.nonzero(np.logical_and(np.less(out_of_pic, 0), np.greater_equal(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)))[0]
+                 # Check whether the overflow occurs on the y axis
+                 if np.any(np.less(out_of_pic[overflow, :2], 0)):
+                     add_to_top_and_bot = np.min(out_of_pic[overflow, :2])
+                     self.standard[:, 0] = self.standard[:, 0] - add_to_top_and_bot
+                     self.standard[:, 1] = self.standard[:, 1] + add_to_top_and_bot
+                 # Check whether the overflow occurs on the x axis
+                 if np.any(np.less(out_of_pic[overflow, 2:], 0)):
+                     add_to_left_and_right = np.min(out_of_pic[overflow, 2:])
+                     self.standard[:, 2] = self.standard[:, 2] - add_to_left_and_right
+                     self.standard[:, 3] = self.standard[:, 3] + add_to_left_and_right
+             # If the x or y sizes are odd, make them even:
+             # for some reason, opencv removes 1 from odd dimensions when writing videos
+             if (self.standard[0, 1] - self.standard[0, 0]) % 2 != 0:
+                 self.standard[:, 1] -= 1
+             if (self.standard[0, 3] - self.standard[0, 2]) % 2 != 0:
+                 self.standard[:, 3] -= 1
+             self.top = self.standard[:, 0]
+             self.bot = self.standard[:, 1]
+             self.left = self.standard[:, 2]
+             self.right = self.standard[:, 3]
+
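+     # Worked example of the standardization above: with two boxes of heights 40
+     # and 50 px, y_diffs = [40, 50] and add_to_y = [5, 0], so the smaller box is
+     # padded by floor(5) = 5 px on top and ceil(5) = 5 px at the bottom; every
+     # video then shares the same 50 px height, which later allows storing all
+     # videos in a single array.
+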
+     def get_bb_with_moving_centers(self, img_list, color_space_combination, color_number, sample_size=2, all_specimens_have_same_direction=True, display=False):
+         """
+         Starting with the first image, this function tries to grow each shape to see if it covers segmented pixels
+         in the following images, i.e. it segments evenly spaced images (see self.segment_blob_motion and OneImageAnalysis)
+         to roughly track blob motion, making sure that each video will only contain the shapes that
+         have a chronological link with the shape as it was in the first image.
+
+         :param img_list: The whole list of image names
+         :type img_list: list
+         :param sample_size: The number of images to analyse. The higher it is, the higher both the accuracy and the
+                             computation time are
+         :type sample_size: int
+         :param all_specimens_have_same_direction: Whether all specimens move roughly in the same direction or not
+         :type all_specimens_have_same_direction: bool
+         :return: For each shape, the coordinates of a bounding box including all of its movements
+         """
+         print("Read and segment each sample image and rank shapes from top to bottom and from left to right")
+
+         self.motion_list = list()
+         # frame_number must be defined whether img_list holds file names or images
+         # (the original only defined it in the file-name case)
+         if img_list.dtype.type is np.str_:
+             frame_number = len(img_list)
+         else:
+             frame_number = img_list.shape[0]
+         sample_numbers = np.floor(np.linspace(0, frame_number, sample_size)).astype(int)
+         for frame_idx in np.arange(sample_size):
+             if frame_idx == 0:
+                 self.motion_list.insert(frame_idx, self.first_image.validated_shapes)
+             else:
+                 if img_list.dtype.type is np.str_:
+                     image = img_list[sample_numbers[frame_idx] - 1]
+                 else:
+                     image = img_list[sample_numbers[frame_idx] - 1, ...]
+                 self.motion_list.insert(frame_idx, self.segment_blob_motion(image, color_space_combination, color_number))
+
+         self.big_kernels = Ellipse((self.k_size, self.k_size)).create().astype(np.uint8)
+         self.small_kernels = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=np.uint8)
+
+         ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+             self.first_image.validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+         previous_ordered_image_i = deepcopy(self.ordered_first_image)
+         if img_list.dtype.type is np.str_:
+             img_to_display = self.read_and_rotate(img_list[sample_numbers[1] - 1], self.first_image.bgr)
+         else:
+             img_to_display = img_list[sample_numbers[1] - 1, ...]
+         if self.first_image.cropped:
+             img_to_display = img_to_display[self.first_image.crop_coord[0]:self.first_image.crop_coord[1],
+                                             self.first_image.crop_coord[2]:self.first_image.crop_coord[3], :]
+         print("For each frame, expand each previously confirmed shape to add area to its maximal bounding box")
+         for step_i in np.arange(1, sample_size):
+             print(step_i)
+             previously_ordered_centroids = deepcopy(ordered_centroids)
+             image_i = deepcopy(self.motion_list[step_i])
+             image_i = cv2.dilate(image_i, self.small_kernels, iterations=5)
+
+             # Display the segmentation result for all shapes at this frame
+             if img_list.dtype.type is np.str_:
+                 img_to_display = self.read_and_rotate(img_list[sample_numbers[step_i] - 1], self.first_image.bgr)
+             else:
+                 img_to_display = img_list[sample_numbers[step_i] - 1, ...]
+             if self.first_image.cropped:
+                 img_to_display = img_to_display[self.first_image.crop_coord[0]: self.first_image.crop_coord[1],
+                                                 self.first_image.crop_coord[2]: self.first_image.crop_coord[3], :]
+
+             for shape_i in range(self.first_image.shape_number):
+                 shape_to_expand = np.zeros(image_i.shape, dtype=np.uint8)
+                 shape_to_expand[previous_ordered_image_i == (shape_i + 1)] = 1
+                 without_shape_i = deepcopy(previous_ordered_image_i)
+                 without_shape_i[previous_ordered_image_i == (shape_i + 1)] = 0
+                 test_shape = expand_until_neighbor_center_gets_nearer_than_own(
+                     shape_to_expand, without_shape_i, ordered_centroids[shape_i, :],
+                     np.delete(ordered_centroids, shape_i, axis=0), self.big_kernels)
+                 test_shape = expand_until_neighbor_center_gets_nearer_than_own(
+                     test_shape, without_shape_i, ordered_centroids[shape_i, :],
+                     np.delete(ordered_centroids, shape_i, axis=0), self.small_kernels)
+                 confirmed_shape = test_shape * image_i
+                 previous_ordered_image_i[np.nonzero(confirmed_shape)] = shape_i + 1
+                 # Update the image by drawing a purple outline around the current shape
+                 contours, useless = cv2.findContours(confirmed_shape, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
+                 cv2.drawContours(img_to_display, contours, -1, (255, 0, 180), 3)
+                 if display:
+                     imtoshow = cv2.resize(img_to_display.astype(np.uint8), (960, 540))
+                     cv2.imshow('Rough detection', imtoshow)
+                     cv2.waitKey(1)
+             if display:
+                 cv2.destroyAllWindows()
+
+             mask_to_display = np.zeros(image_i.shape, dtype=np.uint8)
+             mask_to_display[np.nonzero(previous_ordered_image_i)] = 1
+             contours_to_display, useless = cv2.findContours(mask_to_display,
+                                                             cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
+             cv2.drawContours(img_to_display, contours_to_display, -1, (255, 0, 0), 3)
+             if display:
+                 imtoshow = cv2.resize(img_to_display.astype(np.uint8), (960, 540))
+                 cv2.imshow('Rough detection', imtoshow)
+                 cv2.waitKey(1)
+
+             # If the blob moves enough to drastically change its gravity center,
+             # update the ordered centroids at each frame.
+             detected_shape_number, mask_to_display = cv2.connectedComponents(mask_to_display, connectivity=8)
+             mask_to_display = mask_to_display.astype(np.uint8)
+             while np.logical_and(detected_shape_number - 1 != self.first_image.shape_number,
+                                  np.sum(mask_to_display > 0) < mask_to_display.size):
+                 mask_to_display = cv2.dilate(mask_to_display, self.small_kernels, iterations=1)
+                 detected_shape_number, mask_to_display = cv2.connectedComponents(mask_to_display, connectivity=8)
+                 mask_to_display[np.nonzero(mask_to_display)] = 1
+                 mask_to_display = mask_to_display.astype(np.uint8)
+                 if display:
+                     imtoshow = cv2.resize(mask_to_display * 255, (960, 540))
+                     cv2.imshow('expansion', imtoshow)
+                     cv2.waitKey(1)
+             if display:
+                 cv2.destroyAllWindows()
+             ordered_stats, ordered_centroids = rank_from_top_to_bottom_from_left_to_right(
+                 mask_to_display, self.first_image.y_boundaries)
+
+             new_ordered_centroids = ordered_centroids
+             if all_specimens_have_same_direction:
+                 # Align every centroid on the maximal centroid displacement.
+                 x_diffs = new_ordered_centroids[:, 0] - previously_ordered_centroids[:, 0]
+                 if np.mean(x_diffs) > 0:  # x increased on average: align on the largest positive shift
+                     add_to_x = np.max(x_diffs) - x_diffs
+                 else:  # x decreased on average: align on the largest negative shift
+                     add_to_x = np.min(x_diffs) - x_diffs
+                 new_ordered_centroids[:, 0] = new_ordered_centroids[:, 0] + add_to_x
+
+                 y_diffs = new_ordered_centroids[:, 1] - previously_ordered_centroids[:, 1]
+                 if np.mean(y_diffs) > 0:  # y increased on average: align on the largest positive shift
+                     add_to_y = np.max(y_diffs) - y_diffs
+                 else:  # y decreased on average: align on the largest negative shift
+                     add_to_y = np.min(y_diffs) - y_diffs
+                 new_ordered_centroids[:, 1] = new_ordered_centroids[:, 1] + add_to_y
+
+             ordered_centroids = new_ordered_centroids
+
+         # Compute each final bounding box
+         for shape_i in range(self.first_image.shape_number):
+             shape_i_indices = np.where(previous_ordered_image_i == shape_i + 1)
+             self.left[shape_i] = np.min(shape_i_indices[1])
+             self.right[shape_i] = np.max(shape_i_indices[1])
+             self.top[shape_i] = np.min(shape_i_indices[0])
+             self.bot[shape_i] = np.max(shape_i_indices[0])
+         self.ordered_first_image = previous_ordered_image_i
+
+     def segment_blob_motion(self, image, color_space_combination, color_number):
+         if isinstance(image, str):
+             image = self.read_and_rotate(image, self.first_image.bgr)
+         In = OneImageAnalysis(image)
+         In.convert_and_segment(color_space_combination, color_number, None, None,
+                                self.first_image.subtract_background, self.first_image.subtract_background2)
+         return In.binary_image
+
+     def print_bounding_boxes(self, display_or_return=0):
+         imtoshow = deepcopy(self.first_image.bgr)
+         segments = np.zeros((2, 1), dtype=np.uint8)
+         for i in np.arange(self.first_image.shape_number):
+             # Append the four sides of arena i to the list of segments to draw
+             segments = np.append(segments, get_every_coord_between_2_points(np.array((self.top[i], self.left[i])),
+                                                                             np.array((self.bot[i], self.left[i]))),
+                                  axis=1)
+             segments = np.append(segments, get_every_coord_between_2_points(np.array((self.top[i], self.right[i])),
+                                                                             np.array((self.bot[i], self.right[i]))),
+                                  axis=1)
+             segments = np.append(segments, get_every_coord_between_2_points(np.array((self.top[i], self.left[i])),
+                                                                             np.array((self.top[i], self.right[i]))),
+                                  axis=1)
+             segments = np.append(segments, get_every_coord_between_2_points(np.array((self.bot[i], self.left[i])),
+                                                                             np.array((self.bot[i], self.right[i]))),
+                                  axis=1)
+
+             text = f"{i + 1}"
+             position = (self.left[i] + 25, self.top[i] + (self.bot[i] - self.top[i]) // 2)
+             imtoshow = cv2.putText(imtoshow,  # numpy array on which the text is written
+                                    text,  # text
+                                    position,  # position at which writing starts
+                                    cv2.FONT_HERSHEY_SIMPLEX,  # font family
+                                    1,  # font size
+                                    (0, 0, 0, 255),  # font color
+                                    2)  # font stroke
+
+         mask = np.zeros(self.first_image.validated_shapes.shape, dtype=np.uint8)
+         mask[segments[0], segments[1]] = 1
+         mask = cv2.dilate(mask, np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=np.uint8), iterations=3)
+         if display_or_return == 0:
+             imtoshow[mask == 1, :] = 0
+             imtoshow = cv2.resize(imtoshow, (2000, 1000))
+             cv2.imshow('Video contour', imtoshow)
+             cv2.waitKey(0)
+             cv2.destroyAllWindows()
+         else:
+             return mask
+
+     def prepare_video_writing(self, img_list, min_ram_free, in_colors=False):
+         # https://stackoverflow.com/questions/48672130/saving-to-hdf5-is-very-slow-python-freezing
+         # https://stackoverflow.com/questions/48385256/optimal-hdf5-dataset-chunk-shape-for-reading-rows/48405220#48405220
+         # 1) Create a list of video names, skipping the individuals that were not analyzed
+         if self.not_analyzed_individuals is not None:
+             number_to_add = len(self.not_analyzed_individuals)
+         else:
+             number_to_add = 0
+         vid_names = list()
+         ind_i = 0
+         counter = 0
+         while ind_i < (self.first_image.shape_number + number_to_add):
+             ind_i += 1
+             while np.any(np.isin(self.not_analyzed_individuals, ind_i)):
+                 ind_i += 1
+             vid_names.append("ind_" + str(ind_i) + ".npy")
+             counter += 1
+         img_nb = len(img_list)
+
+         # 2) Create a table of the dimensions of each video
+         # Estimate the memory (in GiB) needed to hold all videos
+         necessary_memory = img_nb * np.multiply((self.bot - self.top + 1).astype(np.uint64), (self.right - self.left + 1).astype(np.uint64)).sum() * 8 * 1.16415e-10
+         if in_colors:
+             sizes = np.column_stack(
+                 (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top + 1, self.right - self.left + 1,
+                  np.repeat(3, self.first_image.shape_number)))
+             necessary_memory *= 3
+         else:
+             sizes = np.column_stack(
+                 (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top + 1, self.right - self.left + 1))
+         # If all videos share the same dimensions, store them in one array instead of a list
+         self.use_list_of_vid = True
+         if np.all(sizes[0, :] == sizes):
+             self.use_list_of_vid = False
+         available_memory = (psutil.virtual_memory().available >> 30) - min_ram_free
+         bunch_nb = int(np.ceil(necessary_memory / available_memory))
+         if bunch_nb > 1:
+             # The program will need twice the memory to create the second bunch.
+             bunch_nb = int(np.ceil(2 * necessary_memory / available_memory))
+
+         video_nb_per_bunch = int(np.floor(self.first_image.shape_number / bunch_nb))
+         analysis_status = {"continue": True, "message": ""}
+         video_bunch = None  # stays None if the allocation below fails
+         try:
+             if self.use_list_of_vid:
+                 video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in range(video_nb_per_bunch)]
+             else:
+                 video_bunch = np.zeros(np.append(sizes[0, :], video_nb_per_bunch), dtype=np.uint8)
+         except ValueError as v_err:
+             analysis_status = {"continue": False, "message": "Probably failed to detect the right cell(s) number, do the first image analysis manually."}
+             logging.error(f"{analysis_status['message']} The error is: {v_err}")
+         # Check the available disk space
+         if (psutil.disk_usage('/')[2] >> 30) < (necessary_memory + 2):
+             rom_memory_required = necessary_memory + 2
+         else:
+             rom_memory_required = None
+         remaining = self.first_image.shape_number % bunch_nb
+         if remaining > 0:
+             bunch_nb += 1
+         logging.info(f"Cellects will start writing {self.first_image.shape_number} videos. Given the available memory, it will do it in {bunch_nb} bunch(es)")
+         return bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining
+
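+     # Worked example of the bunching logic above: with 24 arenas, once the memory
+     # checks yield bunch_nb = 5, video_nb_per_bunch = floor(24 / 5) = 4 and
+     # remaining = 24 % 5 = 4, so bunch_nb is raised to 6: five bunches of 4
+     # videos plus a final bunch holding the remaining 4.
+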
+     def write_videos_as_np_arrays(self, img_list, min_ram_free, in_colors=False, reduce_image_dim=False):
+         # self=self.videos
+         # img_list = self.data_list
+         # min_ram_free = self.vars['min_ram_free']
+         # in_colors = not self.vars['already_greyscale']
+
+         bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining = self.prepare_video_writing(img_list, min_ram_free, in_colors)
+         for bunch in np.arange(bunch_nb):
+             print(f'\nSaving the bunch n: {bunch + 1} / {bunch_nb} of videos:', end=' ')
+             if bunch == (bunch_nb - 1) and remaining > 0:
+                 arena = np.arange(bunch * video_nb_per_bunch, bunch * video_nb_per_bunch + remaining)
+             else:
+                 arena = np.arange(bunch * video_nb_per_bunch, (bunch + 1) * video_nb_per_bunch)
+             if self.use_list_of_vid:
+                 video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in arena]
+             else:
+                 video_bunch = np.zeros(np.append(sizes[0, :], len(arena)), dtype=np.uint8)
+             prev_img = None
+             for image_i, image_name in enumerate(img_list):
+                 img = self.read_and_rotate(image_name, prev_img)
+                 prev_img = deepcopy(img)
+                 if not in_colors and reduce_image_dim:
+                     img = img[:, :, 0]
+                 for arena_i, arena_name in enumerate(arena):
+                     sub_img = img[self.top[arena_name]: (self.bot[arena_name] + 1),
+                                   self.left[arena_name]: (self.right[arena_name] + 1), ...]
+                     if self.use_list_of_vid:
+                         video_bunch[arena_i][image_i, ...] = sub_img
+                     else:
+                         if len(video_bunch.shape) == 5:
+                             video_bunch[image_i, :, :, :, arena_i] = sub_img
+                         else:
+                             video_bunch[image_i, :, :, arena_i] = sub_img
+             for arena_i, arena_name in enumerate(arena):
+                 if self.use_list_of_vid:
+                     np.save(vid_names[arena_name], video_bunch[arena_i])
+                 else:
+                     if len(video_bunch.shape) == 5:
+                         np.save(vid_names[arena_name], video_bunch[:, :, :, :, arena_i])
+                     else:
+                         np.save(vid_names[arena_name], video_bunch[:, :, :, arena_i])
+
+     def read_and_rotate(self, image_name, prev_img):
+         """ This method reads an image from its name and:
+         - makes sure to crop it properly
+         - rotates the image if it is not in the same orientation as the reference
+         - makes sure that the rotation goes in the right direction (clockwise or counterclockwise)"""
+
+         # Read the image
+         if not os.path.exists(image_name):
+             raise FileNotFoundError(image_name)
+
+         from cellects.utils.load_display_save import readim
+         img = readim(image_name, self.raw_images)
+
+         # Use a reference image to determine whether the read image should be landscape
+         is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
+         if (img.shape[0] > img.shape[1] and is_landscape) or (img.shape[0] < img.shape[1] and not is_landscape):
+             # Turn it clockwise and, if necessary, crop it
+             clockwise = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
+             if self.first_image.cropped:
+                 clockwise = clockwise[self.first_image.crop_coord[0]:self.first_image.crop_coord[1],
+                                       self.first_image.crop_coord[2]:self.first_image.crop_coord[3], ...]
+             if prev_img is not None:
+                 # Quantify the difference between the clockwise-turned image and the reference
+                 prev_img = np.int16(prev_img)
+                 clock_diff = sum_of_abs_differences(prev_img, np.int16(clockwise))
+                 # Turn it counterclockwise and, if necessary, crop it
+                 counter_clockwise = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
+                 if self.first_image.cropped:
+                     counter_clockwise = counter_clockwise[self.first_image.crop_coord[0]:self.first_image.crop_coord[1],
+                                                           self.first_image.crop_coord[2]:self.first_image.crop_coord[3], ...]
+                 # Quantify the difference between the counterclockwise-turned image and the reference
+                 counter_clock_diff = sum_of_abs_differences(prev_img, np.int16(counter_clockwise))
+                 # Keep the rotation with the lower difference.
+                 if clock_diff > counter_clock_diff:
+                     img = counter_clockwise
+                 else:
+                     img = clockwise
+             else:
+                 img = clockwise
+         else:
+             if self.first_image.cropped:
+                 img = img[self.first_image.crop_coord[0]:self.first_image.crop_coord[1],
+                           self.first_image.crop_coord[2]:self.first_image.crop_coord[3], ...]
+         return img
+
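+     # A minimal sketch of the frame comparison used above, assuming that
+     # sum_of_abs_differences(a, b) behaves like this (hypothetical) helper:
+     #
+     #     def sum_of_abs_differences(a, b):
+     #         return np.abs(a - b).sum()
+     #
+     # i.e. the candidate rotation whose pixels differ least from the previous
+     # frame is the one kept.
+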
+     def make_videos(self, img_list, extension, fps=40):
+         is_color = True
+         sizes = np.column_stack((self.right - self.left + 1, self.bot - self.top + 1))
+         if extension == '.mp4':
+             fourcc = 0x7634706d  # the FourCC code of 'mp4v'
+         else:
+             fourcc = cv2.VideoWriter_fourcc('F', 'F', 'V', '1')  # lossless
+
+         # 1) Create a list of video writers, skipping the individuals that were not analyzed
+         if self.not_analyzed_individuals is not None:
+             number_to_add = len(self.not_analyzed_individuals)
+         else:
+             number_to_add = 0
+         vid_list = list()
+         ind_i = 0
+         counter = 0
+         while ind_i < (self.first_image.shape_number + number_to_add):
+             ind_i += 1
+             while np.any(np.isin(self.not_analyzed_individuals, ind_i)):
+                 ind_i += 1
+             vid_name = f"ind_{ind_i}{extension}"
+             vid_list.insert(counter, cv2.VideoWriter(vid_name, fourcc, float(fps), tuple(sizes[counter, :]), is_color))
+             counter += 1
+
+         # 2) Loop over the images and save the videos frame by frame
+         print("Image number: ")
+         prev_img = None
+         for image_i in np.arange(len(img_list)):
+             print(str(image_i), end=' ')
+             image_name = img_list[image_i]
+             if not os.path.exists(image_name):
+                 raise FileNotFoundError(image_name)
+             img = self.read_and_rotate(image_name, prev_img)
+             prev_img = deepcopy(img)
+             for blob_i in np.arange(self.first_image.shape_number):
+                 blob_img = deepcopy(img)
+                 if self.first_image.crop_coord is not None:
+                     blob_img = blob_img[self.first_image.crop_coord[0]:self.first_image.crop_coord[1],
+                                         self.first_image.crop_coord[2]:self.first_image.crop_coord[3], :]
+                 blob_img = blob_img[self.top[blob_i]: (self.bot[blob_i] + 1),
+                                     self.left[blob_i]: (self.right[blob_i] + 1), :]
+                 vid = vid_list[blob_i]
+                 vid.write(blob_img)
+
+         for blob_i in np.arange(self.first_image.shape_number):
+             vid_list[blob_i].release()
+
+
+ if __name__ == "__main__":
+     from glob import glob
+     from pathlib import Path
+     from cellects.core.cellects_paths import TEST_DIR
+     from cellects.utils.load_display_save import *
+     from cellects.utils.utilitarian import insensitive_glob
+     from cellects.image_analysis.one_image_analysis_threads import ProcessFirstImage
+     # os.chdir(TEST_DIR / "experiment")
+     # image = readim("IMG_7653.jpg")
+     os.chdir(Path("D:/Directory/Data/100/101-104/"))
+     img_list = np.sort(insensitive_glob("IMG_" + '*' + ".jpg"))
+     image = readim(img_list[0])
+     first_image = OneImageAnalysis(image)
+     first_im_color_space_combination = {"lab": np.array((1, 0, 0), np.uint8)}
+     last_im_color_space_combination = {"lab": np.array((0, 0, 1), np.uint8)}
+     first_image.convert_and_segment(first_im_color_space_combination)
+     first_image.set_spot_shapes_and_size_confint('circle')
+     process_i = ProcessFirstImage(
+         [first_image, False, False, None, False, 8, None, 2, None, None, None])
+     process_i.binary_image = first_image.binary_image
+     process_i.process_binary_image()
+     first_image.validated_shapes = process_i.validated_shapes
+     first_image.shape_number = 8
+     first_image.get_crop_coordinates()
+     self = OneVideoPerBlob(first_image, 100, False)
+     self.get_bounding_boxes(are_gravity_centers_moving=1, img_list=img_list, color_space_combination=last_im_color_space_combination, color_number=2, sample_size=5, all_specimens_have_same_direction=False, display=True)
+     self.print_bounding_boxes()