cellects 0.1.3-py3-none-any.whl → 0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +365 -326
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +4 -3
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +841 -450
  14. cellects/gui/required_output.py +100 -56
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +317 -135
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -105
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.6.dist-info/LICENSE +675 -0
  29. cellects-0.2.6.dist-info/METADATA +829 -0
  30. cellects-0.2.6.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.3.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.3.dist-info/METADATA +0 -176
  35. cellects-0.1.3.dist-info/RECORD +0 -44
  36. {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.3.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
cellects-0.2.6.dist-info/RECORD
@@ -0,0 +1,44 @@
+ cellects/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/__main__.py,sha256=7JEHy1h2TRZCrGh1TmYp_b1QLWm8ZqekRkKb9wKE-dU,2798
+ cellects/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/config/all_vars_dict.py,sha256=Vdu0JoIqwY6QPa8z_DDiZd5QTEdIhPkSvGDjyzX5GTU,6249
+ cellects/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/core/cellects_paths.py,sha256=vwFEYXVAD16w0euoTuJ8Ca0tUyelsoiDNmWqryU5k-k,880
+ cellects/core/cellects_threads.py,sha256=I3k-k7w4rnPHeJe-0furBzU6I4gBCZrFgiNWX477EOA,105390
+ cellects/core/motion_analysis.py,sha256=R9a_ZL4qcj3pO3OlMSU6iTBCX9TggCMKaSIJBq9nkdc,97841
+ cellects/core/one_image_analysis.py,sha256=YghJIVNjZLiTAsnNSxBQ8w_m5Xq5kfhQHyA-anrjqKQ,67361
+ cellects/core/program_organizer.py,sha256=Quo34VH1qqeb6gmXw6TEil40gd80eRYB5xigL_8zUCg,89175
+ cellects/core/script_based_run.py,sha256=00TecfdIseY7GuB63Vwpc1O2DcdLjn5F-SldcRPPoAM,7559
+ cellects/gui/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/gui/advanced_parameters.py,sha256=8lpXsSrLr7JZ0PUsl2rslddwU4KSNqWjmqUd68_u1Ng,77321
+ cellects/gui/cellects.py,sha256=KpCOL1j3sBVcqp5l2epNc28knekGGLUYiLZ3XmEw5BE,8325
+ cellects/gui/custom_widgets.py,sha256=tSB8sIPCJ8B10eTNrfj1iKoztSQS0iYX2WjYjGpI88o,34257
+ cellects/gui/first_window.py,sha256=HZ_4ZmDMpDbNdEa-KVlMINKVqXyWGIZI7GYFcdLm0E8,28439
+ cellects/gui/if_several_folders_window.py,sha256=9KcnWxzURtxS6ViyoWZVJ7dFBlH4XcRCCsWuL1v4L-c,13803
+ cellects/gui/image_analysis_window.py,sha256=UDIRp20LcX-ORnyufJzGxcH-CW9XvasmVd3Kt0bKBm8,130692
+ cellects/gui/required_output.py,sha256=ib3jXSzRuRzE_yU_ZNkrimCcq6NjAukz088T2Zgl95c,13654
+ cellects/gui/ui_strings.py,sha256=ifkBJqHzitpTXEyMG_zp4sijPLLudSL2ot5DSn7FMdg,31116
+ cellects/gui/video_analysis_window.py,sha256=hxIqEWKisJSV3tnceFQfVXkjmC_AuARCcqMZ5ucmGIQ,43340
+ cellects/icons/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/icons/cellects_icon.icns,sha256=3GM5rpVKUoy-gAYEee5gGPh9lQx9KZqh9iRqYCT83Aw,393392
+ cellects/icons/cellects_icon.ico,sha256=Eqlci8zZ0zdsRh2kSQAu4aHAPbR2NEzSbJPgaRQNenI,208076
+ cellects/image_analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/image_analysis/cell_leaving_detection.py,sha256=LtwLrjGXUVU6lBspSr3yN5UApun4-v4A23VBcP2A2ZU,5317
+ cellects/image_analysis/image_segmentation.py,sha256=6ighRFLmCewnCOk-RFJDjMMh92Jo0se0K1P5g4okhEg,48876
+ cellects/image_analysis/morphological_operations.py,sha256=M0uK3C1h0OH-SuE-hNJzJsK7RhHBNhlKBeAITOIVKkg,91452
+ cellects/image_analysis/network_functions.py,sha256=QHBU09U8My3IQqzDhxcK9t6ZDhxhCac2mt5N1JwEfCI,107499
+ cellects/image_analysis/one_image_analysis_threads.py,sha256=4mE6MTF1I9y3SIYQgZ1nj4b4DqJ5-BnwGqdQIVZvlpk,11607
+ cellects/image_analysis/oscillations_functions.py,sha256=87dZAEVf4jzyVfM5_iFVsuUOJPxZ5gXSHsZsH3A7n7A,6853
+ cellects/image_analysis/progressively_add_distant_shapes.py,sha256=WTIW0JZBPiVmnwP_vG18bp6BPRCsr0JPdGd1SEdUPUU,25719
+ cellects/image_analysis/shape_descriptors.py,sha256=vuQvUGbajPG9VPxzgadmeBCLd2srBLRpaaebjdz8P1c,42995
+ cellects/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cellects/utils/decorators.py,sha256=kjZWSK71l5-LrrH7BZHb0kdFaAikC_qZu14_KjIUCms,361
+ cellects/utils/formulas.py,sha256=xFURq95YpgQ42uPo41U7vj0iywcJq0fh5KjgFa8vvqo,25283
+ cellects/utils/load_display_save.py,sha256=SxeEN4IVG-KA2BopsLH_ZI4OA9RQxNd8KiLLAnGW3ig,48981
+ cellects/utils/utilitarian.py,sha256=mwGWUgrnaO_2Lne485cp0MxsCxtK68whK9O2fAJ725E,19735
+ cellects-0.2.6.dist-info/LICENSE,sha256=RkNnEPPAi068Hv0ST1RwdKOi_Mc6Ukik5twIYPLWXjw,35176
+ cellects-0.2.6.dist-info/METADATA,sha256=HX1JgLxbEGNyh_Y0kM8myr9ysL62x6KlHwyEItu7bRw,46231
+ cellects-0.2.6.dist-info/WHEEL,sha256=beeZ86-EfXScwlR_HKu4SllMC9wUEj_8Z_4FJ3egI2w,91
+ cellects-0.2.6.dist-info/entry_points.txt,sha256=JT6rEvKpUuKyDPvfOYma-IMQNvfnKMstFMAoVJhXIGc,60
+ cellects-0.2.6.dist-info/top_level.txt,sha256=8VlvCH4ka3bqugIpQnOVjc3UV9Vavfx5SXNyUV9_lGw,9
+ cellects-0.2.6.dist-info/RECORD,,
cellects/core/one_video_per_blob.py
@@ -1,540 +0,0 @@
- """
- This class uses the first image (and, if more accuracy is required, other images) to detect the contours of the
- arenas of one experiment and uses that information to create videos of smaller size (to reduce RAM usage) and save
- them as .npy files on the hard drive. Along the way, Cellects checks whether there is enough RAM, splits the work
- when needed, and warns the user through a thread message displayed in the interface.
-
- This class contains methods to automatically detect arenas from specimens detected in an image at the beginning of an experiment.
- Arenas can be delimited automatically or manually. Cellects includes two automatic algorithms: a fast one to be used when arenas are symmetric around the initial position of the specimens or sufficiently far from each other, and a slower one to be used otherwise. These automatic algorithms work even if the arenas are not detectable in the images, but only work when there is a single individual in each arena. In the case of manual delimitation, the user draws each arena by holding down the mouse button. The following paragraphs describe the two automatic algorithms.
- The fast algorithm computes each arena's coordinates using the distances between the components detected in the seed image after step 1. For each component, Cellects finds its nearest neighbor and uses that distance as the side of a square, centered on the component, giving the x and y limits of the arena.
- If the initial positions of the cells do not provide good estimates of the center of each arena, Cellects can use the slower algorithm to find them. Because Cellects is intended to be very general, it cannot use specific characteristics of a particular arena to find its edges. Instead, it uses the motion and/or growth of the cell to infer the position of each arena. To do so, Cellects segments a sample of 5 images (equally spaced in time) using the same algorithm as for the seed image. Even if this segmentation is not accurate, the following algorithm finds the arenas robustly. First, it finds a rough estimate of the expected position of the cell. To do this, it dilates the cell in the first frame until the edge of the dilated image is closer to the nearest centroid of other cells than to its own centroid. Then, it moves to the second image and also dilates it, in order to link together different disconnected components that may result from an inaccurate segmentation. Then, it performs an AND operation between these two dilated images and dilates the result so that one component remains per arena. By doing this for all cells, we get an estimate of their shapes in the second frame, and we can compute their centroids. We then repeat this procedure for each pair of consecutive frames. Finally, Cellects computes the bounding boxes that contain the cells detected in the 5 frames for each arena, and uses them to estimate each arena's coordinates.
- In some experiments, all cells are located at one edge of the arena and move roughly in the same direction. Cellects includes an option to take advantage of this regularity and improve the accuracy of arena detection: once the centroids of a frame have been estimated (as described above), Cellects finds the centroid with the highest displacement with respect to the previous frame, and applies the same displacement to all centroids.
-
- It also contains methods to write videos (as np array .npy files) corresponding to the pixels delimited by these arenas.
- """
-
- import os
- import logging
- from copy import deepcopy
- import numpy as np
- import cv2
- import psutil
-
- from cellects.image_analysis.morphological_operations import cross_33, Ellipse, get_minimal_distance_between_2_shapes, \
-     rank_from_top_to_bottom_from_left_to_right, \
-     expand_until_neighbor_center_gets_nearer_than_own, get_line_points
- from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
- from cellects.core.one_image_analysis import OneImageAnalysis
- from cellects.utils.load_display_save import read_and_rotate
-
-
- class OneVideoPerBlob:
-     """
-     This class finds the bounding box containing all pixels covered by one blob over time
-     and creates a video from it.
-     It does that for each blob, taking a few pieces of information into account.
-     """
-
-     def __init__(self, first_image, starting_blob_hsize_in_pixels, raw_images):
-         """
-         Store the first image analysis, the typical specimen size and the raw image flag.
-         """
-         # Initialize all variables used in the following methods
-         self.first_image = first_image
-         self.original_shape_hsize = starting_blob_hsize_in_pixels
-         self.raw_images = raw_images
-         if self.original_shape_hsize is not None:
-             self.k_size = int(((self.original_shape_hsize // 5) * 2) + 1)
-
-         # Create required empty arrays: especially the bounding box coordinates of each video
-         self.ordered_first_image = None
-         self.motion_list = list()
-         self.shapes_to_remove = None
-         self.not_analyzed_individuals = None
-
-     def get_bounding_boxes(self, are_gravity_centers_moving, img_list, color_space_combination, color_number=2,
-                            sample_size=5, all_specimens_have_same_direction=True, display=False, filter_spec=None):
-         logging.info("Get the coordinates of all arenas using the get_bounding_boxes method of the VideoMaker class")
-         # are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1; img_list=self.data_list; color_space_combination=self.vars['convert_for_origin']; color_number=self.vars["color_number"]; sample_size=5
-
-         self.big_kernel = Ellipse((self.k_size, self.k_size)).create()  # fromfunction(self.circle_fun, (self.k_size, self.k_size))
-         self.big_kernel = self.big_kernel.astype(np.uint8)
-         self.small_kernel = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=np.uint8)
-         self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
-             self.first_image.validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
-         self.unchanged_ordered_fimg = deepcopy(self.ordered_first_image)
-         self.modif_validated_shapes = deepcopy(self.first_image.validated_shapes)
-         self.standard = - 1
-         counter = 0
-         while np.any(np.less(self.standard, 0)) and counter < 20:
-             counter += 1
-             self.left = np.zeros(self.first_image.shape_number, dtype=np.int64)
-             self.right = np.repeat(self.modif_validated_shapes.shape[1], self.first_image.shape_number)
-             self.top = np.zeros(self.first_image.shape_number, dtype=np.int64)
-             self.bot = np.repeat(self.modif_validated_shapes.shape[0], self.first_image.shape_number)
-             if are_gravity_centers_moving:
-                 self._get_bb_with_moving_centers(img_list, color_space_combination, color_number, sample_size,
-                                                  all_specimens_have_same_direction, display, filter_spec=filter_spec)
-                 # new:
-                 new_ordered_first_image = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
-                 #
-                 for i in np.arange(1, self.first_image.shape_number + 1):
-                     previous_shape = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
-                     previous_shape[np.nonzero(self.unchanged_ordered_fimg == i)] = 1
-                     new_potentials = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
-                     new_potentials[np.nonzero(self.ordered_first_image == i)] = 1
-                     new_potentials[np.nonzero(self.unchanged_ordered_fimg == i)] = 0
-
-                     pads = ProgressivelyAddDistantShapes(new_potentials, previous_shape, max_distance=2)
-                     pads.consider_shapes_sizes(min_shape_size=10)
-                     pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=False)
-                     new_ordered_first_image[np.nonzero(pads.expanded_shape)] = i
-                 self.ordered_first_image = new_ordered_first_image
-                 self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
-                 self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
-                 self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
-                     self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
-                 self._get_quick_bb()
-                 # self.print_bounding_boxes()
-             else:
-                 self._get_quick_bb()
-             self.standardize_video_sizes()
-         if counter == 20:
-             self.top[self.top < 0] = 1
-             self.bot[self.bot >= self.ordered_first_image.shape[0] - 1] = self.ordered_first_image.shape[0] - 2
-             self.left[self.left < 0] = 1
-             self.right[self.right >= self.ordered_first_image.shape[1] - 1] = self.ordered_first_image.shape[1] - 2
-
-
-     def _get_quick_bb(self):
-         """
-         Compute the euclidean distance between cells to get each arena bounding box.
-         To save computation time:
-         1) We use triu_indices to consider each pairwise distance only once
-         2) We only compute distances when the x and y distances are small enough
-         (i.e. less than twice the minimal distance already calculated)
-
-         :return:
-         """
-         from timeit import default_timer
-         tic = default_timer()
-         shapes = deepcopy(self.modif_validated_shapes)
-         eroded_shapes = cv2.erode(self.modif_validated_shapes, cross_33)
-         shapes = shapes - eroded_shapes
-         x_min = self.ordered_stats[:, 0]
-         y_min = self.ordered_stats[:, 1]
-         x_max = self.ordered_stats[:, 0] + self.ordered_stats[:, 2]
-         y_max = self.ordered_stats[:, 1] + self.ordered_stats[:, 3]
-         x_min_dist = shapes.shape[1]
-         y_min_dist = shapes.shape[0]
-
-         shapes *= self.ordered_first_image
-         shape_nb = (len(np.unique(shapes)) - 1)
-         i = 0
-         a_indices, b_indices = np.triu_indices(shape_nb, 1)
-         a_indices, b_indices = a_indices + 1, b_indices + 1
-         all_distances = np.zeros((len(a_indices), 3), dtype=float)
-         # For every pair of components, find the minimal distance
-         for (a, b) in zip(a_indices, b_indices):
-             x_dist = np.absolute(x_max[a - 1] - x_min[b - 1])
-             y_dist = np.absolute(y_max[a - 1] - y_min[b - 1])
-             if x_dist < 2 * x_min_dist and y_dist < 2 * y_min_dist:
-                 sub_shapes = np.logical_or(shapes == a, shapes == b) * shapes
-                 sub_shapes = sub_shapes[np.min((y_min[a - 1], y_min[b - 1])):np.max((y_max[a - 1], y_max[b - 1])),
-                                         np.min((x_min[a - 1], x_min[b - 1])):np.max((x_max[a - 1], x_max[b - 1]))]
-                 sub_shapes[sub_shapes == a] = 1
-                 sub_shapes[sub_shapes == b] = 2
-                 if np.any(sub_shapes == 1) and np.any(sub_shapes == 2):
-                     all_distances[i, :] = a, b, get_minimal_distance_between_2_shapes(sub_shapes, False)
-
-                 if x_dist > y_dist:
-                     x_min_dist = np.min((x_min_dist, x_dist))
-                 else:
-                     y_min_dist = np.min((y_min_dist, y_dist))
-             i += 1
-         for shape_i in np.arange(1, shape_nb + 1):
-             # Get where shape i appears in pairwise comparisons
-             idx = np.nonzero(np.logical_or(all_distances[:, 0] == shape_i, all_distances[:, 1] == shape_i))
-             # print(all_distances[idx, 2])
-             # Compute the minimal distance related to shape i and divide by 2
-             if len(all_distances[idx, 2]) > 0:
-                 dist = all_distances[idx, 2].min() // 2
-             else:
-                 dist = 1
-             # Save the coordinates of the arena around shape i
-             self.left[shape_i - 1] = x_min[shape_i - 1] - dist.astype(np.int64)
-             self.right[shape_i - 1] = x_max[shape_i - 1] + dist.astype(np.int64)
-             self.top[shape_i - 1] = y_min[shape_i - 1] - dist.astype(np.int64)
-             self.bot[shape_i - 1] = y_max[shape_i - 1] + dist.astype(np.int64)
-         print((default_timer() - tic))
-
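For reference, the np.triu_indices pattern used above visits each unordered pair of shape labels exactly once. A tiny illustrative sketch with a hypothetical shape count:

import numpy as np

shape_nb = 4
a_indices, b_indices = np.triu_indices(shape_nb, 1)   # upper triangle, diagonal excluded
for a, b in zip(a_indices + 1, b_indices + 1):        # shape labels start at 1
    print(a, b)   # (1, 2) (1, 3) (1, 4) (2, 3) (2, 4) (3, 4)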
-     def standardize_video_sizes(self):
-         distance_threshold_to_consider_an_arena_out_of_the_picture = None  # in pixels, worked nicely with -50
-
-         # The modifications that avoid making videos of setups out of view do not work for moving centers
-         y_diffs = self.bot - self.top
-         x_diffs = self.right - self.left
-         add_to_y = ((np.max(y_diffs) - y_diffs) / 2)
-         add_to_x = ((np.max(x_diffs) - x_diffs) / 2)
-         self.standard = np.zeros((len(self.top), 4), dtype=np.int64)
-         self.standard[:, 0] = self.top - np.uint8(np.floor(add_to_y))
-         self.standard[:, 1] = self.bot + np.uint8(np.ceil(add_to_y))
-         self.standard[:, 2] = self.left - np.uint8(np.floor(add_to_x))
-         self.standard[:, 3] = self.right + np.uint8(np.ceil(add_to_x))
-
-         # Check whether any bounding box gets out of the picture
-         out_of_pic = deepcopy(self.standard)
-         out_of_pic[:, 1] = self.ordered_first_image.shape[0] - out_of_pic[:, 1] - 1
-         out_of_pic[:, 3] = self.ordered_first_image.shape[1] - out_of_pic[:, 3] - 1
-
-         if distance_threshold_to_consider_an_arena_out_of_the_picture is None:
-             distance_threshold_to_consider_an_arena_out_of_the_picture = np.min(out_of_pic) - 1
-
-         # If it occurs at least once, apply a correction; otherwise, continue and write videos
-         # If the overflow is strong, remove the corresponding individuals and redo the bounding box finding
-
-         if np.any(np.less(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)):
-             # Remove shapes
-             self.standard = - 1
-             self.shapes_to_remove = np.nonzero(np.less(out_of_pic, - 20))[0]
-             for shape_i in self.shapes_to_remove:
-                 self.ordered_first_image[self.ordered_first_image == (shape_i + 1)] = 0
-             self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
-             self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
-             self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
-                 self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
-
-             self.first_image.shape_number = self.first_image.shape_number - len(self.shapes_to_remove)
-             self.not_analyzed_individuals = np.unique(self.unchanged_ordered_fimg -
-                                                       (self.unchanged_ordered_fimg * self.modif_validated_shapes))[1:]
-
-         else:
-             # Reduce all box sizes if necessary and proceed
-             if np.any(np.less(out_of_pic, 0)):
-                 # When the overflow is weak, redo standardization with lower "add_to_y" and "add_to_x"
-                 overflow = np.nonzero(np.logical_and(np.less(out_of_pic, 0), np.greater_equal(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)))[0]
-                 # Check whether the overflow occurs on the y axis
-                 if np.any(np.less(out_of_pic[overflow, :2], 0)):
-                     add_to_top_and_bot = np.min(out_of_pic[overflow, :2])
-                     self.standard[:, 0] = self.standard[:, 0] - add_to_top_and_bot
-                     self.standard[:, 1] = self.standard[:, 1] + add_to_top_and_bot
-                 # Check whether the overflow occurs on the x axis
-                 if np.any(np.less(out_of_pic[overflow, 2:], 0)):
-                     add_to_left_and_right = np.min(out_of_pic[overflow, 2:])
-                     self.standard[:, 2] = self.standard[:, 2] - add_to_left_and_right
-                     self.standard[:, 3] = self.standard[:, 3] + add_to_left_and_right
-             # If x or y sizes are odd, make them even:
-             # Unclear why, but OpenCV removes 1 from odd dimensions when writing videos
-             if (self.standard[0, 1] - self.standard[0, 0]) % 2 != 0:
-                 self.standard[:, 1] -= 1
-             if (self.standard[0, 3] - self.standard[0, 2]) % 2 != 0:
-                 self.standard[:, 3] -= 1
-             self.top = self.standard[:, 0]
-             self.bot = self.standard[:, 1]
-             self.left = self.standard[:, 2]
-             self.right = self.standard[:, 3]
-
-
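Stripped of its out-of-picture corrections, the standardization above pads every bounding box symmetrically up to the largest height and width. A self-contained sketch with hypothetical coordinates:

import numpy as np

top, bot = np.array([10, 40]), np.array([50, 70])
left, right = np.array([5, 60]), np.array([55, 90])
y_pad = (np.max(bot - top) - (bot - top)) / 2
x_pad = (np.max(right - left) - (right - left)) / 2
# Columns: top, bottom, left, right, as in self.standard
standard = np.column_stack((top - np.floor(y_pad), bot + np.ceil(y_pad),
                            left - np.floor(x_pad), right + np.ceil(x_pad))).astype(np.int64)
print(standard[:, 1] - standard[:, 0])   # heights are now all equal
print(standard[:, 3] - standard[:, 2])   # widths are now all equal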
-     def _get_bb_with_moving_centers(self, img_list, color_space_combination, color_number, sample_size=2, all_specimens_have_same_direction=True, display=False, filter_spec=None):
-         """
-         Starting from the first image, this function tries to grow each shape to see whether it covers segmented
-         pixels in the following images, i.e. it segments evenly spaced images (see self._segment_blob_motion and
-         OneImageAnalysis) to roughly track blob motion, making sure that the video will only contain shapes that
-         have a chronological link with the shape as it was in the first image.
-
-         :param img_list: The whole list of image names
-         :type img_list: list
-         :param sample_size: The number of pictures to analyse. The higher it is, the higher both accuracy and
-         computation time are
-         :type sample_size: int
-         :param all_specimens_have_same_direction: Whether all specimens move roughly in the same direction or not
-         :type all_specimens_have_same_direction: bool
-         :return: For each shape, the coordinates of a bounding box including all shape movements
-         """
-         print("Read and segment each sample image and rank shapes from top to bot and from left to right")
-
-         self.motion_list = list()
-         if img_list.dtype.type is np.str_:
-             frame_number = len(img_list)
-         sample_numbers = np.floor(np.linspace(0, frame_number, sample_size)).astype(int)
-         for frame_idx in np.arange(sample_size):
-             if frame_idx == 0:
-                 self.motion_list.insert(frame_idx, self.first_image.validated_shapes)
-             else:
-                 if img_list.dtype.type is np.str_:
-                     image = img_list[sample_numbers[frame_idx] - 1]
-                 else:
-                     image = img_list[sample_numbers[frame_idx] - 1, ...]
-                 self.motion_list.insert(frame_idx, self._segment_blob_motion(image, color_space_combination,
-                                                                              color_number, filter_spec=filter_spec))
-
-
-         self.big_kernels = Ellipse((self.k_size, self.k_size)).create().astype(np.uint8)
-         self.small_kernels = np.array(((0, 1, 0), (1, 1, 1), (0, 1, 0)), dtype=np.uint8)
-         self.small_kernels = self.small_kernels.astype(np.uint8)
-
-         ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
-             self.first_image.validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
-         previous_ordered_image_i = deepcopy(self.ordered_first_image)
-         is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
-         if img_list.dtype.type is np.str_:
-             img_to_display = read_and_rotate(img_list[sample_numbers[1] - 1], self.first_image.bgr, self.raw_images, is_landscape,
-                                              self.first_image.crop_coord)
-         else:
-             img_to_display = img_list[sample_numbers[1] - 1, ...]
-         if self.first_image.cropped:
-             img_to_display = img_to_display[self.first_image.crop_coord[0]:self.first_image.crop_coord[1],
-                                             self.first_image.crop_coord[2]:self.first_image.crop_coord[3], :]
-         print("For each frame, expand each previously confirmed shape to add area to its maximal bounding box")
-         for step_i in np.arange(1, sample_size):
-             print(step_i)
-
-             previously_ordered_centroids = deepcopy(ordered_centroids)
-             image_i = deepcopy(self.motion_list[step_i])
-             image_i = cv2.dilate(image_i, self.small_kernels, iterations=5)
-
-             # Display the segmentation result for all shapes at this frame
-             if img_list.dtype.type is np.str_:
-                 img_to_display = read_and_rotate(img_list[sample_numbers[step_i] - 1], self.first_image.bgr, self.raw_images,
-                                                  is_landscape, self.first_image.crop_coord)
-             else:
-                 img_to_display = img_list[sample_numbers[step_i] - 1, ...]
-             if self.first_image.cropped:
-                 img_to_display = img_to_display[self.first_image.crop_coord[0]: self.first_image.crop_coord[1],
-                                                 self.first_image.crop_coord[2]: self.first_image.crop_coord[3], :]
-
-             for shape_i in range(self.first_image.shape_number):
-                 shape_to_expand = np.zeros(image_i.shape, dtype=np.uint8)
-                 shape_to_expand[previous_ordered_image_i == (shape_i + 1)] = 1
-                 without_shape_i = deepcopy(previous_ordered_image_i)
-                 without_shape_i[previous_ordered_image_i == (shape_i + 1)] = 0
-                 test_shape = expand_until_neighbor_center_gets_nearer_than_own(shape_to_expand, without_shape_i,
-                                                                                ordered_centroids[shape_i, :],
-                                                                                np.delete(ordered_centroids, shape_i,
-                                                                                          axis=0), self.big_kernels)
-                 test_shape = expand_until_neighbor_center_gets_nearer_than_own(test_shape, without_shape_i,
-                                                                                ordered_centroids[shape_i, :],
-                                                                                np.delete(ordered_centroids, shape_i,
-                                                                                          axis=0), self.small_kernels)
-                 confirmed_shape = test_shape * image_i
-                 previous_ordered_image_i[np.nonzero(confirmed_shape)] = shape_i + 1
-                 # update the image by putting a purple mask around the current shape
-                 contours, useless = cv2.findContours(confirmed_shape, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
-                 cv2.drawContours(img_to_display, contours, -1, (255, 0, 180), 3)
-                 if display:
-                     imtoshow = cv2.resize(img_to_display.astype(np.uint8), (960, 540))
-                     cv2.imshow('Rough detection', imtoshow)
-                     cv2.waitKey(1)
-             if display:
-                 cv2.destroyAllWindows()
-
-
-             mask_to_display = np.zeros(image_i.shape, dtype=np.uint8)
-             mask_to_display[np.nonzero(previous_ordered_image_i)] = 1
-             contours_to_display, useless = cv2.findContours(mask_to_display,
-                                                             cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
-             cv2.drawContours(img_to_display, contours_to_display, -1, (255, 0, 0), 3)
-             if display:
-                 imtoshow = cv2.resize(img_to_display.astype(np.uint8), (960, 540))
-                 cv2.imshow('Rough detection', imtoshow)
-                 cv2.waitKey(1)
-
-             # If the blob moves enough to drastically change its gravity center,
-             # update the ordered centroids at each frame.
-             detected_shape_number, mask_to_display = cv2.connectedComponents(mask_to_display,
-                                                                              connectivity=8)
-             mask_to_display = mask_to_display.astype(np.uint8)
-             while np.logical_and(detected_shape_number - 1 != self.first_image.shape_number,
-                                  np.sum(mask_to_display > 0) < mask_to_display.size):
-                 mask_to_display = cv2.dilate(mask_to_display, self.small_kernels, iterations=1)
-                 detected_shape_number, mask_to_display = cv2.connectedComponents(mask_to_display,
-                                                                                  connectivity=8)
-                 mask_to_display[np.nonzero(mask_to_display)] = 1
-                 mask_to_display = mask_to_display.astype(np.uint8)
-                 if display:
-                     imtoshow = cv2.resize(mask_to_display * 255, (960, 540))
-                     cv2.imshow('expansion', imtoshow)
-                     cv2.waitKey(1)
-             if display:
-                 cv2.destroyAllWindows()
-             ordered_stats, ordered_centroids = rank_from_top_to_bottom_from_left_to_right(mask_to_display,
-                                                                                           self.first_image.y_boundaries)
-
-             new_ordered_centroids = ordered_centroids
-             if all_specimens_have_same_direction:
-                 # Adjust each centroid position according to the maximal centroid displacement.
-                 x_diffs = new_ordered_centroids[:, 0] - previously_ordered_centroids[:, 0]
-                 if np.mean(x_diffs) > 0:  # They moved left, we add to x
-                     add_to_x = np.max(x_diffs) - x_diffs
-                 else:  # They moved right, we remove from x
-                     add_to_x = np.min(x_diffs) - x_diffs
-                 new_ordered_centroids[:, 0] = new_ordered_centroids[:, 0] + add_to_x
-
-                 y_diffs = new_ordered_centroids[:, 1] - previously_ordered_centroids[:, 1]
-                 if np.mean(y_diffs) > 0:  # They moved down, we add to y
-                     add_to_y = np.max(y_diffs) - y_diffs
-                 else:  # They moved up, we remove from y
-                     add_to_y = np.min(y_diffs) - y_diffs
-                 new_ordered_centroids[:, 1] = new_ordered_centroids[:, 1] + add_to_y
-
-                 ordered_centroids = new_ordered_centroids
-
-         # Normalize each bounding box
-
-         for shape_i in range(self.first_image.shape_number):
-             shape_i_indices = np.where(previous_ordered_image_i == shape_i + 1)
-             self.left[shape_i] = np.min(shape_i_indices[1])
-             self.right[shape_i] = np.max(shape_i_indices[1])
-             self.top[shape_i] = np.min(shape_i_indices[0])
-             self.bot[shape_i] = np.max(shape_i_indices[0])
-         self.ordered_first_image = previous_ordered_image_i
-
-     def _segment_blob_motion(self, image, color_space_combination, color_number, filter_spec):
-         if isinstance(image, str):
-             is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
-             image = read_and_rotate(image, self.first_image.bgr, self.raw_images,
-                                     is_landscape, self.first_image.crop_coord)
-             # image = readim(image)
-         In = OneImageAnalysis(image)  # , self.raw_images
-         In.convert_and_segment(color_space_combination, color_number, None, None, self.first_image.subtract_background,
-                                self.first_image.subtract_background2, filter_spec=filter_spec)
-         return In.binary_image
-
-
-     def prepare_video_writing(self, img_list, min_ram_free, in_colors=False):
-         # https://stackoverflow.com/questions/48672130/saving-to-hdf5-is-very-slow-python-freezing
-         # https://stackoverflow.com/questions/48385256/optimal-hdf5-dataset-chunk-shape-for-reading-rows/48405220#48405220
-         # 1) Create a list of video names
-         if self.not_analyzed_individuals is not None:
-             number_to_add = len(self.not_analyzed_individuals)
-         else:
-             number_to_add = 0
-         vid_names = list()
-         ind_i = 0
-         counter = 0
-         while ind_i < (self.first_image.shape_number + number_to_add):
-             ind_i += 1
-             while np.any(np.isin(self.not_analyzed_individuals, ind_i)):
-                 ind_i += 1
-             vid_names.append("ind_" + str(ind_i) + ".npy")
-             counter += 1
-         img_nb = len(img_list)
-
-         # 2) Create a table of the dimensions of each video
-         # Convert the total element count into GiB (the factor 8 * 1.16415e-10 ≈ 1 / 2**30)
-         necessary_memory = img_nb * np.multiply((self.bot - self.top + 1).astype(np.uint64), (self.right - self.left + 1).astype(np.uint64)).sum() * 8 * 1.16415e-10
-         if in_colors:
-             sizes = np.column_stack(
-                 (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top + 1, self.right - self.left + 1,
-                  np.repeat(3, self.first_image.shape_number)))
-             necessary_memory *= 3
-         else:
-             sizes = np.column_stack(
-                 (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top + 1, self.right - self.left + 1))
-         self.use_list_of_vid = True
-         if np.all(sizes[0, :] == sizes):
-             self.use_list_of_vid = False
-         available_memory = (psutil.virtual_memory().available >> 30) - min_ram_free
-         bunch_nb = int(np.ceil(necessary_memory / available_memory))
-         if bunch_nb > 1:
-             # The program will need twice the memory to create the second bunch.
-             bunch_nb = int(np.ceil(2 * necessary_memory / available_memory))
-
-         video_nb_per_bunch = np.floor(self.first_image.shape_number / bunch_nb).astype(np.uint8)
-         analysis_status = {"continue": True, "message": ""}
-         try:
-             if self.use_list_of_vid:
-                 video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in range(video_nb_per_bunch)]
-             else:
-                 video_bunch = np.zeros(np.append(sizes[0, :], video_nb_per_bunch), dtype=np.uint8)
-         except ValueError as v_err:
-             analysis_status = {"continue": False, "message": "Probably failed to detect the right cell(s) number, do the first image analysis manually."}
-             logging.error(f"{analysis_status['message']} error is: {v_err}")
-         # Check for available disk (ROM) space
-         if (psutil.disk_usage('/')[2] >> 30) < (necessary_memory + 2):
-             rom_memory_required = necessary_memory + 2
-         else:
-             rom_memory_required = None
-         remaining = self.first_image.shape_number % bunch_nb
-         if remaining > 0:
-             bunch_nb += 1
-         logging.info(f"Cellects will start writing {self.first_image.shape_number} videos. Given available memory, it will do it in {bunch_nb} time(s)")
-         return bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining
-
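The RAM-driven bunching rule above reads in isolation as: the estimated size of all videos in GiB, divided by the GiB actually free, rounded up, gives the number of write passes. A sketch with hypothetical numbers (psutil as in the module):

import numpy as np
import psutil

img_nb, min_ram_free = 200, 2                                # frames, and GiB to keep free
areas = np.array([500 * 400, 480 * 420], dtype=np.uint64)    # pixels per arena
necessary_memory = img_nb * areas.sum() * 8 * 1.16415e-10    # element count -> GiB
available_memory = (psutil.virtual_memory().available >> 30) - min_ram_free
bunch_nb = int(np.ceil(necessary_memory / available_memory))
if bunch_nb > 1:
    # Building the next bunch while the previous one is alive doubles the footprint
    bunch_nb = int(np.ceil(2 * necessary_memory / available_memory))
print(bunch_nb)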
-     def write_videos_as_np_arrays(self, img_list, min_ram_free, in_colors=False, reduce_image_dim=False):
-         # self = self.videos
-         # img_list = self.data_list
-         # min_ram_free = self.vars['min_ram_free']
-         # in_colors = not self.vars['already_greyscale']
-
-         is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
-         bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining = self.prepare_video_writing(img_list, min_ram_free, in_colors)
-         for bunch in np.arange(bunch_nb):
-             print(f'\nSaving the bunch n: {bunch + 1} / {bunch_nb} of videos:', end=' ')
-             if bunch == (bunch_nb - 1) and remaining > 0:
-                 arena = np.arange(bunch * video_nb_per_bunch, bunch * video_nb_per_bunch + remaining)
-             else:
-                 arena = np.arange(bunch * video_nb_per_bunch, (bunch + 1) * video_nb_per_bunch)
-             if self.use_list_of_vid:
-                 video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in arena]
-             else:
-                 video_bunch = np.zeros(np.append(sizes[0, :], len(arena)), dtype=np.uint8)
-             prev_img = None
-             images_done = bunch * len(img_list)
-             for image_i, image_name in enumerate(img_list):
-                 # print(str(image_i), end=' ')
-                 img = read_and_rotate(image_name, prev_img, self.raw_images, is_landscape, self.first_image.crop_coord)
-                 prev_img = deepcopy(img)
-                 if not in_colors and reduce_image_dim:
-                     img = img[:, :, 0]
-
-                 for arena_i, arena_name in enumerate(arena):
-                     # arena_i = 0; arena_name = arena[arena_i]
-                     sub_img = img[self.top[arena_name]: (self.bot[arena_name] + 1),
-                                   self.left[arena_name]: (self.right[arena_name] + 1), ...]
-                     if self.use_list_of_vid:
-                         video_bunch[arena_i][image_i, ...] = sub_img
-                     else:
-                         if len(video_bunch.shape) == 5:
-                             video_bunch[image_i, :, :, :, arena_i] = sub_img
-                         else:
-                             video_bunch[image_i, :, :, arena_i] = sub_img
-             for arena_i, arena_name in enumerate(arena):
-                 if self.use_list_of_vid:
-                     np.save(vid_names[arena_name], video_bunch[arena_i])
-                 else:
-                     if len(video_bunch.shape) == 5:
-                         np.save(vid_names[arena_name], video_bunch[:, :, :, :, arena_i])
-                     else:
-                         np.save(vid_names[arena_name], video_bunch[:, :, :, arena_i])
-
-
- if __name__ == "__main__":
-     from glob import glob
-     from pathlib import Path
-     from cellects.core.cellects_paths import TEST_DIR
-     from cellects.utils.load_display_save import *
-     from cellects.utils.utilitarian import insensitive_glob
-     from cellects.image_analysis.one_image_analysis_threads import ProcessFirstImage
-     from numpy import sort, array
-     # os.chdir(TEST_DIR / "experiment")
-     # image = readim("IMG_7653.jpg")
-     os.chdir(Path("D:/Directory/Data/100/101-104/"))
-     img_list = np.sort(insensitive_glob("IMG_" + '*' + ".jpg"))
-     image = readim(img_list[0])
-     first_image = OneImageAnalysis(image)
-     first_im_color_space_combination = {"lab": np.array((1, 0, 0), np.uint8)}
-     last_im_color_space_combination = {"lab": np.array((0, 0, 1), np.uint8)}
-     first_image.convert_and_segment(first_im_color_space_combination)
-     first_image.set_spot_shapes_and_size_confint('circle')
-     process_i = ProcessFirstImage(
-         [first_image, False, False, None, False, 8, None, 2, None, None, None])
-     process_i.binary_image = first_image.binary_image
-     process_i.process_binary_image()
-     first_image.validated_shapes = process_i.validated_shapes
-     first_image.shape_number = 8
-     first_image.get_crop_coordinates()
-     self = OneVideoPerBlob(first_image, 100, False)
-     are_gravity_centers_moving=1; color_space_combination=last_im_color_space_combination; color_number=2; sample_size=5; all_specimens_have_same_direction=True
-     self.get_bounding_boxes(are_gravity_centers_moving=1, img_list=img_list, color_space_combination=last_im_color_space_combination, color_number=2, sample_size=5, all_specimens_have_same_direction=False, display=True)
-