cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1931 @@
+ #!/usr/bin/env python3
+ """
+ This script contains the MotionAnalysis class. This class, called by program_organizer,
+ calls all methods used to read and process videos and to save results.
+ 1. load_images_and_videos: It starts by loading a video in .npy format (which must have been written beforehand by the one_video_per_blob file)
+ and, if it exists, the background used for background subtraction. Then, it uses a particular color space combination
+ to convert the RGB video into grayscale.
+ At this point, arenas have been delimited and each can be analyzed separately. The following describes what happens during the analysis of one arena. While Cellects can work with either one or several cells in each arena, we describe the algorithm for a single cell, clarifying whenever anything changes for multiple cells.
+ Cellects starts by reading and converting the video of each arena into grayscale, using the selected color space combination. Then, it processes it through the following steps.
+ 3. It validates the presence/absence of the specimen(s) in the first image of the video, named origin.
+ Cellects finds the frame in which the cell is visible for the first time in each arena. When the seed image is the first image, all cells are visible from the beginning. Otherwise, Cellects applies the same segmentation as for the seed image to the first, second, third images, and so on, until the cell appears in one of them (see the sketch below).
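A minimal sketch of that search, assuming a `segment` callable implementing the seed-image segmentation and a `min_area` pixel count; both names are illustrative, not Cellects' API:

def first_visible_frame(frames, segment, min_area):
    """Return the index of the first frame whose segmentation covers
    at least min_area pixels, or None if the specimen never appears."""
    for t, frame in enumerate(frames):
        if segment(frame).sum() >= min_area:
            return t
    return None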
+ 4. It browses the first frames of the video to find the average covering duration of a pixel.
+ It does so using a very conservative method, to make sure that only pixels that really are covered by the specimen(s)
+ are used to compute that covering duration.
+ 5. It performs the main segmentation algorithm on the whole video.
+ This segmentation consists in transforming the grayscale video resulting from the color space combination conversion
+ into a binary video of presence/absence. To do this, Cellects provides several options to detect specimen
+ motion and growth throughout the video. In simple datasets with strong contrast between specimens and background,
+ Cellects can simply segment each image by thresholding. In more challenging conditions, the algorithm tracks the
+ intensity of each pixel over time, using this dynamical information to determine when a pixel has been covered.
+ This is done through an automatically determined threshold on the intensity or on its derivative. Additionally,
+ Cellects can apply the logical operators AND or OR to these intensity and derivative thresholds. The default option
+ is the dynamical intensity threshold, which works in many cases, but the user interface lets the user quickly check
+ the results of different options and choose the best one by visual inspection of the segmentation result in different frames.
+ For Cellects to be as versatile as possible, the user can select among five segmentation strategies.
+ The first option is the simplest: It starts at the frame in which the cell is visible for the first time and segments the video frame by frame, using the same method as when analyzing only one image (as described in sections 1 and 2). The only difference is an optional background subtraction algorithm, which subtracts the first image from all others.
+ The second option segments each frame by intensity thresholding. The threshold changes over time to adapt to changes in the background. To estimate the optimal threshold for each frame, Cellects proceeds as follows: It first estimates the typical background intensity of each frame as an intensity higher than the first decile of all pixels in the frame. Then, it defines an initial threshold for each frame at a fixed distance above this decile. This fixed distance is initially low, so that the initial segmentation is an overestimation of the actual area covered by the specimen. Then, it performs segmentation of all frames. If any frame presents a growth greater than a user-set threshold (whose default value is 5% of the area), all thresholds are diminished by 10%. The segmentation is then performed again, and this process continues until no frame presents excessive growth (see the sketch below). This description refers to cases in which the background is darker than the specimen; Cellects automatically detects if contrast is reversed and adapts the method accordingly. Finally, Cellects segments the whole video with these adjusted intensity thresholds.
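The adjustment loop can be pictured as follows; a minimal sketch for the darker-background case, where `video` is a (T, H, W) array, and the 25-intensity offset, the `ease` schedule, and the function name are illustrative assumptions, not Cellects' exact values:

import numpy as np

def adaptive_threshold_segmentation(video, max_growth=0.05):
    """Illustrative sketch of the second strategy (darker background):
    per-frame thresholds anchored to the first decile of each frame,
    progressively tightened until no frame grows too fast."""
    n_px = video.shape[1] * video.shape[2]
    # Typical background intensity per frame: the first decile of its pixels
    background = np.quantile(video.reshape(video.shape[0], -1), 0.1, axis=1)
    ease = 0.8  # high ease = permissive thresholds = overestimated areas
    while ease > -0.7:
        # The threshold sits at a fixed distance above the background decile
        thresholds = background + (1.0 - ease) * 25.0  # 25.0: illustrative offset
        binary = video > thresholds[:, None, None]
        newly_covered = np.abs(np.diff(binary.sum(axis=(1, 2))))
        if newly_covered.size == 0 or np.all(newly_covered <= max_growth * n_px):
            return binary
        ease -= 0.1  # tighten all thresholds and segment again
    return binary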
+ The third option uses the change in intensity over time: For each pixel, it considers the evolution of its intensity in time, and considers that the cell covers the pixel when the slope of this intensity over time exceeds a threshold (Fig 3d in the main text). Cellects computes each frame's threshold with a procedure similar to that of the second option, except for the following. As the value of a slope is highly sensitive to noise, Cellects first smooths the intensity curves using a moving average with a window length adapted to the typical time it takes for the cell to cover each pixel. Cellects tries to compute this typical time using the dynamics of a subset of pixels whose intensity varies strongly at the beginning of the growth (see the code for further details), and uses a default value of 10 frames when this computation fails. Cellects also uses this subset of pixels to get the reference slope threshold. Finally, it progressively modifies this reference until the video segmentation matches the required growth ratio, as in the second option (see the sketch below).
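A minimal sketch of the per-pixel smoothing and slope test for the darker-background case; `window` and `slope_threshold` stand in for the values Cellects estimates automatically:

import numpy as np

def slope_based_covering(video, window=10, slope_threshold=1.5):
    """Mark a pixel as covered when the smoothed temporal derivative of its
    (frame-normalized) intensity exceeds a threshold."""
    t, h, w = video.shape
    # Normalize by mean frame intensity to damp global illumination changes
    norm = video / video.mean(axis=(1, 2))[:, None, None]
    kernel = np.ones(window) / window
    smoothed = np.empty_like(norm)
    for i in range(h):
        for j in range(w):
            padded = np.pad(norm[:, i, j], (window // 2, window - 1 - window // 2), mode='edge')
            smoothed[:, i, j] = np.convolve(padded, kernel, mode='valid')
    slope = np.gradient(smoothed, axis=0)  # intensity change per frame
    return slope > slope_threshold         # darker background: covering raises intensity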
+ The next two options are combinations of the first two.
+ The fourth is a logical OR between the intensity value and the intensity slope segmentations. It provides a very permissive segmentation, which is useful when parts of the cells are very hard to detect.
+ The fifth is a logical AND between the intensity value and the intensity slope segmentations. It provides a more restrictive segmentation, which can be useful when both the value and the slope segmentations detect areas that are not covered by the cell (both combinations are sketched below).
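Given binary outputs of the two base strategies, the combinations reduce to plain element-wise operators, as in this sketch (the flag name mirrors the 'true_if_use_light_AND_slope_else_OR' variable used later in the code):

import numpy as np

def combine_segmentations(value_seg, slope_seg, use_and):
    """Fourth and fifth strategies: OR is permissive, AND is restrictive.
    Both inputs are boolean (T, H, W) arrays from the two base strategies."""
    if use_and:
        return np.logical_and(value_seg, slope_seg)
    return np.logical_or(value_seg, slope_seg)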
+ 6. Video post-processing improves the binary video obtained through segmentation.
+ The final step consists in improving the segmentation (see section S3.5 of the Supplementary Materials for more information). Cellects first applies several filters that consistently improve the results, such as checking that each detected pixel was also detected at least twice in the three previous frames, omitting images containing too many detected pixels, and performing morphological opening and closing. Optionally, the user can activate the detection of areas left by the cell (see section S3.5.B of the Supplementary Materials for details).
+
+ Additionally, optional algorithms correct particular types of errors. The first algorithm is useful when the substrate on which the cells sit in the first image is of a different color than the substrate on which they will grow, expand or move. This color difference may produce holes in the segmentation, and we developed an optional algorithm to correct this kind of error around the initial shape. The second algorithm should be used when each arena contains a single specimen, which should form a single connected component. We can use this information to correct mistakes in models such as P. polycephalum, whose strong heterogeneity produces large variations of opacity. In these cases, segmentation may fail in the most transparent parts of the specimens and identify two disconnected components. The correction algorithm merges these disconnected components by finding the most likely pixels connecting them and the most likely times at which those pixels were covered during growth.
+ 6.A Basic post-processing
+ This process improves the raw segmentation. It includes algorithms to filter out aberrant frames, remove small artifacts and holes, and detect when the specimens leave pixels. First, it checks that every pixel was detected at least twice in the three previous frames. Second, it excludes frames containing too many newly detected pixels, according to the maximal growth ratio per frame (as defined in section 3B). For these frames, the previous segmentation is kept, making the analysis robust to events producing a sharp variation in the brightness of a few images in the video (for example, when an enclosed device is temporarily opened or a light is switched on or off). Third, it removes potential small artifacts and holes by performing morphological opening followed by morphological closing (see the sketch below).
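A minimal sketch of these three filters on a (T, H, W) uint8 segmentation; `max_growth_px` (the maximal allowed growth per frame, in pixels) and the function name are illustrative:

import cv2
import numpy as np

def basic_post_processing(segmentation, max_growth_px):
    """Temporal-consistency vote, aberrant-frame rejection, then
    morphological opening and closing."""
    cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    cleaned = segmentation.copy()
    for t in range(2, segmentation.shape[0]):
        # Keep pixels detected at least twice over the three last frames
        votes = segmentation[t - 2:t + 1].sum(axis=0)
        frame = (votes >= 2).astype(np.uint8)
        # Reject frames whose growth exceeds the allowed maximum
        if frame.sum() - cleaned[t - 1].sum() > max_growth_px:
            frame = cleaned[t - 1]
        # Remove small artifacts, then fill small holes
        frame = cv2.morphologyEx(frame, cv2.MORPH_OPEN, cross)
        frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, cross)
        cleaned[t] = frame
    return cleaned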
+
+ 6.B Cell leaving detection
+ This optional algorithm detects when areas are left by the specimens. It is useful when the cells not only grow but also move, so they can leave pixels that were covered before. When a pixel is covered, Cellects saves the intensity it had before being covered, computed as the median of the pixel's intensity over a time window before it was covered. The length of this time window matches the typical time it takes for the cell to cover each pixel (computed as described in section 4.B, third segmentation strategy). Then, pixels at the border of the cell whose intensity falls below the saved intensity, rescaled by a user-defined multiplier (set by default to 1), are considered to be left by the cell. When there should be only one cell in the arena, Cellects tries to remove each component one by one, accepting the removal only when it does not break the connectivity of the remaining parts of the cell (see the sketch below).
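For a single border pixel, the test reduces to the following sketch, where `intensity_history` is that pixel's grayscale time series, `t_covered` (assumed >= 1) is the frame at which it was covered, and `fading` is the user multiplier:

import numpy as np

def pixel_left(intensity_history, t_covered, t_now, window=10, fading=1.0):
    """Compare a border pixel's current intensity with the median intensity
    it had over a short window just before being covered."""
    pre_cover = np.median(intensity_history[max(0, t_covered - window):t_covered])
    # The pixel is considered left when its intensity falls back below its
    # pre-covering intensity, rescaled by the fading multiplier
    return intensity_history[t_now] < fading * pre_cover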
+
+ 6.C Special error correction algorithms
+ At the time of writing, Cellects contains two post-processing algorithms adapted to two specific situations. The first one is useful when there should be only one specimen per arena and Cellects fails to detect its distant parts because their connections are not sufficiently visible. The second one is useful when Cellects fails to detect small areas around the initial shape, for example due to reflections near the edges. The following explains how these optional algorithms work.
+
+ 6.D Connect distant components:
+ This algorithm automatically and progressively adds distant shapes to the main one. This correcting process occurs in three steps. First, it selects which distant components should get connected to the main one. The user can adjust this selection according to the distance between the distant components and the main shape, and to the minimal and maximal size of these components. Second, for each distant component, it computes and creates the shortest connection with the main shape. The width of that connection depends on the size of the distant shape where the connection occurs. Third, it uses an algorithm similar to the one used to correct errors around the initial shape to estimate how quickly the gaps should be filled. This algorithm uses distance and timing vectors to create a dynamic connection between these two shapes (Figure 3f-h in the main text; the second step is sketched below).
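A minimal sketch of the second step only, drawing the shortest straight connection between a distant component and the main shape (both binary uint8 images); Cellects scores candidate pixels and connection widths more finely, so this only shows the idea:

import cv2
import numpy as np

def connect_component(main_shape, component):
    """Join a distant component to the main shape along the shortest
    straight segment between their closest pixels."""
    # Distance of every pixel to the main shape
    dist = cv2.distanceTransform(1 - main_shape, cv2.DIST_L2, 3)
    ys, xs = np.nonzero(component)
    k = np.argmin(dist[ys, xs])          # component pixel closest to the main shape
    dist_comp = cv2.distanceTransform(1 - component, cv2.DIST_L2, 3)
    ym, xm = np.nonzero(main_shape)
    m = np.argmin(dist_comp[ym, xm])     # main-shape pixel closest to the component
    connected = np.logical_or(main_shape, component).astype(np.uint8)
    cv2.line(connected, (int(xs[k]), int(ys[k])), (int(xm[m]), int(ym[m])), 1, thickness=1)
    return connected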
+
+ 6.E Correct errors around initial shape:
+ This correcting process occurs in two steps. The first one scans for the formation of holes around the initial segmentation during the beginning of the growth. The second one finds out when and how these holes should be filled. To determine how the holes should be covered, Cellects uses the same algorithm as the one used to connect distant components. Computing the speed at which growth occurs from the initial position allows Cellects to fill the holes at the same speed, and therefore to correct these errors.
+
+ 7. Special algorithms for Physarum polycephalum
+ Although developed for this organism, these methods can be used with other biological models, such as mycelia.
+ 7.A. Oscillatory activity detection:
+ This algorithm analyzes grayscale video frames to detect whether pixel intensities increase or decrease over time. To prevent artifacts from arena-scale illumination fluctuations, pixel intensities are first standardized by the average intensity of the entire image. A pixel is considered to have increased (or decreased) in intensity only if at least four of its eight neighboring pixels have also shown an increase (or decrease). Then, regions of adjacent pixels whose intensity is changing in the same direction are detected, keeping only those larger than a user-selected threshold. Each region is tracked throughout the video, recording its oscillatory period, phase, and coordinates until it dissipates or the video ends (the neighborhood vote is sketched below).
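A minimal sketch of the neighborhood vote between two consecutive frames; the function name and the SciPy-based implementation are illustrative assumptions:

import numpy as np
from scipy.ndimage import convolve

def oscillation_direction(prev_frame, frame):
    """Label each pixel +1 (rising) or -1 (falling) only if at least 4 of
    its 8 neighbors changed in the same direction; frames are standardized
    by their mean to damp global illumination fluctuations."""
    diff = frame / frame.mean() - prev_frame / prev_frame.mean()
    rising, falling = diff > 0, diff < 0
    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])  # 8-neighborhood
    rising_votes = convolve(rising.astype(np.uint8), kernel, mode='constant')
    falling_votes = convolve(falling.astype(np.uint8), kernel, mode='constant')
    direction = np.zeros(frame.shape, dtype=np.int8)
    direction[rising & (rising_votes >= 4)] = 1
    direction[falling & (falling_votes >= 4)] = -1
    return direction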
+
+ 7.B. Network detection:
+ P. polycephalum cells are composed of two types of compartments: a tubular network that transports cytoplasmic materials, and a thinner compartment that covers the rest of the space. Cellects' initial segmentation does not distinguish between these two compartments, detecting all pixels that have been covered by either of them. This step distinguishes them, in order to segment the tubular network, whose intensity is further from that of the background.
+ Cellects detects such a network using an algorithm that scores the segmentation results obtained after applying the Sato and Frangi vesselness filters. On top of testing these filters with around 10 variations of their parameters, Cellects also tries to segment the images adaptively, segmenting each part of the image using a 2D rolling window. Once the best segmentation strategy is found for the last image of the video, it is used to segment the network in all other frames (see the sketch below).
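A minimal sketch of the filter-scoring loop using scikit-image's sato and frangi filters; the scoring rule (tube pixels inside versus outside the coarse specimen mask) and the parameter grid are illustrative assumptions, not Cellects' own:

import numpy as np
from skimage.filters import sato, frangi, threshold_otsu

def best_vesselness_segmentation(image, coarse_mask):
    """Try two vesselness filters over a few parameter variations and
    keep the segmentation with the best score."""
    best_score, best = -np.inf, None
    for filt in (sato, frangi):
        for sigmas in ((1, 2), (1, 2, 4), (2, 4, 8)):  # a few parameter variations
            response = filt(image, sigmas=sigmas, black_ridges=False)
            binary = response > threshold_otsu(response)
            # Reward tubes detected inside the specimen, penalize the rest
            score = (binary & coarse_mask).sum() - (binary & ~coarse_mask).sum()
            if score > best_score:
                best_score, best = score, binary
    return best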
+
+ 8. Graph extraction:
+ Cellects can extract the graph of the specimen or, if detected, of its internal network.
+ To do so, Cellects does the following (the vertex-detection step is sketched after this list):
+ - Get the skeleton of the binary matrix of presence/absence of the specimen, as well as the specimen/network
+ width at every pixel of the skeleton.
+ If the original position from which the specimen started does not have the same color as the rest of the arena, apply
+ a special algorithm to draw the skeleton at the border of that origin.
+ - Smooth the skeleton using an algorithm that removes small loops of 3-pixel width
+ - Keep only the largest connected component of the skeleton
+ - Use pixel connectivity and neighborhood connectivity to detect all tips and branching vertices of the graph
+ summarizing the skeleton.
+ - Find and label all edges connecting tips, and remove those that are shorter than the width of the skeleton arm they are connected to
+ - Find and label all edges connecting touching vertices
+ - Find and label all edges connected to the two previously mentioned kinds of vertices
+ - Find and label all edges forming loops and connected to only one vertex
+ - Remove all shapes of one or two pixels that are detected as neither vertices nor edges,
+ if and only if removing them does not break the skeleton into more than one connected component.
+ - Remove edge duplicates
+ - Remove vertices connecting 2 edges
+ - Finally, create and save the tables storing edge and vertex coordinates and properties
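The tip/branch detection can be pictured with the standard neighbor-count heuristic on a binary skeleton; this is a sketch of the idea, not Cellects' exact code:

import numpy as np
from scipy.ndimage import convolve

def find_tips_and_branches(skeleton):
    """On a binary skeleton, a pixel with one 8-connected neighbor is a
    tip; a pixel with three or more is a branching vertex."""
    skeleton = skeleton.astype(np.uint8)
    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    neighbors = convolve(skeleton, kernel, mode='constant') * skeleton
    tips = neighbors == 1
    branches = neighbors >= 3
    return tips, branches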
+
+ 9. Save
+ Once the image analysis is finished, the software determines the value of each morphological descriptor at each time frame (SI - Table 1). Finally, Cellects saves a new video for each arena showing the original video next to the converted video displaying the segmentation result, so that the user can easily validate the result. If an arena shows a poor segmentation result, the user can re-analyze it, tuning all parameters for that specific arena. Cellects also saves:
+ - the final results of the segmentation and its contour (if applicable)
+ - descriptors summarizing the whole video
+ - validation images (efficiency tests) and videos
+
+ 10. If this class has been used in the video_analysis_window on only one arena, the method
+ change_results_of_one_arena will open (or create if not existing) tables in the focal folder
+ and adjust every row corresponding to that particular arena to the current analysis results.
+
+ """
+
+ import weakref
+ from gc import collect
+ from time import sleep
+ from numba.typed import Dict as TDict
+ from psutil import virtual_memory
+
+ from cellects.image_analysis.cell_leaving_detection import cell_leaving_detection
+ from cellects.image_analysis.cluster_flux_study import ClusterFluxStudy
+ from cellects.image_analysis.fractal_analysis import box_counting, prepare_box_counting
+ from cellects.image_analysis.image_segmentation import segment_with_lum_value
+ from cellects.image_analysis.morphological_operations import (find_major_incline, image_borders, draw_me_a_sun,
+                                                               make_gravity_field, expand_to_fill_holes)
+ from cellects.image_analysis.network_functions import *
+ from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
+ from cellects.image_analysis.shape_descriptors import ShapeDescriptors, from_shape_descriptors_class
+ from cellects.utils.utilitarian import PercentAndTimeTracker, smallest_memory_array
+
+
+ class MotionAnalysis:
+
+     def __init__(self, l):
+         """
+         :param l: A list containing, in order: the arena index within the
+             per-arena lists stored in vars (l[0]), the arena number (l[1]),
+             the vars dict of analysis parameters, including the linear
+             combination of color channels (rgb_hsv_lab) to use (l[2]),
+             the detect_shape (l[3]), analyse_shape (l[4]) and show_seg
+             (l[5]) flags, and videos_already_in_ram (l[6]).
+         """
+         self.one_descriptor_per_arena = {}
+         self.one_descriptor_per_arena['arena'] = l[1]
+         vars = l[2]
+         detect_shape = l[3]
+         analyse_shape = l[4]
+         show_seg = l[5]
+         videos_already_in_ram = l[6]
+         self.visu = None
+         self.binary = None
+         self.origin_idx = None
+         self.smoothing_flag: bool = False
+         logging.info(f"Start the motion analysis of the arena n°{self.one_descriptor_per_arena['arena']}")
+
+         self.vars = vars
+         # self.origin = self.vars['first_image'][self.vars['top'][l[0]]:(
+         #     self.vars['bot'][l[0]] + 1),
+         #     self.vars['left'][l[0]]:(self.vars['right'][l[0]] + 1)]
+         self.load_images_and_videos(videos_already_in_ram, l[0])
+
+         self.dims = self.converted_video.shape
+         self.segmentation = np.zeros(self.dims, dtype=np.uint8)
+         self.covering_intensity = np.zeros(self.dims[1:], dtype=np.float64)
+         self.mean_intensity_per_frame = np.mean(self.converted_video, (1, 2))
+         if self.vars['arena_shape'] == "circle":
+             self.borders = Ellipse(self.dims[1:]).create()
+             img_contours = image_borders(self.dims[1:])
+             self.borders = self.borders * img_contours
+         else:
+             self.borders = image_borders(self.dims[1:])
+         self.pixel_ring_depth = 9
+         self.step = 10
+         self.lost_frames = 10
+         self.update_ring_width()
+
+         self.start = None
+         if detect_shape:
+             # self=self.motion
+             # self.drift_correction()
+             self.start = None
+             # Two conditional layers allow detecting whether an expansion/exploration occurred.
+             self.get_origin_shape()
+             # The first, user-defined, is 'first_move_threshold'; the second is the detection of the
+             # substantial image: if either fails, the program considers that there is no expansion.
+             if self.dims[0] >= 40:
+                 step = self.dims[0] // 20
+             else:
+                 step = 1
+             if self.start >= (self.dims[0] - step - 1):
+                 self.start = None
+             else:
+                 self.get_covering_duration(step)
+             if self.start is not None:
+                 # self.vars['fading'] = -0.5
+                 # self.vars['do_threshold_segmentation']: bool = False
+                 # self.vars['do_slope_segmentation'] = True
+                 # self.vars['true_if_use_light_AND_slope_else_OR']: bool = False
+                 self.detection()
+                 self.initialize_post_processing()
+                 self.t = self.start
+                 while self.t < self.binary.shape[0]:  # 200:
+                     self.update_shape(show_seg)
+
+             if self.start is None:
+                 self.binary = np.repeat(np.expand_dims(self.origin, 0), self.converted_video.shape[0], axis=0)
+
+         if analyse_shape:
+             self.get_descriptors_from_binary()
+             self.detect_growth_transitions()
+             self.networks_detection(show_seg)
+             self.study_cytoscillations(show_seg)
+             self.fractal_descriptions()
+             self.get_descriptors_summary()
+             if videos_already_in_ram is None:
+                 self.save_results()
+
+     def load_images_and_videos(self, videos_already_in_ram, i):
+         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Load images and videos")
+         self.origin = self.vars['origin_list'][i]  # self.vars['origins_list'][i]
+         if videos_already_in_ram is None:
+             true_frame_width = self.origin.shape[1]
+             vid_name = f"ind_{self.one_descriptor_per_arena['arena']}.npy"
+             if len(self.vars['background_list']) == 0:
+                 self.background = None
+             else:
+                 self.background = self.vars['background_list'][i]
+             if len(self.vars['background_list2']) == 0:
+                 self.background2 = None
+             else:
+                 self.background2 = self.vars['background_list2'][i]
+
+             if self.vars['already_greyscale']:
+                 self.converted_video = video2numpy(
+                     vid_name, None, self.background, true_frame_width)
+                 if len(self.converted_video.shape) == 4:
+                     self.converted_video = self.converted_video[:, :, :, 0]
+             else:
+                 self.visu = video2numpy(
+                     vid_name, None, self.background, true_frame_width)
+                 self.get_converted_video()
+         else:
+             if self.vars['already_greyscale']:
+                 self.converted_video = videos_already_in_ram
+             else:
+                 if self.vars['convert_for_motion']['logical'] == 'None':
+                     self.visu, self.converted_video = videos_already_in_ram
+                 else:
+                     (self.visu,
+                      self.converted_video,
+                      self.converted_video2) = videos_already_in_ram
+
+     def get_converted_video(self):
+         if not self.vars['already_greyscale']:
+             logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Convert the RGB visu video into a greyscale image using the color space combination: {self.vars['convert_for_motion']}")
+             first_dict = TDict()
+             second_dict = TDict()
+             c_spaces = []
+             for k, v in self.vars['convert_for_motion'].items():
+                 if k != 'logical' and v.sum() > 0:
+                     if k[-1] != '2':
+                         first_dict[k] = v
+                         c_spaces.append(k)
+                     else:
+                         second_dict[k[:-1]] = v
+                         c_spaces.append(k[:-1])
+             if self.vars['lose_accuracy_to_save_memory']:
+                 self.converted_video = np.zeros(self.visu.shape[:3], dtype=np.uint8)
+             else:
+                 self.converted_video = np.zeros(self.visu.shape[:3], dtype=np.float64)
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 if self.vars['lose_accuracy_to_save_memory']:
+                     self.converted_video2 = np.zeros(self.visu.shape[:3], dtype=np.uint8)
+                 else:
+                     self.converted_video2 = np.zeros(self.visu.shape[:3], dtype=np.float64)
+
+             # Subtracting the first image from itself would be meaningless so,
+             # when doing background subtraction, the first and the second image are equal.
+             for counter in np.arange(self.visu.shape[0]):
+                 if self.vars['subtract_background'] and counter == 0:
+                     img = self.visu[1, ...]
+                 else:
+                     img = self.visu[counter, ...]
+                 greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces, first_dict,
+                                                                                      second_dict, self.background,
+                                                                                      self.background2,
+                                                                                      self.vars['lose_accuracy_to_save_memory'])
+                 self.converted_video[counter, ...] = greyscale_image
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     self.converted_video2[counter, ...] = greyscale_image2
+
+     def get_origin_shape(self):
+         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Make sure of the origin shape")
+         if self.vars['origin_state'] == "constant":
+             self.start = 1
+             self.origin_idx = np.nonzero(self.origin)
+             if self.vars['lighter_background']:
+                 # Initialize the covering_intensity matrix as a reference for pixel fading
+                 self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = 200
+             self.substantial_growth = 1.2 * self.origin.sum()
+         else:
+             self.start = 0
+             analysisi = OneImageAnalysis(self.converted_video[0, :, :])
+             analysisi.binary_image = 0
+             if self.vars['drift_already_corrected']:
+                 mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
+                 for frame_i in np.arange(self.dims[0]):
+                     true_pixels = np.nonzero(self.converted_video[frame_i, ...])
+                     mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]), np.min(true_pixels[1]), np.max(
+                         true_pixels[1])
+             else:
+                 mask_coord = None
+             while np.logical_and(np.sum(analysisi.binary_image) < self.vars['first_move_threshold'], self.start < self.dims[0]):
+                 analysisi = self.frame_by_frame_segmentation(self.start, mask_coord)
+                 self.start += 1
+
+             # frame_i = OneImageAnalysis(self.converted_video[self.start, :, :])
+             # frame_i.thresholding(self.vars['luminosity_threshold'], self.vars['lighter_background'])
+             # self.start += 1
+
+             # Use connected components to find which shape is the nearest from the image center.
+             if self.vars['several_blob_per_arena']:
+                 self.origin = analysisi.binary_image
+             else:
+                 nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(analysisi.binary_image,
+                                                                                            connectivity=8)
+                 if self.vars['appearance_detection_method'] == 'most_central':
+                     center = np.array((self.dims[2] // 2, self.dims[1] // 2))
+                     stats = np.zeros(nb_components - 1)
+                     for shape_i in np.arange(1, nb_components):
+                         stats[shape_i - 1] = eudist(center, centroids[shape_i, :])
+                     # The shape having the minimal euclidean distance from the center will be the original shape
+                     self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
+                     self.origin[output == (np.argmin(stats) + 1)] = 1
+                 elif self.vars['appearance_detection_method'] == 'largest':
+                     self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
+                     # Component labels start at 1 (0 is the background), hence the + 1
+                     self.origin[output == (np.argmax(stats[1:, 4]) + 1)] = 1
+             self.origin_idx = np.nonzero(self.origin)
+             self.substantial_growth = self.origin.sum() + 250
+
+     def get_covering_duration(self, step):
+         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Find a frame with a significant growth/motion and determine the number of frames necessary for a pixel to get covered")
+         # Find the time at which growth reached a substantial level.
+         self.substantial_time = self.start
+         # To prevent noisy images from having deleterious effects, make sure that the area reaches the threshold thrice.
+         occurrence = 0
+         if self.vars['drift_already_corrected']:
+             mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
+             for frame_i in np.arange(self.dims[0]):
+                 true_pixels = np.nonzero(self.converted_video[frame_i, ...])
+                 mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]), np.min(true_pixels[1]), np.max(
+                     true_pixels[1])
+         else:
+             mask_coord = None
+         while np.logical_and(occurrence < 3, self.substantial_time < (self.dims[0] - step - 1)):
+             self.substantial_time += step
+             growth_vision = self.frame_by_frame_segmentation(self.substantial_time, mask_coord)
+
+             # growth_vision = OneImageAnalysis(self.converted_video[self.substantial_time, :, :])
+             # # growth_vision.thresholding()
+             # if self.vars['convert_for_motion']['logical'] != 'None':
+             #     growth_vision.image2 = self.converted_video2[self.substantial_time, ...]
+             # growth_vision.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'],
+             #                            bio_label=self.vars["bio_label"], bio_label2=self.vars["bio_label2"],
+             #                            grid_segmentation=self.vars['grid_segmentation'],
+             #                            lighter_background=self.vars['lighter_background'])
+
+             surfarea = np.sum(growth_vision.binary_image * self.borders)
+             if surfarea > self.substantial_growth:
+                 occurrence += 1
+         # Get a rough idea of the area covered during this time
+         if (self.substantial_time - self.start) > 20:
+             if self.vars['lighter_background']:
+                 growth = (np.sum(self.converted_video[self.start:(self.start + 10), :, :], 0) / 10) - (np.sum(self.converted_video[(self.substantial_time - 10):self.substantial_time, :, :], 0) / 10)
+             else:
+                 growth = (np.sum(self.converted_video[(self.substantial_time - 10):self.substantial_time, :, :], 0) / 10) - (
+                     np.sum(self.converted_video[self.start:(self.start + 10), :, :], 0) / 10)
+         else:
+             if self.vars['lighter_background']:
+                 growth = self.converted_video[self.start, ...] - self.converted_video[self.substantial_time, ...]
+             else:
+                 growth = self.converted_video[self.substantial_time, ...] - self.converted_video[self.start, ...]
+         intensity_extent = np.ptp(self.converted_video[self.start:self.substantial_time, :, :], axis=0)
+         growth[np.logical_or(growth < 0, intensity_extent < np.median(intensity_extent))] = 0
+         growth = bracket_to_uint8_image_contrast(growth)
+         growth *= self.borders
+         growth_vision = OneImageAnalysis(growth)
+         growth_vision.thresholding()
+         self.substantial_image = cv2.erode(growth_vision.binary_image, cross_33, iterations=2)
+
+         if np.any(self.substantial_image):
+             natural_noise = np.nonzero(intensity_extent == np.min(intensity_extent))
+             natural_noise = self.converted_video[self.start:self.substantial_time, natural_noise[0][0], natural_noise[1][0]]
+             natural_noise = moving_average(natural_noise, 5)
+             natural_noise = np.ptp(natural_noise)
+             subst_idx = np.nonzero(self.substantial_image)
+             cover_lengths = np.zeros(len(subst_idx[0]), dtype=np.uint32)
+             for index in np.arange(len(subst_idx[0])):
+                 vector = self.converted_video[self.start:self.substantial_time, subst_idx[0][index], subst_idx[1][index]]
+                 left, right = find_major_incline(vector, natural_noise)
+                 # If find_major_incline did find a major incline (otherwise it puts 0 in left and 1 in right):
+                 if not np.logical_and(left == 0, right == 1):
+                     cover_lengths[index] = len(vector[left:-right])
+             # If this analysis fails, use a deterministic step
+             if len(cover_lengths[cover_lengths > 0]) > 0:
+                 self.step = (np.round(np.mean(cover_lengths[cover_lengths > 0])).astype(np.uint32) // 2) + 1
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Pre-processing detection: the time for a pixel to get covered is set to {self.step}")
+             else:
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Pre-processing detection: could not automatically find the time for a pixel to get covered. Default value is 1 for video length < 40 and 10 otherwise")
+
+             # Make sure to avoid overestimating the step
+             if self.step > self.dims[0] // 20:
+                 self.step = self.dims[0] // 20
+             if self.step == 0:
+                 self.step = 1
+         # When first_move_threshold is not stringent enough, the program may detect a movement due to noise.
+         # In that case, the substantial_image is empty and there is no reason to proceed further.
+         else:
+             self.start = None
+
+     def detection(self, compute_all_possibilities=False):
+         # self.lost_frames = (self.step - 1) * self.vars['repeat_video_smoothing']  # relevant when smoothing did not use padding.
+         self.lost_frames = self.step
+         # I/ Image by image segmentation algorithms.
+         # If images contain a drift correction (zeros at the borders of the image),
+         # replace these zeros with normal background values before segmenting.
+         if self.vars['frame_by_frame_segmentation'] or compute_all_possibilities:
+             logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect cell motion and growth using the frame by frame segmentation algorithm")
+             self.segmentation = np.zeros(self.dims, dtype=np.uint8)
+             if self.vars['drift_already_corrected']:
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Adjust images to drift correction and segment them")
+                 # 1. Get the mask valid for a number of images around it (step).
+                 mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
+                 for frame_i in np.arange(self.dims[0]):
+                     true_pixels = np.nonzero(self.converted_video[frame_i, ...])
+                     mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]), np.min(true_pixels[1]), np.max(true_pixels[1])
+             else:
+                 mask_coord = None
+
+             for t in np.arange(self.dims[0]):
+                 analysisi = self.frame_by_frame_segmentation(t, mask_coord)
+                 self.segmentation[t, ...] = analysisi.binary_image
+                 if self.vars['lose_accuracy_to_save_memory']:
+                     self.converted_video[t, ...] = bracket_to_uint8_image_contrast(analysisi.image)
+                 else:
+                     self.converted_video[t, ...] = analysisi.image
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     if self.vars['lose_accuracy_to_save_memory']:
+                         self.converted_video2[t, ...] = bracket_to_uint8_image_contrast(analysisi.image2)
+                     else:
+                         self.converted_video2[t, ...] = analysisi.image2
+
+         if self.vars['color_number'] == 2:
+             luminosity_segmentation, l_threshold_over_time = self.lum_value_segmentation(self.converted_video, do_threshold_segmentation=self.vars['do_threshold_segmentation'] or compute_all_possibilities)
+             self.converted_video = self.smooth_pixel_slopes(self.converted_video)
+             if self.vars['do_slope_segmentation'] or compute_all_possibilities:
+                 gradient_segmentation = self.lum_slope_segmentation(self.converted_video)
+                 gradient_segmentation[-self.lost_frames:, ...] = np.repeat(gradient_segmentation[-self.lost_frames, :, :][np.newaxis, :, :], self.lost_frames, axis=0)
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 if self.vars['do_threshold_segmentation'] or compute_all_possibilities:
+                     luminosity_segmentation2, l_threshold_over_time2 = self.lum_value_segmentation(self.converted_video2, do_threshold_segmentation=True)
+                     if self.vars['convert_for_motion']['logical'] == 'Or':
+                         luminosity_segmentation = np.logical_or(luminosity_segmentation, luminosity_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'And':
+                         luminosity_segmentation = np.logical_and(luminosity_segmentation, luminosity_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'Xor':
+                         luminosity_segmentation = np.logical_xor(luminosity_segmentation, luminosity_segmentation2)
+                 self.converted_video2 = self.smooth_pixel_slopes(self.converted_video2)
+                 if self.vars['do_slope_segmentation'] or compute_all_possibilities:
+                     gradient_segmentation2 = self.lum_slope_segmentation(self.converted_video2)
+                     gradient_segmentation2[-self.lost_frames:, ...] = np.repeat(gradient_segmentation2[-self.lost_frames, :, :][np.newaxis, :, :], self.lost_frames, axis=0)
+                     if self.vars['convert_for_motion']['logical'] == 'Or':
+                         gradient_segmentation = np.logical_or(gradient_segmentation, gradient_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'And':
+                         gradient_segmentation = np.logical_and(gradient_segmentation, gradient_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'Xor':
+                         gradient_segmentation = np.logical_xor(gradient_segmentation, gradient_segmentation2)
+
+             if compute_all_possibilities:
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Compute all options to detect cell motion and growth. Maximal growth per frame: {self.vars['maximal_growth_factor']}")
+                 self.luminosity_segmentation = np.nonzero(luminosity_segmentation)
+                 self.gradient_segmentation = np.nonzero(gradient_segmentation)
+                 self.logical_and = np.nonzero(np.logical_and(luminosity_segmentation, gradient_segmentation))
+                 self.logical_or = np.nonzero(np.logical_or(luminosity_segmentation, gradient_segmentation))
+             elif not self.vars['frame_by_frame_segmentation']:
+                 if self.vars['do_threshold_segmentation'] and not self.vars['do_slope_segmentation']:
+                     logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity threshold segmentation algorithm")
+                     self.segmentation = luminosity_segmentation
+                 if self.vars['do_slope_segmentation']:  # and not self.vars['do_threshold_segmentation']: NEW
+                     logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity slope segmentation algorithm")
+                     # gradient_segmentation[:(self.lost_frames + 1), ...] = luminosity_segmentation[:(self.lost_frames + 1), ...]
+                     if not self.vars['do_threshold_segmentation']:  # NEW
+                         self.segmentation = gradient_segmentation
+                 if np.logical_and(self.vars['do_threshold_segmentation'], self.vars['do_slope_segmentation']):
+                     if self.vars['true_if_use_light_AND_slope_else_OR']:
+                         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detection results from threshold AND slope segmentation algorithms")
+                         self.segmentation = np.logical_and(luminosity_segmentation, gradient_segmentation)
+                     else:
+                         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detection results from threshold OR slope segmentation algorithms")
+                         self.segmentation = np.logical_or(luminosity_segmentation, gradient_segmentation)
+                 self.segmentation = self.segmentation.astype(np.uint8)
+                 self.converted_video2 = None
+
+     def frame_by_frame_segmentation(self, t, mask_coord=None):
+         contrasted_im = bracket_to_uint8_image_contrast(self.converted_video[t, :, :])
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             contrasted_im2 = bracket_to_uint8_image_contrast(self.converted_video2[t, :, :])
+         # 1. Get the mask valid for a number of images around it (step).
+         if self.vars['drift_already_corrected']:
+             if t < self.step // 2:
+                 t_start = 0
+                 t_end = self.step
+             elif t > (self.dims[0] - self.step // 2):
+                 t_start = self.dims[0] - self.step
+                 t_end = self.dims[0]
+             else:
+                 t_start = t - (self.step // 2)
+                 t_end = t + (self.step // 2)
+             min_y, max_y = np.max(mask_coord[t_start:t_end, 0]), np.min(mask_coord[t_start:t_end, 1])
+             min_x, max_x = np.max(mask_coord[t_start:t_end, 2]), np.min(mask_coord[t_start:t_end, 3])
+             # 2. Crop and normalize the focal image
+             image_i = contrasted_im[min_y:(max_y + 1), min_x:(max_x + 1)].astype(np.float64)
+             image_i /= np.mean(image_i)
+             image_i = OneImageAnalysis(image_i)
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 image_i2 = contrasted_im2[min_y:(max_y + 1), min_x:(max_x + 1)].astype(np.float64)
+                 image_i2 /= np.mean(image_i2)
+                 image_i.image2 = image_i2
+             mask = (self.converted_video[t, ...] > 0).astype(np.uint8)
+         else:
+             mask = None
+         # 3. Bracket the focal image
+         if self.vars['grid_segmentation']:
+             int_variation_thresh = 100 - (np.ptp(contrasted_im) * 90 / 255)
+         else:
+             int_variation_thresh = None
+         analysisi = OneImageAnalysis(bracket_to_uint8_image_contrast(contrasted_im / np.mean(contrasted_im)))
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             analysisi.image2 = bracket_to_uint8_image_contrast(contrasted_im2 / np.mean(contrasted_im2))
+
+         if t == 0:
+             analysisi.previous_binary_image = self.origin
+         else:
+             analysisi.previous_binary_image = deepcopy(self.segmentation[t - 1, ...])
+
+         analysisi.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'],
+                                bio_label=self.vars["bio_label"], bio_label2=self.vars["bio_label2"],
+                                grid_segmentation=self.vars['grid_segmentation'],
+                                lighter_background=self.vars['lighter_background'],
+                                side_length=20, step=5, int_variation_thresh=int_variation_thresh, mask=mask)
+         return analysisi
+
+     def lum_value_segmentation(self, converted_video, do_threshold_segmentation):
+         shape_motion_failed: bool = False
+         if self.vars['lighter_background']:
+             covering_l_values = np.min(converted_video[:self.substantial_time, :, :],
+                                        0) * self.substantial_image
+         else:
+             covering_l_values = np.max(converted_video[:self.substantial_time, :, :],
+                                        0) * self.substantial_image
+         # Avoid errors by checking whether the covering values are nonzero
+         covering_l_values = covering_l_values[covering_l_values != 0]
+         if len(covering_l_values) == 0:
+             shape_motion_failed = True
+         if not shape_motion_failed:
+             value_segmentation_thresholds = np.arange(0.8, -0.7, -0.1)
+             validated_thresholds = np.zeros(value_segmentation_thresholds.shape, dtype=bool)
+             counter = 0
+             while_condition = True
+             max_motion_per_frame = (self.dims[1] * self.dims[2]) * self.vars['maximal_growth_factor'] * 2
+             if self.vars['lighter_background']:
+                 basic_bckgrnd_values = np.quantile(converted_video[:(self.lost_frames + 1), ...], 0.9, axis=(1, 2))
+             else:
+                 basic_bckgrnd_values = np.quantile(converted_video[:(self.lost_frames + 1), ...], 0.1, axis=(1, 2))
+             # Try different values of value_threshold and keep the ones that do not
+             # segment more than x percent of the image
+             while counter <= 14:
+                 value_threshold = value_segmentation_thresholds[counter]
+                 if self.vars['lighter_background']:
+                     l_threshold = (1 + value_threshold) * np.max(covering_l_values)
+                 else:
+                     l_threshold = (1 - value_threshold) * np.min(covering_l_values)
+                 starting_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video[:(self.lost_frames + 1), ...],
+                                                                                       basic_bckgrnd_values, l_threshold,
+                                                                                       self.vars['lighter_background'])
+                 changing_pixel_number = np.sum(np.absolute(np.diff(starting_segmentation.astype(np.int8), 1, 0)), (1, 2))
+                 validation = np.max(np.sum(starting_segmentation, (1, 2))) < max_motion_per_frame and (
+                     np.max(changing_pixel_number) < max_motion_per_frame)
+                 validated_thresholds[counter] = validation
+                 if np.any(validated_thresholds):
+                     if not validation:
+                         break
+                 counter += 1
+             # If any thresholds were accepted, keep the third accepted one
+             # (or the last accepted one if there are fewer than three) for the final thresholding
+             valid_number = validated_thresholds.sum()
+             if valid_number > 0:
+                 if valid_number > 2:
+                     index_to_keep = 2
+                 else:
+                     index_to_keep = valid_number - 1
+                 value_threshold = value_segmentation_thresholds[
+                     np.uint8(np.floor(np.mean(np.nonzero(validated_thresholds)[0][index_to_keep])))]
+             else:
+                 value_threshold = 0
+
+             if self.vars['lighter_background']:
+                 l_threshold = (1 + value_threshold) * np.max(covering_l_values)
+             else:
+                 l_threshold = (1 - value_threshold) * np.min(covering_l_values)
+             if do_threshold_segmentation:
+                 if self.vars['lighter_background']:
+                     basic_bckgrnd_values = np.quantile(converted_video, 0.9, axis=(1, 2))
+                 else:
+                     basic_bckgrnd_values = np.quantile(converted_video, 0.1, axis=(1, 2))
+                 luminosity_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video, basic_bckgrnd_values,
+                                                                                         l_threshold, self.vars['lighter_background'])
+             else:
+                 luminosity_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video[:(self.lost_frames + 1), ...],
+                                                                                         basic_bckgrnd_values, l_threshold,
+                                                                                         self.vars['lighter_background'])
+         else:
+             luminosity_segmentation = None
+             l_threshold_over_time = None
+         return luminosity_segmentation, l_threshold_over_time
+
+     def smooth_pixel_slopes(self, converted_video):
+         try:
+             if self.vars['lose_accuracy_to_save_memory']:
+                 smoothed_video = np.zeros(self.dims, dtype=np.float16)
+             else:
+                 smoothed_video = np.zeros(self.dims, dtype=np.float64)
+             smooth_kernel = np.ones(self.step) / self.step
+             for i in np.arange(converted_video.shape[1]):
+                 for j in np.arange(converted_video.shape[2]):
+                     padded = np.pad(converted_video[:, i, j] / self.mean_intensity_per_frame,
+                                     (self.step // 2, self.step - 1 - self.step // 2), mode='edge')
+                     moving_average = np.convolve(padded, smooth_kernel, mode='valid')
+                     if self.vars['repeat_video_smoothing'] > 1:
+                         for it in np.arange(1, self.vars['repeat_video_smoothing']):
+                             padded = np.pad(moving_average,
+                                             (self.step // 2, self.step - 1 - self.step // 2), mode='edge')
+                             moving_average = np.convolve(padded, smooth_kernel, mode='valid')
+                     # Assignment casts to the array dtype, so both precisions share this loop
+                     smoothed_video[:, i, j] = moving_average
+             return smoothed_video
+         except MemoryError:
+             logging.error("Not enough RAM available to smooth pixel curves. Detection may fail.")
+             return converted_video
+
+     def lum_slope_segmentation(self, converted_video):
+         shape_motion_failed: bool = False
+         gradient_segmentation = np.zeros(self.dims, np.uint8)
+         # 2) Contrast increase
+         oridx = np.nonzero(self.origin)
+         notoridx = np.nonzero(1 - self.origin)
+         do_increase_contrast = np.mean(converted_video[0, oridx[0], oridx[1]]) * 10 > np.mean(
+             converted_video[0, notoridx[0], notoridx[1]])
+         necessary_memory = self.dims[0] * self.dims[1] * self.dims[2] * 64 * 2 * 1.16415e-10
+         available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+         if self.vars['lose_accuracy_to_save_memory']:
+             derive = converted_video.astype(np.float16)
+         else:
+             derive = converted_video.astype(np.float64)
+         if necessary_memory > available_memory:
+             converted_video = None
+         if do_increase_contrast:
+             derive = np.square(derive)
+
+         # 3) Get the gradient
+         necessary_memory = derive.size * 64 * 4 * 1.16415e-10
+         available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+         if necessary_memory > available_memory:
+             for cy in np.arange(self.dims[1]):
+                 for cx in np.arange(self.dims[2]):
+                     if self.vars['lose_accuracy_to_save_memory']:
+                         derive[:, cy, cx] = np.gradient(derive[:, cy, cx], self.step).astype(np.float16)
+                     else:
+                         derive[:, cy, cx] = np.gradient(derive[:, cy, cx], self.step)
+         else:
+             if self.vars['lose_accuracy_to_save_memory']:
+                 derive = np.gradient(derive, self.step, axis=0).astype(np.float16)
+             else:
+                 derive = np.gradient(derive, self.step, axis=0)
+
+         # 4) Segment
+         if self.vars['lighter_background']:
+             covering_slopes = np.min(derive[:self.substantial_time, :, :], 0) * self.substantial_image
+         else:
+             covering_slopes = np.max(derive[:self.substantial_time, :, :], 0) * self.substantial_image
+         covering_slopes = covering_slopes[covering_slopes != 0]
+         if len(covering_slopes) == 0:
+             shape_motion_failed = True
+
+         if not shape_motion_failed:
+             # ease_slope_segmentation = 0.8
+             value_segmentation_thresholds = np.arange(0.8, -0.7, -0.1)
+             validated_thresholds = np.zeros(value_segmentation_thresholds.shape, dtype=bool)
+             counter = 0
+             while_condition = True
+             max_motion_per_frame = (self.dims[1] * self.dims[2]) * self.vars['maximal_growth_factor']
+             # Try different values of ease_slope_segmentation and keep the ones that do not
+             # segment more than x percent of the image
+             while counter <= 14:
+                 ease_slope_segmentation = value_segmentation_thresholds[counter]
+                 if self.vars['lighter_background']:
+                     gradient_threshold = (1 + ease_slope_segmentation) * np.max(covering_slopes)
+                     sample = np.less(derive[:self.substantial_time], gradient_threshold)
+                 else:
+                     gradient_threshold = (1 - ease_slope_segmentation) * np.min(covering_slopes)
+                     sample = np.greater(derive[:self.substantial_time], gradient_threshold)
+                 changing_pixel_number = np.sum(np.absolute(np.diff(sample.astype(np.int8), 1, 0)), (1, 2))
+                 validation = np.max(np.sum(sample, (1, 2))) < max_motion_per_frame and (
+                     np.max(changing_pixel_number) < max_motion_per_frame)
+                 validated_thresholds[counter] = validation
+                 if np.any(validated_thresholds):
+                     if not validation:
+                         break
+                 counter += 1
+             # If any thresholds were accepted, keep the third accepted one
+             # (or the last accepted one if there are fewer than three) for the final thresholding
+             valid_number = validated_thresholds.sum()
+             if valid_number > 0:
+                 if valid_number > 2:
+                     index_to_keep = 2
+                 else:
+                     index_to_keep = valid_number - 1
+                 ease_slope_segmentation = value_segmentation_thresholds[
+                     np.uint8(np.floor(np.mean(np.nonzero(validated_thresholds)[0][index_to_keep])))]
+             else:
+                 ease_slope_segmentation = 0
+
+             if self.vars['lighter_background']:
+                 gradient_threshold = (1 - ease_slope_segmentation) * np.max(covering_slopes)
+                 gradient_segmentation[:-self.lost_frames, :, :] = np.less(derive, gradient_threshold)[self.lost_frames:, :, :]
+             else:
+                 gradient_threshold = (1 - ease_slope_segmentation) * np.min(covering_slopes)
+                 gradient_segmentation[:-self.lost_frames, :, :] = np.greater(derive, gradient_threshold)[self.lost_frames:, :, :]
+         else:
+             gradient_segmentation = None
+         return gradient_segmentation
+
+     def update_ring_width(self):
+         # Make sure that self.pixel_ring_depth is odd and at least 3
+         if self.pixel_ring_depth <= 3:
+             self.pixel_ring_depth = 3
+         if self.pixel_ring_depth % 2 == 0:
+             self.pixel_ring_depth = self.pixel_ring_depth + 1
+         self.erodila_disk = Ellipse((self.pixel_ring_depth, self.pixel_ring_depth)).create().astype(np.uint8)
+         self.max_distance = self.pixel_ring_depth * self.vars['detection_range_factor']
764
+ def initialize_post_processing(self):
765
+ ## Initialization
766
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting Post_processing. Fading detection: {self.vars['do_fading']}: {self.vars['fading']}, Subtract background: {self.vars['subtract_background']}, Correct errors around initial shape: {self.vars['correct_errors_around_initial']}, Connect distant shapes: {self.vars['detection_range_factor'] > 0}, How to select appearing cell(s): {self.vars['appearance_detection_method']}")
767
+
768
+ self.binary = np.zeros(self.dims[:3], dtype=np.uint8)
769
+ if self.origin.shape[0] != self.binary[self.start - 1, :, :].shape[0] or self.origin.shape[1] != self.binary[self.start - 1, :, :].shape[1]:
770
+ logging.error("Unaltered videos deprecated, they have been created with different settings.\nDelete .npy videos and Data to run Cellects quickly.pkl and re-run")
771
+
772
+ if self.vars['origin_state'] == "invisible":
773
+ self.binary[self.start - 1, :, :] = deepcopy(self.origin)
774
+ self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = self.converted_video[self.start, self.origin_idx[0], self.origin_idx[1]]
775
+ else:
776
+ if self.vars['origin_state'] == "fluctuating":
777
+ self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = np.median(self.converted_video[:self.start, self.origin_idx[0], self.origin_idx[1]], axis=0)
778
+
779
+ self.binary[:self.start, :, :] = np.repeat(np.expand_dims(self.origin, 0), self.start, axis=0)
780
+ if self.start < self.step:
781
+ frames_to_assess = self.step
782
+ self.segmentation[self.start - 1, ...] = self.binary[self.start - 1, :, :]
783
+ for t in np.arange(self.start, self.lost_frames):
784
+ # Only keep pixels that are always detected
785
+ always_found = np.sum(self.segmentation[t:(t + frames_to_assess), ...], 0)
786
+ always_found = always_found == frames_to_assess
787
+ # Remove too small shapes
788
+ without_small, stats, centro = cc(always_found.astype(np.uint8))
789
+ large_enough = np.nonzero(stats[1:, 4] > ((self.vars['first_move_threshold'] + 1) // 2))[0]
790
+ if len(large_enough) > 0:
791
+ always_found *= np.isin(always_found, large_enough + 1)
792
+ always_found = np.logical_or(always_found, self.segmentation[t - 1, ...])
793
+ self.segmentation[t, ...] *= always_found
794
+ else:
795
+ self.segmentation[t, ...] = 0
796
+ self.segmentation[t, ...] = np.logical_or(self.segmentation[t - 1, ...], self.segmentation[t, ...])
797
+ self.mean_distance_per_frame = None
798
+ self.surfarea = np.zeros(self.dims[0], dtype =np.uint64)
799
+ self.surfarea[:self.start] = np.sum(self.binary[:self.start, :, :], (1, 2))
800
+ self.gravity_field = make_gravity_field(self.binary[(self.start - 1), :, :],
801
+ np.sqrt(np.sum(self.binary[(self.start - 1), :, :])))
802
+ if self.vars['correct_errors_around_initial']:
803
+ self.rays, self.sun = draw_me_a_sun(self.binary[(self.start - 1), :, :], cross_33, ray_length_coef=1.25) # plt.imshow(sun)
804
+ self.holes = np.zeros(self.dims[1:], dtype=np.uint8)
805
+ self.pixel_ring_depth += 2
806
+ self.update_ring_width()
807
+
808
+ if self.vars['prevent_fast_growth_near_periphery']:
809
+ self.near_periphery = np.zeros(self.dims[1:])
810
+ if self.vars['arena_shape'] == 'circle':
811
+ periphery_width = self.vars['periphery_width'] * 2
812
+ elliperiphery = Ellipse((self.dims[1] - periphery_width, self.dims[2] - periphery_width)).create()
813
+ half_width = periphery_width // 2
814
+ if periphery_width % 2 == 0:
815
+ self.near_periphery[half_width:-half_width, half_width:-half_width] = elliperiphery
816
+ else:
817
+ self.near_periphery[half_width:-half_width - 1, half_width:-half_width - 1] = elliperiphery
818
+ self.near_periphery = 1 - self.near_periphery
819
+ else:
820
+ self.near_periphery[:self.vars['periphery_width'], :] = 1
821
+ self.near_periphery[-self.vars['periphery_width']:, :] = 1
822
+ self.near_periphery[:, :self.vars['periphery_width']] = 1
823
+ self.near_periphery[:, -self.vars['periphery_width']:] = 1
824
+ self.near_periphery = np.nonzero(self.near_periphery)
825
+ # near_periphery = np.zeros(self.dims[1:])
826
+ # near_periphery[self.near_periphery] = 1
827
+
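+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # The early-frame stabilization loop in initialize_post_processing() keeps a pixel
+ # only when it is detected in every one of the next `frames_to_assess` frames, then
+ # discards connected components that are too small. A minimal standalone version of
+ # that idea, assuming a (T, Y, X) uint8 segmentation stack:
+ import numpy as np
+ import cv2
+ def stabilize_frame(segmentation, t, frames_to_assess, min_size):
+     # Pixels detected in all assessed frames
+     window = segmentation[t:t + frames_to_assess]
+     always_found = (window.sum(axis=0) == frames_to_assess).astype(np.uint8)
+     # Drop connected components smaller than min_size (background label 0 excluded)
+     n_labels, labels, stats, _ = cv2.connectedComponentsWithStats(always_found)
+     keep = np.nonzero(stats[1:, cv2.CC_STAT_AREA] >= min_size)[0] + 1
+     return np.isin(labels, keep).astype(np.uint8)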
828
+ def update_shape(self, show_seg):
829
+
830
+ # Get from gradients, a 2D matrix of potentially covered pixels
831
+ # I/ dilate the shape made with covered pixels to assess for covering
832
+
833
+ # I/ 1) Only keep pixels that have been detected at least two times in the three previous frames
834
+ if self.dims[0] < 100:
835
+ new_potentials = self.segmentation[self.t, :, :]
836
+ else:
837
+ if self.t > 1:
838
+ new_potentials = np.sum(self.segmentation[(self.t - 2): (self.t + 1), :, :], 0, dtype=np.uint8)
839
+ else:
840
+ new_potentials = np.sum(self.segmentation[: (self.t + 1), :, :], 0, dtype=np.uint8)
841
+ new_potentials[new_potentials == 1] = 0
842
+ new_potentials[new_potentials > 1] = 1
843
+
844
+ # I/ 2) If the combined image contains new potential pixels over more than 50% of the image,
845
+ # one of the summed frames is considered noisy and we fall back to a single frame.
846
+ frame_counter = -1
847
+ maximal_size = 0.5 * new_potentials.size
848
+ if (self.vars["do_threshold_segmentation"] or self.vars["frame_by_frame_segmentation"]) and self.t > np.max((self.start + self.step, 6)):
849
+ maximal_size = np.min((np.max(self.binary[:self.t].sum((1, 2))) * (1 + self.vars['maximal_growth_factor']), self.borders.sum()))
850
+ while np.logical_and(np.sum(new_potentials) > maximal_size,
851
+ frame_counter <= 5): # np.logical_and(np.sum(new_potentials > 0) > 5 * np.sum(dila_ring), frame_counter <= 5):
852
+ frame_counter += 1
853
+ if frame_counter > self.t:
854
+ break
855
+ else:
856
+ if frame_counter < 5:
857
+ new_potentials = self.segmentation[self.t - frame_counter, :, :]
858
+ else:
859
+ # If taking only one image is not enough, use the inverse of the fading matrix as new_potentials
860
+ # Since it has not been processed by any slope calculation, it should be less noisy
861
+ new_potentials = np.sum(self.segmentation[(self.t - 5): (self.t + 1), :, :], 0, dtype=np.uint8)
862
+ new_potentials[new_potentials < 6] = 0
863
+ new_potentials[new_potentials == 6] = 1
864
+
865
+
866
+ new_shape = deepcopy(self.binary[self.t - 1, :, :])
867
+ new_potentials = cv2.morphologyEx(new_potentials, cv2.MORPH_CLOSE, cross_33)
868
+ new_potentials = cv2.morphologyEx(new_potentials, cv2.MORPH_OPEN, cross_33) * self.borders
869
+ new_shape = np.logical_or(new_shape, new_potentials).astype(np.uint8)
870
+ # Add distant shapes within a radius, scoring every added pixel according to its distance
871
+ if not self.vars['several_blob_per_arena']:
872
+ if new_shape.sum() == 0:
873
+ new_shape = deepcopy(new_potentials)
874
+ else:
875
+ pads = ProgressivelyAddDistantShapes(new_potentials, new_shape, self.max_distance)
876
+ r = weakref.ref(pads)
877
+ # If max_distance is non-null, look for distant shapes
878
+ pads.consider_shapes_sizes(self.vars['min_size_for_connection'],
879
+ self.vars['max_size_for_connection'])
880
+ pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=True)
881
+
882
+ new_shape = deepcopy(pads.expanded_shape)
883
+ new_shape[new_shape > 1] = 1
884
+ if np.logical_and(self.t > self.step, self.t < self.dims[0]):
885
+ if np.any(pads.expanded_shape > 5):
886
+ # Add distant shapes back in time at the covering speed of neighbors
887
+ self.binary[self.t][np.nonzero(new_shape)] = 1
888
+ self.binary[(self.step):(self.t + 1), :, :] = \
889
+ pads.modify_past_analysis(self.binary[(self.step):(self.t + 1), :, :],
890
+ self.segmentation[(self.step):(self.t + 1), :, :])
891
+ new_shape = deepcopy(self.binary[self.t, :, :])
892
+ pads = None
893
+
894
+ # Fill holes
895
+ new_shape = cv2.morphologyEx(new_shape, cv2.MORPH_CLOSE, cross_33)
896
+
897
+ if self.vars['do_fading'] and (self.t > self.step + self.lost_frames):
898
+ # Shape Erosion
899
+ # I/ After a substantial growth, erode the shape made with covered pixels to assess for fading
900
+ # Use the newly covered pixels to calculate their mean covering intensity
901
+ new_idx = np.nonzero(np.logical_xor(new_shape, self.binary[self.t - 1, :, :]))
902
+ start_intensity_monitoring = self.t - self.lost_frames - self.step
903
+ end_intensity_monitoring = self.t - self.lost_frames
904
+ self.covering_intensity[new_idx[0], new_idx[1]] = np.median(self.converted_video[start_intensity_monitoring:end_intensity_monitoring, new_idx[0], new_idx[1]], axis=0)
905
+ previous_binary = self.binary[self.t - 1, :, :]
906
+ greyscale_image = self.converted_video[self.t - self.lost_frames, :, :]
907
+ protect_from_fading = None
908
+ if self.vars['origin_state'] == 'constant':
909
+ protect_from_fading = self.origin
910
+ new_shape, self.covering_intensity = cell_leaving_detection(new_shape, self.covering_intensity, previous_binary, greyscale_image, self.vars['fading'], self.vars['lighter_background'], self.vars['several_blob_per_arena'], self.erodila_disk, protect_from_fading)
911
+
912
+ self.covering_intensity *= new_shape
913
+ self.binary[self.t, :, :] = new_shape * self.borders
914
+ self.surfarea[self.t] = np.sum(self.binary[self.t, :, :])
915
+
916
+ # Calculate the mean distance covered per frame and correct for a ring of pixels that do not really fade
917
+ if self.mean_distance_per_frame is None:
918
+ if self.vars['correct_errors_around_initial'] and not self.vars['several_blob_per_arena']:
919
+ if np.logical_and((self.t % 20) == 0,
920
+ np.logical_and(self.surfarea[self.t] > self.substantial_growth,
921
+ self.surfarea[self.t] < self.substantial_growth * 2)):
922
+ shape = self.binary[self.t, :, :] * self.sun
923
+ back = (1 - self.binary[self.t, :, :]) * self.sun
924
+ for ray in self.rays:
925
+ # For each of the sun's rays, see how it crosses the shape/background and
926
+ # store the gravity_field value of these pixels (distance to the original shape).
927
+ ray_through_shape = (shape == ray) * self.gravity_field
928
+ ray_through_back = (back == ray) * self.gravity_field
929
+ if np.any(ray_through_shape):
930
+ if np.any(ray_through_back):
931
+ # If at least one back pixel is nearer to the original shape than a shape pixel,
932
+ # there is a hole to fill.
933
+ if np.any(ray_through_back > np.min(ray_through_shape[ray_through_shape > 0])):
934
+ # Check whether the nearest pixels are shape; if so, suppress them until the nearest pixel
935
+ # becomes background
936
+ while np.max(ray_through_back) <= np.max(ray_through_shape):
937
+ ray_through_shape[ray_through_shape == np.max(ray_through_shape)] = 0
938
+ # Now, all back pixels that are nearer than the closest shape pixel should get filled
939
+ # To do so, replace back pixels further than the nearest shape pixel by 0
940
+ ray_through_back[ray_through_back < np.max(ray_through_shape)] = 0
941
+ self.holes[np.nonzero(ray_through_back)] = 1
942
+ else:
943
+ self.rays = np.concatenate((self.rays[:(ray - 2)], self.rays[(ray - 1):]))
944
+ ray_through_shape = None
945
+ ray_through_back = None
946
+ if np.any(self.surfarea[:self.t] > self.substantial_growth * 2):
947
+
948
+ if self.vars['correct_errors_around_initial'] and not self.vars['several_blob_per_arena']:
949
+ # Apply the hole correction
950
+ self.holes = cv2.morphologyEx(self.holes, cv2.MORPH_CLOSE, cross_33, iterations=10)
951
+ # If some holes are not covered by now
952
+ if np.any(self.holes * (1 - self.binary[self.t, :, :])):
953
+ self.binary[:(self.t + 1), :, :], holes_time_end, distance_against_time = \
954
+ expand_to_fill_holes(self.binary[:(self.t + 1), :, :], self.holes)
955
+ if holes_time_end is not None:
956
+ self.binary[holes_time_end:(self.t + 1), :, :] += self.binary[holes_time_end, :, :]
957
+ self.binary[holes_time_end:(self.t + 1), :, :][
958
+ self.binary[holes_time_end:(self.t + 1), :, :] > 1] = 1
959
+ self.surfarea[:(self.t + 1)] = np.sum(self.binary[:(self.t + 1), :, :], (1, 2))
960
+
961
+ else:
962
+ distance_against_time = [1, 2]
963
+ else:
964
+ distance_against_time = [1, 2]
965
+ distance_against_time = np.diff(distance_against_time)
966
+ if len(distance_against_time) > 0:
967
+ self.mean_distance_per_frame = np.mean(- distance_against_time)
968
+ else:
969
+ self.mean_distance_per_frame = 1
970
+
971
+ if self.vars['prevent_fast_growth_near_periphery']:
972
+ # growth_near_periphery = np.diff(self.binary[self.t-1:self.t+1, :, :] * self.near_periphery, axis=0)
973
+ growth_near_periphery = np.diff(self.binary[self.t-1:self.t+1, self.near_periphery[0], self.near_periphery[1]], axis=0)
974
+ if (growth_near_periphery == 1).sum() > self.vars['max_periphery_growth']:
975
+ # self.binary[self.t, self.near_periphery[0], self.near_periphery[1]] = self.binary[self.t - 1, self.near_periphery[0], self.near_periphery[1]]
976
+ periphery_to_remove = np.zeros(self.dims[1:], dtype=np.uint8)
977
+ periphery_to_remove[self.near_periphery[0], self.near_periphery[1]] = self.binary[self.t, self.near_periphery[0], self.near_periphery[1]]
978
+ shapes, stats, centers = cc(periphery_to_remove)
979
+ periphery_to_remove = np.nonzero(np.isin(shapes, np.nonzero(stats[:, 4] > self.vars['max_periphery_growth'])[0][1:]))
980
+ self.binary[self.t, periphery_to_remove[0], periphery_to_remove[1]] = self.binary[self.t - 1, periphery_to_remove[0], periphery_to_remove[1]]
981
+ if not self.vars['several_blob_per_arena']:
982
+ shapes, stats, centers = cc(self.binary[self.t, ...])
983
+ shapes[shapes != 1] = 0
984
+ self.binary[self.t, ...] = shapes
985
+
986
+ # Display
987
+
988
+ if show_seg:
989
+ if self.visu is not None:
990
+ im_to_display = deepcopy(self.visu[self.t, ...])
991
+ contours = np.nonzero(cv2.morphologyEx(self.binary[self.t, :, :], cv2.MORPH_GRADIENT, cross_33))
992
+ if self.vars['lighter_background']:
993
+ im_to_display[contours[0], contours[1]] = 0
994
+ else:
995
+ im_to_display[contours[0], contours[1]] = 255
996
+ else:
997
+ im_to_display = self.binary[self.t, :, :] * 255
998
+ imtoshow = resize(im_to_display, (540, 540))
999
+ cv2.imshow("shape_motion", imtoshow)
1000
+ waitKey(1)
1001
+ self.t += 1
1002
+
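+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # update_shape() starts from a temporal majority vote: a pixel is a "new potential"
+ # when it is detected in at least two of the three most recent frames. In isolation:
+ import numpy as np
+ def majority_vote(segmentation, t):
+     # Sum the (up to) three frames ending at t; two or more detections win.
+     votes = segmentation[max(t - 2, 0):t + 1].sum(axis=0)
+     return (votes >= 2).astype(np.uint8)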
1003
+ def save_coord_specimen_and_contour(self):
1004
+ if self.vars['save_coord_specimen']:
1005
+ np.save(f"coord_specimen{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1006
+ smallest_memory_array(np.nonzero(self.binary), "uint"))
1007
+ if self.vars['save_coord_contour']:
1008
+ contours = np.zeros(self.dims[:3], np.uint8)
1009
+ for frame in range(self.dims[0]):
1010
+ eroded_binary = cv2.erode(self.binary[frame, ...], cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1011
+ contours[frame, ...] = self.binary[frame, ...] - eroded_binary
1012
+ np.save(f"coord_contour{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1013
+ smallest_memory_array(np.nonzero(contours), "uint"))
1014
+
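+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # The contour coordinates saved above come from a one-pixel morphological gradient:
+ # the binary mask minus its erosion (cross_33 is assumed to be the 3x3 cross
+ # structuring element built below). Standalone:
+ import numpy as np
+ import cv2
+ def one_pixel_contour(mask):
+     cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
+     eroded = cv2.erode(mask, cross, borderType=cv2.BORDER_CONSTANT, borderValue=0)
+     return mask - eroded  # 1 on the contour, 0 elsewhere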
1015
+ def get_descriptors_from_binary(self, release_memory=True):
1016
+ ##
1017
+ if release_memory:
1018
+ self.substantial_image = None
1019
+ self.covering_intensity = None
1020
+ self.segmentation = None
1021
+ self.gravity_field = None
1022
+ self.sun = None
1023
+ self.rays = None
1024
+ self.holes = None
1025
+ collect()
1026
+ self.save_coord_specimen_and_contour()
1027
+ if self.vars['do_fading']:
1028
+ self.newly_explored_area = np.zeros(self.dims[0], dtype=np.uint64)
1029
+ self.already_explored_area = deepcopy(self.origin)
1030
+ for self.t in range(self.dims[0]):
1031
+ self.newly_explored_area[self.t] = ((self.binary[self.t, :, :] - self.already_explored_area) == 1).sum()
1032
+ self.already_explored_area = np.logical_or(self.already_explored_area, self.binary[self.t, :, :])
1033
+
1034
+ self.surfarea = self.binary.sum((1, 2))
1035
+ timings = self.vars['exif']
1036
+ if len(timings) < self.dims[0]:
1037
+ timings = np.arange(self.dims[0])
1038
+ if np.any(timings > 0):
1039
+ self.time_interval = np.mean(np.diff(timings))
1040
+ timings = timings[:self.dims[0]]
1041
+ available_descriptors_in_sd = list(from_shape_descriptors_class.keys())
1042
+ # ["area", "perimeter", "circularity", "rectangularity", "total_hole_area", "solidity",
1043
+ # "convexity", "eccentricity", "euler_number", "standard_deviation_y",
1044
+ # "standard_deviation_x", "skewness_y", "skewness_x", "kurtosis_y", "kurtosis_x",
1045
+ # "major_axis_len", "minor_axis_len", "axes_orientation"]
1046
+ all_descriptors = []
1047
+ to_compute_from_sd = []
1048
+ for name, do_compute in self.vars['descriptors'].items():
1049
+ if do_compute:
1050
+ all_descriptors.append(name)
1051
+ if np.isin(name, available_descriptors_in_sd):
1052
+ to_compute_from_sd.append(name)
1053
+ self.compute_solidity_separately: bool = self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not np.isin("solidity", to_compute_from_sd)
1054
+ if self.compute_solidity_separately:
1055
+ self.solidity = np.zeros(self.dims[0], dtype=np.float64)
1056
+ if not self.vars['several_blob_per_arena']:
1057
+ self.one_row_per_frame = pd.DataFrame(np.zeros((self.dims[0], 2 + len(all_descriptors))),
1058
+ columns=['arena', 'time'] + all_descriptors)
1059
+ self.one_row_per_frame['arena'] = [self.one_descriptor_per_arena['arena']] * self.dims[0]
1060
+ self.one_row_per_frame['time'] = timings
1061
+ # solidity must be added if the growth transition detection is enabled
1062
+ origin = self.binary[0, :, :]
1063
+ self.one_descriptor_per_arena["first_move"] = pd.NA
1064
+
1065
+ for t in np.arange(self.dims[0]):
1066
+ SD = ShapeDescriptors(self.binary[t, :, :], to_compute_from_sd)
1067
+
1068
+
1069
+ # NEW
1070
+ for descriptor in to_compute_from_sd:
1071
+ self.one_row_per_frame.loc[t, descriptor] = SD.descriptors[descriptor]
1072
+ # Old
1073
+ # self.one_row_per_frame.iloc[t, 2: 2 + len(descriptors)] = SD.descriptors.values()
1074
+
1075
+
1076
+ if self.compute_solidity_separately:
1077
+ solidity = ShapeDescriptors(self.binary[t, :, :], ["solidity"])
1078
+ self.solidity[t] = solidity.descriptors["solidity"]
1079
+ # self.solidity[t] = list(solidity.descriptors.values())[0]
1080
+ # I) Find a first pseudopod [aim: time]
1081
+ if pd.isna(self.one_descriptor_per_arena["first_move"]):
1082
+ if self.surfarea[t] >= (origin.sum() + self.vars['first_move_threshold']):
1083
+ self.one_descriptor_per_arena["first_move"] = t
1084
+
1085
+ # Apply the scale to the variables
1086
+ if self.vars['output_in_mm']:
1087
+ if np.isin('area', to_compute_from_sd):
1088
+ self.one_row_per_frame['area'] *= self.vars['average_pixel_size']
1089
+ if np.isin('total_hole_area', to_compute_from_sd):
1090
+ self.one_row_per_frame['total_hole_area'] *= self.vars['average_pixel_size']
1091
+ if np.isin('perimeter', to_compute_from_sd):
1092
+ self.one_row_per_frame['perimeter'] *= np.sqrt(self.vars['average_pixel_size'])
1093
+ if np.isin('major_axis_len', to_compute_from_sd):
1094
+ self.one_row_per_frame['major_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1095
+ if np.isin('minor_axis_len', to_compute_from_sd):
1096
+ self.one_row_per_frame['minor_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1097
+ else:
1098
+ # Objective: create a matrix with 4 columns (time, colony, y, x) containing the coordinates of all colonies
1099
+ # against time
1100
+ self.one_descriptor_per_arena["first_move"] = 1
1101
+ max_colonies = 0
1102
+ for t in np.arange(self.dims[0]):
1103
+ nb, shapes = cv2.connectedComponents(self.binary[t, :, :])
1104
+ max_colonies = np.max((max_colonies, nb))
1105
+
1106
+ time_descriptor_colony = np.zeros((self.dims[0], len(to_compute_from_sd) * max_colonies * self.dims[0]),
1107
+ dtype=np.float32)  # generous upper bound on the colony count; trimmed after the loop
1108
+ colony_number = 0
1109
+ colony_id_matrix = np.zeros(self.dims[1:], dtype=np.uint64)
1110
+ coord_colonies = []
1111
+ centroids = []
1112
+
1113
+ pat_tracker = PercentAndTimeTracker(self.dims[0], compute_with_elements_number=True)
1114
+ for t in np.arange(self.dims[0]): #21):#
1115
+ # t=0
1116
+ # t+=1
1117
+ # We rank colonies in increasing order to make sure that the larger colony resulting from a colony division
1118
+ # keeps the previous colony name.
1119
+ shapes, stats, centers = cc(self.binary[t, :, :])
1120
+
1121
+ # Consider shapes of 3 pixels or fewer as noise. The loop will stop at nb and not compute them
1122
+ nb = stats[stats[:, 4] >= 4].shape[0]
1123
+
1124
+ # nb = stats.shape[0]
1125
+ current_percentage, eta = pat_tracker.get_progress(t, element_number=nb)
1126
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}, Colony descriptors computation: {current_percentage}%{eta}")
1127
+
1128
+ updated_colony_names = np.zeros(1, dtype=np.uint32)
1129
+ for colony in (np.arange(nb - 1) + 1): # 120)):# #92
1130
+ # colony = 1
1131
+ # colony+=1
1132
+ # logging.info(f'Colony number {colony}')
1133
+ current_colony_img = (shapes == colony).astype(np.uint8)
1134
+
1135
+ # I/ Find out which names the current colony had at t-1
1136
+ colony_previous_names = np.unique(current_colony_img * colony_id_matrix)
1137
+ colony_previous_names = colony_previous_names[colony_previous_names != 0]
1138
+ # II/ Find out if the current colony name had already been analyzed at t
1139
+ # If there is no match with the saved colony_id_matrix, assign a new colony ID
1140
+ if t == 0 or len(colony_previous_names) == 0:
1141
+ # logging.info("New colony")
1142
+ colony_number += 1
1143
+ colony_names = [colony_number]
1144
+ # If there is at least 1 match with the saved colony_id_matrix, we keep the colony_previous_name(s)
1145
+ else:
1146
+ colony_names = colony_previous_names.tolist()
1147
+ # Handle colony division if necessary
1148
+ if np.any(np.isin(updated_colony_names, colony_names)):
1149
+ colony_number += 1
1150
+ colony_names = [colony_number]
1151
+
1152
+ # Update colony ID matrix for the current frame
1153
+ coords = np.nonzero(current_colony_img)
1154
+ colony_id_matrix[coords[0], coords[1]] = colony_names[0]
1155
+
1156
+ # Add coordinates to coord_colonies
1157
+ time_column = np.full(coords[0].shape, t, dtype=np.uint32)
1158
+ colony_column = np.full(coords[0].shape, colony_names[0], dtype=np.uint32)
1159
+ coord_colonies.append(np.column_stack((time_column, colony_column, coords[0], coords[1])))
1160
+
1161
+ # Calculate centroid and add to centroids list
1162
+ centroid_x, centroid_y = centers[colony, :]
1163
+ centroids.append((t, colony_names[0], centroid_y, centroid_x))
1164
+
1165
+ # Compute shape descriptors
1166
+ SD = ShapeDescriptors(current_colony_img, to_compute_from_sd)
1167
+ descriptors = SD.descriptors  # keep as a dict so the mm-scaling below can index by name
1168
+ # Adjust descriptors if output_in_mm is specified
1169
+ if self.vars['output_in_mm']:
1170
+ if 'area' in to_compute_from_sd:
1171
+ descriptors['area'] *= self.vars['average_pixel_size']
1172
+ if 'total_hole_area' in to_compute_from_sd:
1173
+ descriptors['total_hole_area'] *= self.vars['average_pixel_size']
1174
+ if 'perimeter' in to_compute_from_sd:
1175
+ descriptors['perimeter'] *= np.sqrt(self.vars['average_pixel_size'])
1176
+ if 'major_axis_len' in to_compute_from_sd:
1177
+ descriptors['major_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1178
+ if 'minor_axis_len' in to_compute_from_sd:
1179
+ descriptors['minor_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1180
+
1181
+ # Store descriptors in time_descriptor_colony
1182
+ descriptor_index = (colony_names[0] - 1) * len(to_compute_from_sd)
1183
+ time_descriptor_colony[t, descriptor_index:(descriptor_index + len(descriptors))] = list(descriptors.values())
1184
+
1185
+ updated_colony_names = np.append(updated_colony_names, colony_names)
1186
+
1187
+ # Reset colony_id_matrix for the next frame
1188
+ colony_id_matrix *= self.binary[t, :, :]
1189
+
1190
+ coord_colonies = np.vstack(coord_colonies)
1191
+ centroids = np.array(centroids, dtype=np.float32)
1192
+ time_descriptor_colony = time_descriptor_colony[:, :(colony_number*len(to_compute_from_sd))]
1193
+
1194
+ if self.vars['save_coord_specimen']:
1195
+ coord_colonies = pd.DataFrame(coord_colonies, columns=["time", "colony", "y", "x"])
1196
+ coord_colonies.to_csv(f"coord_colonies{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_col{colony_number}_y{self.dims[1]}_x{self.dims[2]}.csv", sep=';', index=False, lineterminator='\n')
1197
+
1198
+ centroids = pd.DataFrame(centroids, columns=["time", "colony", "y", "x"])
1199
+ centroids.to_csv(f"colony_centroids{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_col{colony_number}_y{self.dims[1]}_x{self.dims[2]}.csv", sep=';', index=False, lineterminator='\n')
1200
+
1201
+ # Format the final dataframe to have one row per time frame, and one column per descriptor_colony_name
1202
+ self.one_row_per_frame = pd.DataFrame({'arena': self.one_descriptor_per_arena['arena'], 'time': timings, 'area_total': self.surfarea.astype(np.float64)})
1203
+ if self.vars['output_in_mm']:
1204
+ self.one_row_per_frame['area_total'] *= self.vars['average_pixel_size']
1205
+ column_names = np.char.add(np.repeat(to_compute_from_sd, colony_number),
1206
+ np.tile((np.arange(colony_number) + 1).astype(str), len(to_compute_from_sd)))
1207
+ time_descriptor_colony = pd.DataFrame(time_descriptor_colony, columns=column_names)
1208
+ self.one_row_per_frame = pd.concat([self.one_row_per_frame, time_descriptor_colony], axis=1)
1209
+
1210
+
1211
+ if self.vars['do_fading']:
1212
+ self.one_row_per_frame['newly_explored_area'] = self.newly_explored_area
1213
+ if self.vars['output_in_mm']:
1214
+ self.one_row_per_frame['newly_explored_area'] *= self.vars['average_pixel_size']
1215
+
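+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # The colony tracking above propagates identities by overlap: a connected component
+ # at frame t inherits the ID written into colony_id_matrix at t-1; no overlap, or an
+ # ID already claimed at t (a division), yields a fresh ID. A toy version of the rule:
+ import numpy as np
+ import cv2
+ def track_labels(prev_id_matrix, current_mask, next_free_id):
+     n_labels, labels = cv2.connectedComponents(current_mask)
+     new_id_matrix = np.zeros_like(prev_id_matrix)
+     claimed = set()
+     for comp in range(1, n_labels):
+         overlap = [i for i in np.unique(prev_id_matrix[labels == comp]) if i != 0]
+         if not overlap or overlap[0] in claimed:
+             comp_id, next_free_id = next_free_id, next_free_id + 1  # new colony
+         else:
+             comp_id = overlap[0]  # keep the previous name
+         claimed.add(comp_id)
+         new_id_matrix[labels == comp] = comp_id
+     return new_id_matrix, next_free_id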
1216
+ def detect_growth_transitions(self):
1217
+ ##
1218
+ if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena']:
1219
+ self.one_descriptor_per_arena["iso_digi_transi"] = pd.NA
1220
+ if not pd.isna(self.one_descriptor_per_arena["first_move"]):
1221
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting growth transition analysis.")
1222
+
1223
+ # II) Once a pseudopod is deployed, look for a disk/ around the original shape
1224
+ growth_beginning = self.surfarea < ((self.surfarea[0] * 1.2) + ((self.dims[1] / 4) * (self.dims[2] / 4)))
1225
+ dilated_origin = cv2.dilate(self.binary[self.one_descriptor_per_arena["first_move"], :, :], kernel=cross_33, iterations=10, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1226
+ isisotropic = np.sum(self.binary[:, :, :] * dilated_origin, (1, 2))
1227
+ isisotropic *= growth_beginning
1228
+ # Ask if the dilated origin area is 90% covered during the growth beginning
1229
+ isisotropic = isisotropic > 0.9 * dilated_origin.sum()
1230
+ if np.any(isisotropic):
1231
+ self.one_descriptor_per_arena["is_growth_isotropic"] = 1
1232
+ # Determine a solidity reference to look for a potential breaking of the isotropic growth
1233
+ if self.compute_solidity_separately:
1234
+ solidity_reference = np.mean(self.solidity[:self.one_descriptor_per_arena["first_move"]])
1235
+ different_solidity = self.solidity < (0.9 * solidity_reference)
1236
+ del self.solidity
1237
+ else:
1238
+ solidity_reference = np.mean(
1239
+ self.one_row_per_frame.iloc[:(self.one_descriptor_per_arena["first_move"]), :]["solidity"])
1240
+ different_solidity = self.one_row_per_frame["solidity"].values < (0.9 * solidity_reference)
1241
+ # Make sure that the isotropy breaking does not occur before the isotropic growth
1242
+ if np.any(different_solidity):
1243
+ self.one_descriptor_per_arena["iso_digi_transi"] = np.nonzero(different_solidity)[0][0] * self.time_interval
1244
+ else:
1245
+ self.one_descriptor_per_arena["is_growth_isotropic"] = 0
1246
+ else:
1247
+ self.one_descriptor_per_arena["is_growth_isotropic"] = pd.NA
1248
+
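+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # The isotropy test above dilates the shape at first_move and asks whether, during
+ # the beginning of growth, the cell ever covers more than 90% of that dilated disk.
+ # Reduced to its core, assuming a (T, Y, X) uint8 binary video:
+ import numpy as np
+ import cv2
+ def is_isotropic(binary_video, first_move, early_frames, coverage=0.9):
+     cross = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
+     dilated_origin = cv2.dilate(binary_video[first_move], cross, iterations=10)
+     covered = (binary_video * dilated_origin).sum(axis=(1, 2))
+     covered = covered * early_frames  # boolean (T,) mask of the growth beginning
+     return bool(np.any(covered > coverage * dilated_origin.sum()))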
1249
+
1250
+ def check_converted_video_type(self):
1251
+ if self.converted_video.dtype != "uint8":
1252
+ self.converted_video -= np.min(self.converted_video)
1253
+ self.converted_video = np.round((255 * (self.converted_video / np.max(self.converted_video)))).astype(np.uint8)
1254
+
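+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # check_converted_video_type() min-max rescales any non-uint8 video to [0, 255].
+ # The same normalization in isolation (assumes the video is not constant):
+ import numpy as np
+ def to_uint8(video):
+     video = video.astype(np.float64) - video.min()
+     return np.round(255 * video / video.max()).astype(np.uint8)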
1255
+
1256
+ def networks_detection(self, show_seg=False):
1257
+ if not pd.isna(self.one_descriptor_per_arena["first_move"]) and not self.vars['several_blob_per_arena'] and (self.vars['save_coord_network'] or self.vars['network_analysis']):
1258
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting network detection.")
1259
+ self.check_converted_video_type()
1260
+ self.network_dynamics = np.zeros_like(self.binary, dtype=bool)
1261
+ greyscale = self.visu[-1, ...].mean(axis=-1)
1262
+ NetDet = NetworkDetection(greyscale, possibly_filled_pixels=self.binary[-1, ...],
1263
+ lighter_background=self.vars['lighter_background'],
1264
+ origin_to_add=self.origin)
1265
+ NetDet.get_best_network_detection_method()
1266
+ for t in np.arange(self.one_descriptor_per_arena["first_move"], self.dims[0]): # 20):#
1267
+ greyscale = self.visu[t, ...].mean(axis=-1)
1268
+ NetDet_fast = NetworkDetection(greyscale, possibly_filled_pixels=self.binary[t, ...],
1269
+ lighter_background=self.vars['lighter_background'],
1270
+ origin_to_add=self.origin, best_result=NetDet.best_result)
1271
+ NetDet_fast.detect_network()
1272
+ self.network_dynamics[t, ...] = NetDet_fast.complete_network
1273
+
1274
+ imtoshow = self.visu[t, ...]
1275
+ eroded_binary = cv2.erode(self.network_dynamics[t, ...].astype(np.uint8), cross_33)
1276
+ net_coord = np.nonzero(self.network_dynamics[t, ...].astype(np.uint8) - eroded_binary)
1277
+ imtoshow[net_coord[0], net_coord[1], :] = (34, 34, 158)
1278
+ if show_seg:
1279
+ cv2.imshow("", resize(imtoshow, (1000, 1000)))
1280
+ cv2.waitKey(1)
1281
+ else:
1282
+ self.visu[t, ...] = imtoshow
1283
+ if show_seg:
1284
+ cv2.destroyAllWindows()
1285
+ self.network_dynamics = smallest_memory_array(np.nonzero(self.network_dynamics), "uint")
1286
+ if self.vars['save_coord_network']:
1287
+ np.save(
1288
+ f"coord_tubular_network{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1289
+ self.network_dynamics)
1290
+
1291
+ def graph_extraction(self):
1292
+ if self.vars['graph_extraction'] and not self.vars['network_analysis'] and not self.vars['save_coord_network']:
1293
+ self.network_dynamics = self.binary
1294
+ _, _, _, origin_centroid = cv2.connectedComponentsWithStats(self.origin)
1295
+ origin_centroid = np.round((origin_centroid[1, 1], origin_centroid[1, 0])).astype(np.uint64)
1296
+ for t in np.arange(self.one_descriptor_per_arena["first_move"], self.dims[0]): # 20):#
1297
+
1298
+
1299
+ if self.origin is not None:
1300
+ computed_network = self.network_dynamics[t, ...] * (1 - self.origin)
1301
+ origin_contours = get_contours(self.origin)
1302
+ computed_network = np.logical_or(origin_contours, computed_network).astype(np.uint8)
1303
+ else:
1304
+ origin_contours = None
1305
+ computed_network = self.network_dynamics[t, ...].astype(np.uint8)
1306
+ computed_network = keep_one_connected_component(computed_network)
1307
+ pad_network, pad_origin = add_padding([computed_network, self.origin])
1308
+ pad_origin_centroid = origin_centroid + 1
1309
+ pad_skeleton, pad_distances, pad_origin_contours = get_skeleton_and_widths(pad_network, pad_origin,
1310
+ pad_origin_centroid)
1311
+ edge_id = EdgeIdentification(pad_skeleton)
1312
+ edge_id.get_vertices_and_tips_coord()
1313
+ edge_id.get_tipped_edges()
1314
+ edge_id.remove_tipped_edge_smaller_than_branch_width(pad_distances)
1315
+ edge_id.label_tipped_edges_and_their_vertices()
1316
+ edge_id.identify_all_other_edges()
1317
+ edge_id.remove_edge_duplicates()
1318
+ edge_id.remove_vertices_connecting_2_edges()
1319
+ if pad_origin_contours is not None:
1320
+ origin_contours = remove_padding([pad_origin_contours])[0]
1321
+ edge_id.make_vertex_table(origin_contours)
1322
+ edge_id.make_edge_table(self.converted_video[:, t])
1323
+
1324
+
1325
+ edge_id.vertex_table = np.hstack((np.repeat(t, edge_id.vertex_table.shape[0])[:, None], edge_id.vertex_table))
1326
+ edge_id.edge_table = np.hstack((np.repeat(t, edge_id.edge_table.shape[0])[:, None], edge_id.edge_table))
1327
+ if t == self.one_descriptor_per_arena["first_move"]:
1328
+ vertex_table = edge_id.vertex_table.copy()
1329
+ edge_table = edge_id.edge_table.copy()
1330
+ else:
1331
+ vertex_table = np.vstack((vertex_table, edge_id.vertex_table))
1332
+ edge_table = np.vstack((edge_table, edge_id.edge_table))
1333
+
1334
+ vertex_table = pd.DataFrame(vertex_table, columns=["t", "y", "x", "vertex_id", "is_tip", "origin",
1335
+ "vertex_connected"])
1336
+ edge_table = pd.DataFrame(edge_table,
1337
+ columns=["t", "edge_id", "vertex1", "vertex2", "length", "average_width", "intensity", "betweenness_centrality"])
1338
+ vertex_table.to_csv(
1339
+ f"vertex_table{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.csv")
1340
+ edge_table.to_csv(
1341
+ f"edge_table{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.csv")
1342
+
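+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # keep_one_connected_component() used above is defined elsewhere in the package;
+ # a plausible minimal equivalent keeps only the largest foreground component:
+ import numpy as np
+ import cv2
+ def largest_component(mask):
+     n_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask)
+     if n_labels <= 1:
+         return mask  # nothing to keep
+     biggest = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
+     return (labels == biggest).astype(np.uint8)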
1343
+
1344
+ def memory_allocation_for_cytoscillations(self):
1345
+ try:
1346
+ period_in_frame_nb = int(self.vars['expected_oscillation_period'] / self.time_interval)
1347
+ if period_in_frame_nb < 2:
1348
+ period_in_frame_nb = 2
1349
+ necessary_memory = self.converted_video.shape[0] * self.converted_video.shape[1] * \
1350
+ self.converted_video.shape[2] * 64 * 4 * 1.16415e-10
1351
+ available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
1352
+ if len(self.converted_video.shape) == 4:
1353
+ self.converted_video = self.converted_video[:, :, :, 0]
1354
+ average_intensities = np.mean(self.converted_video, (1, 2))
1355
+ if self.vars['lose_accuracy_to_save_memory'] or (necessary_memory > available_memory):
1356
+ oscillations_video = np.zeros(self.converted_video.shape, dtype=np.float16)
1357
+ for cy in np.arange(self.converted_video.shape[1]):
1358
+ for cx in np.arange(self.converted_video.shape[2]):
1359
+ oscillations_video[:, cy, cx] = np.round(np.gradient(self.converted_video[:, cy, cx, ...]/average_intensities,
1360
+ period_in_frame_nb), 3).astype(np.float16)
1361
+ else:
1362
+ oscillations_video = np.gradient(self.converted_video / average_intensities[:, None, None], period_in_frame_nb, axis=0)
1363
+ # check if conv change here
1364
+ self.check_converted_video_type()
1365
+ if len(self.converted_video.shape) == 3:
1366
+ self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video), axis=3)
1367
+ oscillations_video = np.sign(oscillations_video)
1368
+ return oscillations_video
1369
+ except Exception as exc:
1370
+ logging.error(f"{exc}. Retrying to allocate for 10 minutes before crashing. ")
1371
+ return None
1372
+
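+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # The oscillation signal above is the sign of a temporal gradient of pixel
+ # intensities, normalized by each frame's mean to cancel global lighting drift:
+ import numpy as np
+ def oscillation_sign(video, period_in_frames):
+     mean_per_frame = video.mean(axis=(1, 2))            # (T,)
+     normalized = video / mean_per_frame[:, None, None]  # cancel global drift
+     gradient = np.gradient(normalized, period_in_frames, axis=0)
+     return np.sign(gradient)  # +1 thickening, -1 slimming, 0 flat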
1373
+
1374
+ def study_cytoscillations(self, show_seg):
1375
+ if pd.isna(self.one_descriptor_per_arena["first_move"]):
1376
+ if not self.vars['lose_accuracy_to_save_memory']:
1377
+ self.check_converted_video_type()
1378
+ if self.vars['oscilacyto_analysis']:
1379
+ self.one_row_per_frame['mean_cluster_area'] = pd.NA
1380
+ self.one_row_per_frame['cluster_number'] = pd.NA
1381
+ else:
1382
+ if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
1383
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting oscillation analysis.")
1384
+ oscillations_video = None
1385
+ starting_time = default_timer()
1386
+ current_time = starting_time
1387
+ while oscillations_video is None and (current_time - starting_time) < 600:
1388
+ oscillations_video = self.memory_allocation_for_cytoscillations()
1389
+ if oscillations_video is None:
1390
+ sleep(30)
1391
+ current_time = default_timer()
1392
+
1393
+ within_range = (1 - self.binary[0, :, :]) * self.borders
1394
+ within_range = self.binary * within_range
1395
+ oscillations_video *= within_range
1396
+ del within_range
1397
+ oscillations_video += 1
1398
+ oscillations_video = oscillations_video.astype(np.uint8)
1399
+
1400
+ dotted_image = np.ones(self.converted_video.shape[1:3], np.uint8)
1401
+ dotted_image[1::2, :] = 0  # zero every other row
1402
+ dotted_image[:, 1::2] = 0  # zero every other column
1407
+
1408
+ if self.start is None:
1409
+ self.start = 0
1410
+
1411
+ for t in np.arange(self.dims[0]):
1412
+ eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
1413
+ contours = self.binary[t, :, :] - eroded_binary
1414
+ contours_idx = np.nonzero(contours)
1415
+ imtoshow = deepcopy(self.converted_video[t, ...])
1416
+ imtoshow[contours_idx[0], contours_idx[1], :] = self.vars['contour_color']
1417
+ if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not pd.isna(self.one_descriptor_per_arena["iso_digi_transi"]):
1418
+ if self.one_descriptor_per_arena["is_growth_isotropic"] == 1:
1419
+ if t < self.one_descriptor_per_arena["iso_digi_transi"]:
1420
+ imtoshow[contours_idx[0], contours_idx[1], 2] = 255
1421
+ oscillations_image = np.zeros(self.dims[1:], np.uint8)
1422
+ if t >= self.start:
1423
+ # Flag a pixel as influx (resp. efflux) when more than 4 of its 8 neighbors are influx (resp. efflux)
1424
+ neigh_comp = CompareNeighborsWithValue(oscillations_video[t, :, :], connectivity=8, data_type=np.int8)
1425
+ neigh_comp.is_inf(1, and_itself=False)
1426
+ neigh_comp.is_sup(1, and_itself=False)
1427
+ # Whether influx really is influx (resp. efflux) has not been verified
1428
+ influx = neigh_comp.sup_neighbor_nb
1429
+ efflux = neigh_comp.inf_neighbor_nb
1430
+
1431
+ # Only keep pixels having more than 4 positive (resp. negative) neighbors
1432
+ influx[influx <= 4] = 0
1433
+ efflux[efflux <= 4] = 0
1434
+ influx[influx > 4] = 1
1435
+ efflux[efflux > 4] = 1
1436
+ if np.any(influx) or np.any(efflux):
1437
+ influx, in_stats, in_centroids = cc(influx)
1438
+ efflux, ef_stats, ef_centroids = cc(efflux)
1439
+ # Only keep clusters of at least 'minimal_oscillating_cluster_size' pixels (smaller ones are considered noise)
1440
+ in_smalls = np.nonzero(in_stats[:, 4] < self.vars['minimal_oscillating_cluster_size'])[0]
1441
+ if len(in_smalls) > 0:
1442
+ influx[np.isin(influx, in_smalls)] = 0
1443
+ in_stats = in_stats[:in_smalls[0], :]
1444
+ in_centroids = in_centroids[:in_smalls[0], :]
1445
+ ef_smalls = np.nonzero(ef_stats[:, 4] < self.vars['minimal_oscillating_cluster_size'])[0]
1446
+ if len(ef_smalls) > 0:
1447
+ efflux[np.isin(efflux, ef_smalls)] = 0
1448
+ ef_stats = ef_stats[:(ef_smalls[0]), :]
1449
+ ef_centroids = ef_centroids[:(ef_smalls[0]), :]
1450
+ in_idx = np.nonzero(influx) # NEW
1451
+ ef_idx = np.nonzero(efflux) # NEW
1452
+ oscillations_image[in_idx[0], in_idx[1]] = 1 # NEW
1453
+ oscillations_image[ef_idx[0], ef_idx[1]] = 2 # NEW
1454
+ # Prepare the image for display
1455
+ influx *= dotted_image
1456
+ efflux *= dotted_image
1457
+ in_idx = np.nonzero(influx)
1458
+ ef_idx = np.nonzero(efflux)
1459
+ imtoshow[in_idx[0], in_idx[1], :2] = 153 # Green: influx, intensity increase
1460
+ imtoshow[in_idx[0], in_idx[1], 2] = 0
1461
+ imtoshow[ef_idx[0], ef_idx[1], 1:] = 0 # Blue: efflux, intensity decrease
1462
+ imtoshow[ef_idx[0], ef_idx[1], 0] = 204
1463
+ oscillations_video[t, :, :] = oscillations_image
1464
+ self.converted_video[t, ...] = deepcopy(imtoshow)
1465
+ if show_seg:
1466
+ im_to_show = resize(imtoshow, (540, 540))
1467
+ cv2.imshow("shape_motion", im_to_show)
1468
+ cv2.waitKey(1)
1469
+ if show_seg:
1470
+ cv2.destroyAllWindows()
1471
+ if self.vars['save_coord_thickening_slimming']:
1472
+ np.save(f"coord_thickening{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy", smallest_memory_array(np.nonzero(oscillations_video == 1), "uint"))
1473
+ np.save(f"coord_slimming{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy", smallest_memory_array(np.nonzero(oscillations_video == 2), "uint"))
1474
+
1475
+
1476
+ if self.vars['oscilacyto_analysis']:
1477
+ # To get the median oscillatory period of each oscillating cluster,
1478
+ # we create a dict containing two lists (one for influx, one for efflux).
1479
+ # Each list element corresponds to a cluster and stores
1480
+ # all pixel coordinates of that cluster, their lifespans, and their times of disappearance.
1481
+ # The row count gives the cluster size; the Euclidean distance between pixel coordinates gives the wave distance.
1482
+ self.clusters_final_data = np.empty((0, 6),
1483
+ dtype=np.float32) # ["mean_pixel_period", "phase", "total_size", "edge_distance", cy, cx]
1484
+ period_tracking = np.zeros(self.converted_video.shape[1:3], dtype=np.uint32)
1485
+ efflux_study = ClusterFluxStudy(self.converted_video.shape[:3])
1486
+ influx_study = ClusterFluxStudy(self.converted_video.shape[:3])
1487
+
1488
+ if self.start is None:
1489
+ self.start = 0
1490
+ if self.vars['fractal_analysis']:
1491
+ if os.path.exists(f"oscillating_clusters_temporal_dynamics.h5"):
1492
+ remove_h5_key(f"oscillating_clusters_temporal_dynamics.h5",
1493
+ f"arena{self.one_descriptor_per_arena['arena']}")
1494
+ cluster_id_matrix = np.zeros(self.dims[1:], dtype=np.uint64)
1495
+ named_cluster_number = 0
1496
+ mean_cluster_area = np.zeros(oscillations_video.shape[0])
1497
+ pat_tracker = PercentAndTimeTracker(self.dims[0], compute_with_elements_number=True)
1498
+ for t in np.arange(np.max((self.start, self.lost_frames)), self.dims[0]): # np.arange(21): #
1499
+ eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
1500
+ contours = self.binary[t, :, :] - eroded_binary
1501
+ oscillations_image = oscillations_video[t, ...]
1502
+ influx = (oscillations_image == 1).astype(np.uint8)
1503
+ efflux = (oscillations_image == 2).astype(np.uint8)
1504
+ in_idx = np.nonzero(influx) # NEW
1505
+ ef_idx = np.nonzero(efflux)
1506
+ influx, in_stats, in_centroids = cc(influx)
1507
+ efflux, ef_stats, ef_centroids = cc(efflux)
1508
+ in_stats = in_stats[1:]
1509
+ in_centroids = in_centroids[1:]
1510
+ ef_stats = ef_stats[1:]
1511
+ ef_centroids = ef_centroids[1:]
1512
+ # Sum the number of connected components minus the background to get the number of clusters
1513
+ oscillating_cluster_number = in_stats.shape[0] + ef_stats.shape[0]
1514
+ updated_cluster_names = [0]
1515
+ if oscillating_cluster_number > 0:
1516
+ current_percentage, eta = pat_tracker.get_progress(t, element_number=oscillating_cluster_number)
1517
+ logging.info(
1518
+ f"Arena n°{self.one_descriptor_per_arena['arena']}, Oscillatory cluster computation: {current_percentage}%{eta}")
1519
+ if self.vars['fractal_analysis']:
1520
+ # New analysis to get the surface dynamics of every oscillatory cluster: Part 2, opening:
1521
+ network_at_t = np.zeros(self.dims[1:], dtype=np.uint8)
1522
+ network_idx = self.network_dynamics[:, self.network_dynamics[0, :] == t]
1523
+ network_at_t[network_idx[1, :], network_idx[2, :]] = 1
1524
+ shapes = np.zeros(self.dims[1:], dtype=np.uint32)
1525
+ shapes[in_idx[0], in_idx[1]] = influx[in_idx[0], in_idx[1]]
1526
+ max_in = in_stats.shape[0]
1527
+ shapes[ef_idx[0], ef_idx[1]] = max_in + efflux[ef_idx[0], ef_idx[1]]
1528
+ centers = np.vstack((in_centroids, ef_centroids))
1529
+ cluster_dynamic = np.zeros((int(oscillating_cluster_number) - 1, 13), dtype=np.float64)
1530
+ for clust_i in np.arange(oscillating_cluster_number - 1, dtype=np.uint32): # 120)):# #92
1531
+ cluster = clust_i + 1
1532
+ # cluster = 1
1533
+ # print(cluster)
1534
+ current_cluster_img = (shapes == cluster).astype(np.uint8)
1535
+ # I/ Find out which names the current cluster had at t-1
1536
+ cluster_previous_names = np.unique(current_cluster_img * cluster_id_matrix)
1537
+ cluster_previous_names = cluster_previous_names[cluster_previous_names != 0]
1538
+ # II/ Find out if the current cluster name had already been analyzed at t
1539
+ # If there is no match with the saved cluster_id_matrix, assign a new cluster ID
1540
+ if t == 0 or len(cluster_previous_names) == 0:
1541
+ # logging.info("New cluster")
1542
+ named_cluster_number += 1
1543
+ cluster_names = [named_cluster_number]
1544
+ # If there is at least 1 match with the saved cluster_id_matrix, we keep the cluster_previous_name(s)
1545
+ else:
1546
+ cluster_names = cluster_previous_names.tolist()
1547
+ # Handle cluster division if necessary
1548
+ if np.any(np.isin(updated_cluster_names, cluster_names)):
1549
+ named_cluster_number += 1
1550
+ cluster_names = [named_cluster_number]
1551
+
1552
+ # Get flow direction:
1553
+ if np.unique(oscillations_image * current_cluster_img)[1] == 1:
1554
+ flow = 1
1555
+ else:
1556
+ flow = - 1
1557
+ # Update cluster ID matrix for the current frame
1558
+ coords = np.nonzero(current_cluster_img)
1559
+ cluster_id_matrix[coords[0], coords[1]] = cluster_names[0]
1560
+
1561
+ # Save the current cluster areas:
1562
+ inner_network = current_cluster_img * network_at_t
1563
+ inner_network_area = inner_network.sum()
1564
+ zoomed_binary, side_lengths = prepare_box_counting(current_cluster_img,
1565
+ side_threshold=self.vars[
1566
+ 'fractal_box_side_threshold'],
1567
+ zoom_step=self.vars[
1568
+ 'fractal_zoom_step'],
1569
+ contours=True)
1570
+ box_count_dim, r_value, box_nb = box_counting(zoomed_binary, side_lengths)
1571
+
1572
+ if np.any(inner_network):
1573
+ zoomed_binary, side_lengths = prepare_box_counting(inner_network,
1574
+ side_threshold=self.vars[
1575
+ 'fractal_box_side_threshold'],
1576
+ zoom_step=self.vars[
1577
+ 'fractal_zoom_step'],
1578
+ contours=False)
1579
+ inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb = box_counting(
1580
+ zoomed_binary, side_lengths)
1581
+ else:
1582
+ inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb = 0, 0, 0
1583
+ # Calculate centroid and add to centroids list
1584
+ centroid_x, centroid_y = centers[cluster, :]
1585
+ if self.vars['output_in_mm']:
1586
+ cluster_dynamic[clust_i, :] = np.array(
1587
+ (t * self.time_interval, cluster_names[0], flow, centroid_y, centroid_x,
1588
+ current_cluster_img.sum() * self.vars['average_pixel_size'],
1589
+ inner_network_area * self.vars['average_pixel_size'], box_count_dim, r_value,
1590
+ box_nb, inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb),
1591
+ dtype=np.float64)
1592
+ else:
1593
+ cluster_dynamic[clust_i, :] = np.array((t, cluster_names[0], flow, centroid_y,
1594
+ centroid_x, current_cluster_img.sum(),
1595
+ inner_network_area, box_count_dim, r_value,
1596
+ box_nb, inner_network_box_count_dim,
1597
+ inner_net_r_value, inner_net_box_nb),
1598
+ dtype=np.float64)
1599
+
1600
+ updated_cluster_names = np.append(updated_cluster_names, cluster_names)
1601
+ vstack_h5_array(f"oscillating_clusters_temporal_dynamics.h5",
1602
+ cluster_dynamic, key=f"arena{self.one_descriptor_per_arena['arena']}")
1603
+
1604
+ # Reset cluster_id_matrix for the next frame
1605
+ cluster_id_matrix *= self.binary[t, :, :]
1606
+
1607
+ period_tracking, self.clusters_final_data = efflux_study.update_flux(t, contours, efflux,
1608
+ period_tracking,
1609
+ self.clusters_final_data)
1610
+ period_tracking, self.clusters_final_data = influx_study.update_flux(t, contours, influx,
1611
+ period_tracking,
1612
+ self.clusters_final_data)
1613
+
1614
+ mean_cluster_area[t] = np.mean(np.concatenate((in_stats[:, 4], ef_stats[:, 4])))
1615
+ if self.vars['output_in_mm']:
1616
+ self.clusters_final_data[:, 1] *= self.time_interval # phase
1617
+ self.clusters_final_data[:, 2] *= self.vars['average_pixel_size'] # size
1618
+ self.clusters_final_data[:, 3] *= np.sqrt(self.vars['average_pixel_size']) # distance
1619
+ self.one_row_per_frame['mean_cluster_area'] = mean_cluster_area * self.vars['average_pixel_size']
1620
+ self.one_row_per_frame['cluster_number'] = named_cluster_number
1621
+
1622
+ del oscillations_video
1623
+
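+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # CompareNeighborsWithValue above counts, for every pixel, how many of its 8
+ # neighbors are increasing (influx) or decreasing (efflux); only pixels with more
+ # than 4 such neighbors are kept. A convolution expresses the same neighbor count,
+ # here on a raw sign image (+1/0/-1):
+ import numpy as np
+ from scipy.ndimage import convolve
+ def neighbor_filter(sign_image):
+     kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=np.int8)
+     positive = convolve((sign_image > 0).astype(np.int8), kernel, mode='constant')
+     negative = convolve((sign_image < 0).astype(np.int8), kernel, mode='constant')
+     influx = (positive > 4).astype(np.uint8)  # > 4 of 8 neighbors increasing
+     efflux = (negative > 4).astype(np.uint8)  # > 4 of 8 neighbors decreasing
+     return influx, efflux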
1624
+
1625
+ def fractal_descriptions(self):
1626
+ if not pd.isna(self.one_descriptor_per_arena["first_move"]) and self.vars['fractal_analysis']:
1627
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting fractal analysis.")
1628
+
1629
+ if self.vars['network_analysis']:
1630
+ box_counting_dimensions = np.zeros((self.dims[0], 7), dtype=np.float64)
1631
+ else:
1632
+ box_counting_dimensions = np.zeros((self.dims[0], 3), dtype=np.float64)
1633
+
1634
+ for t in np.arange(self.dims[0]):
1635
+ if self.vars['network_analysis']:
1636
+ box_counting_dimensions[t, 0] = self.network_dynamics[t, ...].sum()
1637
+ zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...], side_threshold=self.vars[
1638
+ 'fractal_box_side_threshold'], zoom_step=self.vars['fractal_zoom_step'], contours=True)
1639
+ box_counting_dimensions[t, 1], box_counting_dimensions[t, 2], box_counting_dimensions[
1640
+ t, 3] = box_counting(zoomed_binary, side_lengths)
1641
+ zoomed_binary, side_lengths = prepare_box_counting(self.network_dynamics[t, ...],
1642
+ side_threshold=self.vars[
1643
+ 'fractal_box_side_threshold'],
1644
+ zoom_step=self.vars['fractal_zoom_step'],
1645
+ contours=False)
1646
+ box_counting_dimensions[t, 4], box_counting_dimensions[t, 5], box_counting_dimensions[
1647
+ t, 6] = box_counting(zoomed_binary, side_lengths)
1648
+ else:
1649
+ zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...],
1650
+ side_threshold=self.vars['fractal_box_side_threshold'],
1651
+ zoom_step=self.vars['fractal_zoom_step'], contours=True)
1652
+ box_counting_dimensions[t, :] = box_counting(zoomed_binary, side_lengths)
1653
+
1654
+ if self.vars['network_analysis']:
1655
+ self.one_row_per_frame["inner_network_size"] = box_counting_dimensions[:, 0]
1656
+ self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 1]
1657
+ self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
1658
+ self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 3]
1659
+ self.one_row_per_frame["inner_network_fractal_dimension"] = box_counting_dimensions[:, 4]
1660
+ self.one_row_per_frame["inner_network_fractal_r_value"] = box_counting_dimensions[:, 5]
1661
+ self.one_row_per_frame["inner_network_fractal_box_nb"] = box_counting_dimensions[:, 6]
1662
+ if self.vars['output_in_mm']:
1663
+ self.one_row_per_frame["inner_network_size"] *= self.vars['average_pixel_size']
1664
+ else:
1665
+ self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 0]
1666
+ self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 1]
1667
+ self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
1668
+
1669
+ if self.vars['network_analysis'] or self.vars['save_coord_network']:
1670
+ del self.network_dynamics
1671
+
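+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # box_counting() estimates a fractal (box-counting) dimension: count the boxes of
+ # side s that contain foreground, for several s, and regress log N(s) against
+ # log(1/s). A minimal version (assumes a non-empty 2D binary mask):
+ import numpy as np
+ def box_count_dimension(mask, sides=(2, 4, 8, 16)):
+     counts = []
+     for s in sides:
+         h, w = (mask.shape[0] // s) * s, (mask.shape[1] // s) * s
+         boxes = mask[:h, :w].reshape(h // s, s, w // s, s)
+         counts.append((boxes.sum(axis=(1, 3)) > 0).sum())
+     slope, _ = np.polyfit(np.log(1.0 / np.asarray(sides)), np.log(counts), 1)
+     return slope  # the estimated box-counting dimension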
1672
+ def get_descriptors_summary(self):
1673
+ potential_descriptors = ["area", "perimeter", "circularity", "rectangularity", "total_hole_area", "solidity",
1674
+ "convexity", "eccentricity", "euler_number", "standard_deviation_y",
1675
+ "standard_deviation_x", "skewness_y", "skewness_x", "kurtosis_y", "kurtosis_x",
1676
+ "major_axis_len", "minor_axis_len", "axes_orientation"]
1677
+
1678
+ self.one_descriptor_per_arena["final_area"] = self.binary[-1, :, :].sum()
1679
+
1680
+ def save_efficiency_tests(self):
1681
+ # Provide images allowing to assess the analysis efficiency
1682
+ if self.dims[0] > 1:
1683
+ after_one_tenth_of_time = np.ceil(self.dims[0] / 10).astype(np.uint64)
1684
+ else:
1685
+ after_one_tenth_of_time = 0
1686
+
1687
+ last_good_detection = self.dims[0] - 1
1688
+ if self.dims[0] > self.lost_frames:
1689
+ if self.vars['do_threshold_segmentation']:
1690
+ last_good_detection -= self.lost_frames
1691
+ else:
1692
+ last_good_detection = 0
1693
+ if self.visu is None:
1694
+ if len(self.converted_video.shape) == 3:
1695
+ self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
1696
+ axis=3)
1697
+ self.efficiency_test_1 = deepcopy(self.converted_video[after_one_tenth_of_time, ...])
1698
+ self.efficiency_test_2 = deepcopy(self.converted_video[last_good_detection, ...])
1699
+ else:
1700
+ self.efficiency_test_1 = deepcopy(self.visu[after_one_tenth_of_time, :, :, :])
1701
+ self.efficiency_test_2 = deepcopy(self.visu[last_good_detection, :, :, :])
1702
+
1703
+ position = (25, self.dims[1] // 2)
1704
+ text = str(self.one_descriptor_per_arena['arena'])
1705
+ eroded_binary = cv2.erode(self.binary[after_one_tenth_of_time, :, :], cross_33)
1706
+ contours = np.nonzero(self.binary[after_one_tenth_of_time, :, :] - eroded_binary)
1707
+ self.efficiency_test_1[contours[0], contours[1], :] = self.vars['contour_color']
1708
+ self.efficiency_test_1 = cv2.putText(self.efficiency_test_1, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
1709
+ (self.vars["contour_color"], self.vars["contour_color"],
1710
+ self.vars["contour_color"], 255), 3)
1711
+
1712
+ eroded_binary = cv2.erode(self.binary[last_good_detection, :, :], cross_33)
1713
+ contours = np.nonzero(self.binary[last_good_detection, :, :] - eroded_binary)
1714
+ self.efficiency_test_2[contours[0], contours[1], :] = self.vars['contour_color']
1715
+ self.efficiency_test_2 = cv2.putText(self.efficiency_test_2, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
1716
+ (self.vars["contour_color"], self.vars["contour_color"],
1717
+ self.vars["contour_color"], 255), 3)
1718
+
1719
+ def save_video(self):
1720
+
1721
+ if self.vars['save_processed_videos']:
1722
+ self.check_converted_video_type()
1723
+ if len(self.converted_video.shape) == 3:
1724
+ self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
1725
+ axis=3)
1726
+ for t in np.arange(self.dims[0]):
1727
+
1728
+ eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
1729
+ contours = np.nonzero(self.binary[t, :, :] - eroded_binary)
1730
+ self.converted_video[t, contours[0], contours[1], :] = self.vars['contour_color']
1731
+ if "iso_digi_transi" in self.one_descriptor_per_arena.keys():
1732
+ if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not pd.isna(self.one_descriptor_per_arena["iso_digi_transi"]):
1733
+ if self.one_descriptor_per_arena["is_growth_isotropic"] == 1:
1734
+ if t < self.one_descriptor_per_arena["iso_digi_transi"]:
1735
+ self.converted_video[t, contours[0], contours[1], :] = 0, 0, 255
1736
+ del self.binary
1737
+ del self.surfarea
1738
+ del self.borders
1739
+ del self.origin
1740
+ del self.origin_idx
1741
+ del self.mean_intensity_per_frame
1742
+ del self.erodila_disk
1743
+ collect()
1744
+ if self.visu is None:
1745
+ true_frame_width = self.dims[2]
1746
+ if len(self.vars['background_list']) == 0:
1747
+ self.background = None
1748
+ else:
1749
+ self.background = self.vars['background_list'][self.one_descriptor_per_arena['arena'] - 1]
1750
+ self.visu = video2numpy(f"ind_{self.one_descriptor_per_arena['arena']}.npy", None, self.background, true_frame_width)
1751
+ if len(self.visu.shape) == 3:
1752
+ self.visu = np.stack((self.visu, self.visu, self.visu), axis=3)
1753
+ self.converted_video = np.concatenate((self.visu, self.converted_video), axis=2)
1754
+ # self.visu = None
1755
+
1756
+ if np.any(self.one_row_per_frame['time'] > 0):
1757
+ position = (5, self.dims[1] - 5)
1758
+ for t in np.arange(self.dims[0]):
1759
+ image = self.converted_video[t, ...]
1760
+ text = str(self.one_row_per_frame['time'][t]) + " min"
1761
+ image = cv2.putText(image, # numpy array on which text is written
1762
+ text, # text
1763
+ position, # position at which writing has to start
1764
+ cv2.FONT_HERSHEY_SIMPLEX, # font family
1765
+ 1, # font size
1766
+ (self.vars["contour_color"], self.vars["contour_color"], self.vars["contour_color"], 255), #(209, 80, 0, 255),
1767
+ 2) # font stroke
1768
+ self.converted_video[t, ...] = image
1769
+ vid_name = f"ind_{self.one_descriptor_per_arena['arena']}{self.vars['videos_extension']}"
1770
+ write_video(self.converted_video, vid_name, is_color=True, fps=self.vars['video_fps'])
1771
+ # self.converted_video = None
1772
+
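+ # --- Editorial sketch (hypothetical, not part of Cellects) ---
+ # save_results() below maintains one_row_per_arena.csv with one row per arena,
+ # rewriting only that arena's row when the file already exists and its columns
+ # match. The update pattern, stripped of the package's error handling:
+ import pandas as pd
+ def update_arena_row(csv_path, arena, row_values, columns, n_arenas):
+     try:
+         stats = pd.read_csv(csv_path, header=0, sep=';')
+     except FileNotFoundError:
+         stats = pd.DataFrame(0, index=range(n_arenas), columns=list(columns))
+     stats.iloc[arena - 1, :] = row_values  # arenas are 1-indexed
+     stats.to_csv(csv_path, sep=';', index=False)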
1773
+ def save_results(self):
1774
+ self.save_efficiency_tests()
1775
+ self.save_video()
1776
+ if self.vars['several_blob_per_arena']:
1777
+ try:
1778
+ with open(f"one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv", 'w') as file:
1779
+ self.one_row_per_frame.to_csv(file, sep=';', index=False, lineterminator='\n')
1780
+ except PermissionError:
1781
+ logging.error(f"Never let one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv open when Cellects runs")
1782
+
1783
+ create_new_csv: bool = False
1784
+ if os.path.isfile("one_row_per_arena.csv"):
1785
+ try:
1786
+ with open(f"one_row_per_arena.csv", 'r') as file:
1787
+ stats = pd.read_csv(file, header=0, sep=";")
1788
+ except PermissionError:
1789
+ logging.error("Never let one_row_per_arena.csv open when Cellects runs")
1790
+
1791
+ if len(self.one_descriptor_per_arena) == len(stats.columns) - 1:
1792
+ try:
1793
+ with open(f"one_row_per_arena.csv", 'w') as file:
1794
+ stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), 1:] = self.one_descriptor_per_arena.values()
1795
+ # if len(self.vars['analyzed_individuals']) == 1:
1796
+ # stats = pd.DataFrame(self.one_descriptor_per_arena, index=[0])
1797
+ # else:
1798
+ # stats = pd.DataFrame.from_dict(self.one_descriptor_per_arena)
1799
+ # stats.to_csv("stats.csv", sep=';', index=False, lineterminator='\n')
1800
+ stats.to_csv(file, sep=';', index=False, lineterminator='\n')
1801
+ except PermissionError:
1802
+ logging.error("Never let one_row_per_arena.csv open when Cellects runs")
1803
+ else:
1804
+ create_new_csv = True
1805
+ else:
1806
+ create_new_csv = True
1807
+ if create_new_csv:
1808
+ with open(f"one_row_per_arena.csv", 'w') as file:
1809
+ stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
1810
+ columns=list(self.one_descriptor_per_arena.keys()))
1811
+ stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
1812
+ stats.to_csv(file, sep=';', index=False, lineterminator='\n')
1813
+ if not self.vars['keep_unaltered_videos'] and os.path.isfile(f"ind_{self.one_descriptor_per_arena['arena']}.npy"):
1814
+ os.remove(f"ind_{self.one_descriptor_per_arena['arena']}.npy")
1815
+
1816
+ def change_results_of_one_arena(self):
1817
+ self.save_video()
1818
+ # I/ Update/Create one_row_per_arena.csv
1819
+ create_new_csv: bool = False
1820
+ if os.path.isfile("one_row_per_arena.csv"):
1821
+ try:
1822
+ with open(f"one_row_per_arena.csv", 'r') as file:
1823
+ stats = pd.read_csv(file, header=0, sep=";")
1824
+ for stat_name, stat_value in self.one_descriptor_per_arena.items():
1825
+ if stat_name in stats.columns:
1826
+ stats.loc[(self.one_descriptor_per_arena['arena'] - 1), stat_name] = np.uint32(self.one_descriptor_per_arena[stat_name])
1827
+ with open(f"one_row_per_arena.csv", 'w') as file:
1828
+ stats.to_csv(file, sep=';', index=False, lineterminator='\n')
1829
+ except PermissionError:
1830
+ logging.error("Never let one_row_per_arena.csv open when Cellects runs")
1831
+ except Exception as e:
1832
+ logging.error(f"{e}")
1833
+ create_new_csv = True
1834
+ # if len(self.one_descriptor_per_arena) == len(stats.columns):
1835
+ # try:
1836
+ # with open(f"one_row_per_arena.csv", 'w') as file:
1837
+ # stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = self.one_descriptor_per_arena.values()
1838
+ # # stats.to_csv("stats.csv", sep=';', index=False, lineterminator='\n')
1839
+ # stats.to_csv(file, sep=';', index=False, lineterminator='\n')
1840
+ # except PermissionError:
1841
+ # logging.error("Never let one_row_per_arena.csv open when Cellects runs")
1842
+ # else:
1843
+ # create_new_csv = True
1844
+ else:
1845
+ create_new_csv = True
1846
+ if create_new_csv:
1847
+ logging.info("Create a new one_row_per_arena.csv file")
1848
+ try:
1849
+ with open(f"one_row_per_arena.csv", 'w') as file:
1850
+ stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
1851
+ columns=list(self.one_descriptor_per_arena.keys()))
1852
+ stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
1853
+ stats.to_csv(file, sep=';', index=False, lineterminator='\n')
1854
+ except PermissionError:
1855
+ logging.error("Never let one_row_per_arena.csv open when Cellects runs")
1856
+
+        # II/ Update/Create one_row_per_frame.csv
+        create_new_csv = False
+        if os.path.isfile("one_row_per_frame.csv"):
+            try:
+                with open("one_row_per_frame.csv", 'r') as file:
+                    descriptors = pd.read_csv(file, header=0, sep=";")
+                # stat_value is the column Series yielded by DataFrame.items()
+                for stat_name, stat_value in self.one_row_per_frame.items():
+                    if stat_name in descriptors.columns:
+                        # .loc slicing is label-inclusive, hence the "- 1" on the end bound
+                        descriptors.loc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0] - 1), stat_name] = stat_value.values
+                with open("one_row_per_frame.csv", 'w') as file:
+                    descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
+            except PermissionError:
+                logging.error("Never leave one_row_per_frame.csv open while Cellects is running")
+            except Exception as e:
+                logging.error(f"{e}")
+                create_new_csv = True
+        else:
+            create_new_csv = True
+        if create_new_csv:
+            logging.info("Creating a new one_row_per_frame.csv file")
+            try:
+                with open("one_row_per_frame.csv", 'w') as file:
+                    descriptors = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']) * self.dims[0], len(self.one_row_per_frame.columns))),
+                                               columns=list(self.one_row_per_frame.keys()))
+                    # Assign .values: a raw DataFrame would be realigned on its 0-based
+                    # index and fill NaN for every arena but the first
+                    descriptors.iloc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]), :] = self.one_row_per_frame.values
+                    descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
+            except PermissionError:
+                logging.error("Never leave one_row_per_frame.csv open while Cellects is running")
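+        # Row-block arithmetic: with T = self.dims[0] frames per arena, arena k
+        # occupies rows (k - 1) * T to k * T - 1. For instance (illustrative
+        # numbers), with T = 100, arena 3 covers rows 200..299: end-inclusive for
+        # the .loc labels above, end-exclusive (200:300) for the .iloc positions.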
+
+        # III/ Update/Create one_row_per_oscillating_cluster.csv
+        if not pd.isna(self.one_descriptor_per_arena["first_move"]) and self.vars['oscilacyto_analysis']:
+            oscil_i = pd.DataFrame(
+                np.c_[np.repeat(self.one_descriptor_per_arena['arena'], self.clusters_final_data.shape[0]), self.clusters_final_data],
+                columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
+ if os.path.isfile("one_row_per_oscillating_cluster.csv"):
1911
+ try:
1912
+ with open(f"one_row_per_oscillating_cluster.csv", 'r') as file:
1913
+ one_row_per_oscillating_cluster = pd.read_csv(file, header=0, sep=";")
1914
+ with open(f"one_row_per_oscillating_cluster.csv", 'w') as file:
1915
+ one_row_per_oscillating_cluster_before = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] < self.one_descriptor_per_arena['arena']]
1916
+ one_row_per_oscillating_cluster_after = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] > self.one_descriptor_per_arena['arena']]
1917
+ one_row_per_oscillating_cluster = pd.concat((one_row_per_oscillating_cluster_before, oscil_i, one_row_per_oscillating_cluster_after))
1918
+ one_row_per_oscillating_cluster.to_csv(file, sep=';', index=False, lineterminator='\n')
1919
+                except PermissionError:
+                    logging.error("Never leave one_row_per_oscillating_cluster.csv open while Cellects is running")
+            else:
+                try:
+                    with open("one_row_per_oscillating_cluster.csv", 'w') as file:
+                        oscil_i.to_csv(file, sep=';', index=False, lineterminator='\n')
+                except PermissionError:
+                    logging.error("Never leave one_row_per_oscillating_cluster.csv open while Cellects is running")
+
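+            # Illustrative sketch (hypothetical values): this file holds a variable
+            # number of rows per arena, one per detected oscillating cluster, e.g.:
+            #     arena;mean_pixel_period;phase;cluster_size;edge_distance;coord_y;coord_x
+            #     1;12.5;0.3;840;17.0;102;241
+            #     1;9.8;1.1;312;5.0;88;300
+            #     2;...
+            # which is why the update path splits the file around the current arena
+            # instead of overwriting a fixed row range.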