cellects-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +155 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +31 -0
  7. cellects/core/cellects_threads.py +1451 -0
  8. cellects/core/motion_analysis.py +2010 -0
  9. cellects/core/one_image_analysis.py +1061 -0
  10. cellects/core/one_video_per_blob.py +540 -0
  11. cellects/core/program_organizer.py +1316 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +790 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +2066 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/image_segmentation.py +706 -0
  29. cellects/image_analysis/morphological_operations.py +1635 -0
  30. cellects/image_analysis/network_functions.py +1757 -0
  31. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  32. cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
  33. cellects/image_analysis/shape_descriptors.py +1016 -0
  34. cellects/utils/__init__.py +0 -0
  35. cellects/utils/decorators.py +14 -0
  36. cellects/utils/formulas.py +637 -0
  37. cellects/utils/load_display_save.py +1054 -0
  38. cellects/utils/utilitarian.py +490 -0
  39. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  40. cellects-0.1.2.dist-info/METADATA +132 -0
  41. cellects-0.1.2.dist-info/RECORD +44 -0
  42. cellects-0.1.2.dist-info/WHEEL +5 -0
  43. cellects-0.1.2.dist-info/entry_points.txt +2 -0
  44. cellects-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2010 @@
+ #!/usr/bin/env python3
+ """
+ This script contains the MotionAnalysis class. This class, called by program_organizer,
+ calls all methods used to read and process videos and to save results.
+ 1. load_images_and_videos: It starts by loading a video in .npy format (which must have been written beforehand by the one_video_per_blob file)
+ and, if it exists, the background used for background subtraction. Then, it uses a particular color space combination
+ to convert the RGB video into greyscale.
+ At this point, arenas have been delimited and each can be analyzed separately. The following describes what happens during the analysis of one arena. While Cellects can work with either one or several cells in each arena, we describe the algorithm for a single cell, clarifying whenever anything changes for multiple cells.
+ 2. Cellects reads and converts the video of each arena into grayscale, using the selected color space combination, and then processes it through the following steps.
+ 3. It validates the presence/absence of the specimen(s) in the first image of the video, called the origin.
+ Cellects finds the frame in which the cell is visible for the first time in each arena. When the seed image is the first image, all cells are visible from the beginning. Otherwise, it applies the same segmentation as for the seed image to the first, second, third images, and so on, until the cell appears in one of them.
+ 4. It browses the first frames of the video to find the average covering duration of a pixel.
+ It does so using a very conservative method, to make sure that only pixels that really are covered by the specimen(s)
+ are used to compute that covering duration.
+ 5. It performs the main segmentation algorithm on the whole video.
+ This segmentation consists in transforming the grayscale video resulting from the color space combination conversion
+ into a binary video of presence/absence. To do this, Cellects provides several options to detect specimen
+ motion and growth throughout the video.
+ In simple datasets with strong contrast between specimens and background, Cellects can simply segment each image by
+ thresholding. In more challenging conditions, the algorithm tracks the intensity of each pixel over time,
+ using this dynamical information to determine when a pixel has been covered. This is done through an automatically
+ determined threshold on the intensity or on its derivative. Additionally, Cellects can apply the logical operators
+ AND or OR to these intensity and derivative thresholds. The default option is the dynamical intensity threshold, which
+ works in many cases, but the user interface lets the user quickly check the results of different options and choose
+ the best one by visual inspection of the segmentation result in different frames.
+ To make Cellects as versatile as possible, the user can choose among five segmentation strategies.
+ The first option is the simplest: it starts at the frame in which the cell is visible for the first time and segments the video frame by frame, using the same method as when analyzing a single image (as described in sections 1 and 2). The only difference is an optional background subtraction algorithm, which subtracts the first image from all others.
+ The second option segments each frame by intensity thresholding. The threshold changes over time to adapt to changes in the background. To estimate the optimal threshold for each frame, Cellects proceeds as follows: it first estimates the typical background intensity of each frame as an intensity higher than the first decile of all pixels in the frame. Then, it defines an initial threshold for each frame at a fixed distance above this decile. This fixed distance is initially low, so that the initial segmentation is an overestimation of the actual area covered by the specimen. Then, it performs segmentation of all frames. If any frame presents a growth greater than a user-set threshold (whose default value is 5% of the area), all thresholds are diminished by 10%. The segmentation is then performed again, and this process continues until no frame presents excessive growth. This description refers to cases in which the background is darker than the specimen; Cellects automatically detects if contrast is reversed and adapts the method accordingly. Finally, Cellects segments the whole video with these adjusted intensity thresholds.
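A minimal sketch of this iterative adjustment, assuming a (T, H, W) grayscale numpy array and a background darker than the specimen. The function name and defaults are hypothetical, and the 10% adjustment is applied here in the direction that makes each iteration stricter so the loop converges; the package's actual implementation may differ:

    import numpy as np

    def adaptive_threshold_segmentation(video, offset=10.0, max_growth=0.05):
        # video: (T, H, W) grayscale array, background darker than the specimen.
        n_pixels = video.shape[1] * video.shape[2]
        # Typical background intensity of each frame: its first decile.
        deciles = np.quantile(video, 0.1, axis=(1, 2))
        while True:
            thresholds = deciles + offset              # one threshold per frame
            binary = video > thresholds[:, None, None]
            growth = np.diff(binary.sum(axis=(1, 2))) / n_pixels
            if np.all(growth <= max_growth):
                return binary
            offset *= 1.1  # make the segmentation stricter and re-segment the whole video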
+ The third option uses the change in intensity over time: for each pixel, it considers the evolution of its intensity in time, and considers that the cell covers the pixel when the slope of this intensity over time exceeds a threshold (Fig 3d in the main text). Cellects computes each frame's threshold with a procedure similar to the second option's, except for the following. As the slope value is highly sensitive to noise, Cellects first smooths the intensity curves using a moving average with a window length adapted to the typical time it takes for the cell to cover each pixel. Cellects tries to compute this typical time using the dynamics of a subset of pixels whose intensity varies strongly at the beginning of the growth (see the code for further details), and uses a default value of 10 frames when this computation fails. Cellects also uses this subset of pixels to get the reference slope threshold. Finally, it progressively modifies this reference until the video segmentation matches the required growth ratio, as in the second option.
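A minimal sketch of the slope-based strategy, assuming a (T, H, W) array, a fixed smoothing window, and a precomputed slope threshold (the package derives both from the data, as described above); the function name is hypothetical:

    import numpy as np

    def slope_segmentation(video, window=10, slope_threshold=0.5):
        # Smooth each pixel's intensity curve with an edge-padded moving average,
        # then mark a pixel covered when its temporal derivative exceeds the threshold.
        kernel = np.ones(window) / window
        smoothed = np.empty(video.shape, dtype=np.float64)
        for i in range(video.shape[1]):
            for j in range(video.shape[2]):
                padded = np.pad(video[:, i, j].astype(np.float64),
                                (window // 2, window - 1 - window // 2), mode='edge')
                smoothed[:, i, j] = np.convolve(padded, kernel, mode='valid')
        slopes = np.gradient(smoothed, axis=0)   # per-pixel intensity derivative
        return slopes > slope_threshold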
+ The next two options are combinations of the first two.
+ The fourth is a logical OR between the intensity value and the intensity slope segmentations. It provides a very permissive segmentation, which is useful when parts of the cells are very hard to detect.
+ The fifth is a logical AND between the intensity value and the intensity slope segmentations. It provides a more restrictive segmentation that can be useful when both the value and the slope segmentations detect areas that are not covered by the cell.
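These combinations reduce to elementwise boolean operations on the two masks; a trivial sketch, assuming value_mask and slope_mask are the (T, H, W) boolean outputs of options two and three:

    import numpy as np

    def combine_segmentations(value_mask, slope_mask, use_and=False):
        # Fourth option (OR, permissive) vs fifth option (AND, restrictive).
        if use_and:
            return np.logical_and(value_mask, slope_mask)
        return np.logical_or(value_mask, slope_mask)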
+ 6. Video post-processing improves the binary video obtained through segmentation.
+ The final step consists in improving the segmentation (see section S3.5 of the Supplementary Materials for more information). Cellects first applies several filters that consistently improve the results, such as checking that each detected pixel was also detected at least twice in the three previous frames, omitting images containing too many detected pixels, and performing morphological opening and closing. Optionally, the user can activate the detection of areas left by the cell (see section S3.5.B of the Supplementary Materials for details).
+ 
+ Additionally, optional algorithms correct particular types of errors. The first algorithm is useful when the substrate on which the cells sit in the first image is of a different color than the substrate on which they will grow, expand or move. This color difference may produce holes in the segmentation, and we developed an optional algorithm to correct this kind of error around the initial shape. The second algorithm should be used when each arena contains a single specimen, which should form a single connected component. We can use this information to correct mistakes in models such as P. polycephalum, whose strong heterogeneity produces large variations in opacity. In these cases, segmentation may fail in the most transparent parts of the specimens and identify two disconnected components. The correction algorithm merges these disconnected components by finding the most likely pixels connecting them and the most likely times at which those pixels were covered during growth.
+ 6.A Basic post-processing
+ This process improves the raw segmentation. It includes algorithms to filter out aberrant frames, remove small artifacts and holes, and detect when the specimens leave pixels. First, it checks that every pixel was detected at least twice in the three previous frames. Second, it excludes frames containing too many newly detected pixels, according to the maximal growth ratio per frame (as defined in section 3B). For these frames, the previous segmentation is kept, making the analysis robust to events producing a sharp variation in the brightness of a few images in the video (for example, when an enclosed device is temporarily opened or a light is switched on or off). Third, it removes potential small artifacts and holes by performing morphological opening followed by morphological closing.
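A condensed sketch of these three filters, assuming a (T, H, W) uint8 binary stack and OpenCV; the function name, thresholds, and kernel size are illustrative, not the package's values:

    import numpy as np
    import cv2

    def basic_post_processing(binary, max_growth=0.05):
        kernel = np.ones((3, 3), dtype=np.uint8)
        n_pixels = binary.shape[1] * binary.shape[2]
        cleaned = binary.copy()
        for t in range(3, binary.shape[0]):
            # Keep a pixel only if it was detected at least twice
            # in the three previous frames.
            support = cleaned[t - 3:t].sum(axis=0)
            cleaned[t] &= (support >= 2).astype(np.uint8)
            # Discard frames with too many newly detected pixels.
            new_pixels = np.logical_and(cleaned[t], np.logical_not(cleaned[t - 1])).sum()
            if new_pixels > max_growth * n_pixels:
                cleaned[t] = cleaned[t - 1]
                continue
            # Remove small artifacts, then fill small holes.
            cleaned[t] = cv2.morphologyEx(cleaned[t], cv2.MORPH_OPEN, kernel)
            cleaned[t] = cv2.morphologyEx(cleaned[t], cv2.MORPH_CLOSE, kernel)
        return cleaned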
+ 
+ 6.B Cell leaving detection
+ This optional algorithm detects when areas are left by the specimens. It is useful when the cells not only grow but also move, so they can leave pixels that were covered before. When a pixel is covered, Cellects saves the intensity it had before being covered, computed as the median of the pixel's intensity over a time window before it was covered. The length of this time window matches the typical time it takes for the cell to cover each pixel (computed as described in section 4.B, third segmentation strategy). Then, pixels at the border of the cell whose intensity falls below the saved intensity, rescaled by a user-defined multiplier (set to 1 by default), are considered to be left by the cell. When there should be only one cell in the arena, Cellects tries to remove each component one by one, accepting a removal only when it does not break the connectivity of the remaining parts of the cell.
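A simplified per-frame sketch, approximating the per-pixel pre-covering median with the first frames of the video (the package stores it per pixel at covering time); the function name and defaults are hypothetical:

    import numpy as np
    import cv2

    def detect_leaving_pixels(video, binary, t, window=10, multiplier=1.0):
        covered = binary[t].astype(np.uint8)
        # Border of the cell: covered pixels whose 3x3 neighborhood is not fully covered.
        border = covered - cv2.erode(covered, np.ones((3, 3), np.uint8))
        # Stand-in for the saved pre-covering intensity of each pixel: the median
        # over the first `window` frames.
        reference = np.median(video[:window].astype(np.float64), axis=0)
        # A border pixel is considered left when its intensity falls below the
        # saved intensity rescaled by the user-defined multiplier.
        leaving = (border > 0) & (video[t] < multiplier * reference)
        return leaving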
+ 
+ 6.C Special error correction algorithms
+ At the time of writing, Cellects contains two post-processing algorithms adapted to two specific situations. The first one is useful when there should be only one specimen per arena and Cellects fails to detect its distant parts because their connections are not sufficiently visible. The second one is useful when Cellects fails to detect small areas around the initial shape, for example due to reflections near the edges. The following explains how these optional algorithms work.
+ 
+ 6.D Connect distant components:
+ This algorithm automatically and progressively adds distant shapes to the main one. This correcting process occurs in three steps. First, it selects which distant components should get connected to the main one. The user can adjust this selection process according to the distance of the distant components from the main shape, and the minimal and maximal size of these components. Second, for each distant component, it computes and creates the shortest connection with the main shape. The width of that connection depends on the size of the distant shape where the connection occurs. Third, it uses an algorithm similar to the one used to correct errors around the initial shape to estimate how quickly the gaps should be filled. This algorithm uses distance and timing vectors to create a dynamic connection between these two shapes (Figure 3f-h in the main text).
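A minimal sketch of the second step only, drawing the shortest straight connection between one distant component and the main shape; the package additionally scales the width from the component's local size and fills the connection progressively over time. The function name is hypothetical:

    import numpy as np
    import cv2
    from scipy.spatial.distance import cdist

    def connect_component(main_shape, component, width=3):
        main_pts = np.column_stack(np.nonzero(main_shape))
        comp_pts = np.column_stack(np.nonzero(component))
        d = cdist(comp_pts, main_pts)                    # all pairwise pixel distances
        i, j = np.unravel_index(np.argmin(d), d.shape)   # closest pixel pair
        connected = np.logical_or(main_shape, component).astype(np.uint8)
        # cv2.line expects (x, y) coordinates; our points are (row, col).
        p1 = (int(comp_pts[i][1]), int(comp_pts[i][0]))
        p2 = (int(main_pts[j][1]), int(main_pts[j][0]))
        cv2.line(connected, p1, p2, color=1, thickness=width)
        return connected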
+ 
+ 6.E Correct errors around initial shape:
+ This correcting process occurs in two steps. The first scans for the formation of holes around the initial segmentation during the beginning of the growth. The second finds out when and how these holes are to be filled. To determine how the holes should be covered, Cellects uses the same algorithm as the one used to connect distant components. Computing the speed at which growth occurs from the initial position allows Cellects to fill the holes at the same speed, and therefore to correct these errors.
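A sketch of the timing logic, assuming a measured growth speed in pixels per frame: hole pixels are scheduled to be filled at a time proportional to their distance from the initial shape, so that holes close at the observed growth speed. Names are hypothetical:

    import numpy as np
    from scipy import ndimage

    def fill_times_for_holes(initial_shape, holes, growth_speed):
        # Distance of every pixel from the initial shape (in pixels).
        distance = ndimage.distance_transform_edt(1 - initial_shape)
        fill_time = np.zeros(holes.shape, dtype=np.int64)
        idx = np.nonzero(holes)
        # Fill each hole pixel when a front moving at `growth_speed`
        # pixels per frame would have reached it.
        fill_time[idx] = np.ceil(distance[idx] / growth_speed).astype(np.int64)
        return fill_time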
+ 
+ 7. Special algorithms for Physarum polycephalum
+ Although developed for this organism, these methods can be used with other biological models, such as mycelia.
+ 7.A. Oscillatory activity detection:
+ This algorithm analyzes grayscale video frames to detect whether pixel intensities increase or decrease over time. To prevent artifacts from arena-scale illumination fluctuations, pixel intensities are first standardized by the average intensity of the entire image. A pixel is considered to have increased (or decreased) in intensity if at least four of its eight neighboring pixels have also shown an increase (or decrease). Then, regions of adjacent pixels whose intensity is changing in the same direction are detected, keeping only those larger than a user-selected threshold. Each region is tracked throughout the video, recording its oscillatory period, phase, and coordinates until it dissipates or the video ends.
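A per-frame sketch of the direction rule, assuming a (T, H, W) grayscale array; the 4-of-8-neighbors criterion is implemented with a convolution, and the function name is hypothetical:

    import numpy as np
    from scipy import ndimage

    def oscillation_direction(video, t):
        # Standardize by the average intensity of the whole frame to damp
        # arena-scale illumination fluctuations.
        previous = video[t - 1] / video[t - 1].mean()
        current = video[t] / video[t].mean()
        raw = np.sign(current - previous).astype(np.int8)
        kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
        rising = ndimage.convolve((raw > 0).astype(np.uint8), kernel, mode='constant')
        falling = ndimage.convolve((raw < 0).astype(np.uint8), kernel, mode='constant')
        # Keep a direction only when at least 4 of the 8 neighbors agree.
        direction = np.zeros_like(raw)
        direction[(raw > 0) & (rising >= 4)] = 1
        direction[(raw < 0) & (falling >= 4)] = -1
        return direction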
+ 
+ 7.B. Network detection:
+ P. polycephalum cells are composed of two types of compartments: a tubular network that transports cytoplasmic materials, and a thinner compartment that covers the rest of the space. Cellects' initial segmentation does not distinguish between these two compartments, detecting all pixels that have been covered by either of them. This step distinguishes them, in order to segment the tubular network, whose intensity is further from that of the background.
+ Cellects detects such a network using an algorithm that scores the segmentation results after applying vesselness
+ detection filters: Sato and Frangi. On top of testing these filters with around 10 variations of their parameters,
+ Cellects tries to segment the images adaptively, segmenting each part of the image using a 2D rolling window.
+ Once the best segmentation strategy is found for the last image of the video, it is used to segment the network in all
+ other frames.
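A minimal sketch of this scoring idea using scikit-image's sato and frangi filters; the parameter grid, the mean-based thresholding, and the overlap score are illustrative stand-ins for the package's actual scoring:

    import numpy as np
    from skimage.filters import sato, frangi

    def best_vesselness_segmentation(image, reference):
        # `reference` is a rough binary mask of the specimen; candidates are
        # scored by how much of their detected area falls inside it.
        best_score, best_mask = -1.0, None
        for filt in (sato, frangi):
            for sigmas in ([1], [1, 2], [1, 2, 4]):
                response = filt(image, sigmas=sigmas, black_ridges=False)
                mask = response > response.mean()
                score = np.logical_and(mask, reference).sum() / max(mask.sum(), 1)
                if score > best_score:
                    best_score, best_mask = score, mask
        return best_mask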
+ 
+ 8. Graph extraction:
+ Cellects can extract the graph of the specimen or, if detected, of its internal network.
+ To do so, Cellects does the following (a minimal sketch of the first stages follows this list):
+ - Get the skeleton of the binary matrix of presence/absence of the specimen, as well as the specimen/network
+ width at every pixel of the skeleton.
+ If the original position from which the specimen started does not have the same color as the rest of the arena, apply
+ a special algorithm to draw the skeleton at the border of that origin.
+ - Smooth the skeleton using an algorithm removing small loops of 3-pixel width
+ - Keep only the largest connected component of the skeleton
+ - Use pixel connectivity and neighborhood connectivity to detect all tips and branching vertices of the graph
+ summarizing the skeleton.
+ - Find and label all edges connecting tips, and remove those that are shorter than the width of the skeleton arm they are connected to
+ - Find and label all edges connecting touching vertices
+ - Find and label all edges connected to the two previously mentioned kinds of vertices
+ - Find and label all edges forming loops and connected to only one vertex
+ - Remove all shapes of one or two pixels that are neither detected as vertices nor edges,
+ if and only if they do not break the skeleton into more than one connected component.
+ - Remove edge duplicates
+ - Remove vertices connecting 2 edges
+ - Finally, create and save the tables storing edge and vertex coordinates and properties
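A minimal sketch of the first stages (skeletonization, then tip and branching-vertex detection by neighbor counting), using scipy and scikit-image; the loop smoothing, edge labeling, and table construction are omitted:

    import numpy as np
    from scipy import ndimage
    from skimage.morphology import skeletonize

    def skeleton_tips_and_branches(binary):
        # Skeletonize the presence/absence mask, then classify skeleton pixels
        # by their number of skeleton neighbors:
        # 1 neighbor -> tip; 3 or more -> branching vertex.
        skel = skeletonize(binary.astype(bool))
        kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
        neighbors = ndimage.convolve(skel.astype(np.uint8), kernel, mode='constant')
        tips = skel & (neighbors == 1)
        branches = skel & (neighbors >= 3)
        return skel, tips, branches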
+ 
+ 9. Save
+ Once the image analysis is finished, the software determines the value of each morphological descriptor at each time frame (SI - Table 1). Finally, Cellects saves a new video for each arena showing the original video next to the converted video displaying the segmentation result, so that the user can easily validate the result. If an arena shows a poor segmentation result, the user can re-analyze it, tuning all parameters for that specific arena. Cellects also saves:
+ - the final results of the segmentation and its contour (if applicable)
+ - descriptors summarizing the whole video
+ - validation images (efficiency tests) and videos
+ 
+ 10. If this class has been used in the video_analysis_window on only one arena, the method
+ change_results_of_one_arena will open (or create, if they do not exist) tables in the focal folder
+ and adjust every row corresponding to that particular arena to the current analysis results.
+ 
+ """
+ 
+ import weakref
+ from gc import collect
+ from time import sleep
+ from numba.typed import Dict as TDict
+ from psutil import virtual_memory
+ import pandas as pd
+ from cellects.core.one_image_analysis import OneImageAnalysis
+ from cellects.image_analysis.cell_leaving_detection import cell_leaving_detection
+ from cellects.image_analysis.cluster_flux_study import ClusterFluxStudy
+ from cellects.image_analysis.image_segmentation import segment_with_lum_value, apply_filter
+ from cellects.image_analysis.morphological_operations import (find_major_incline, image_borders, draw_me_a_sun,
+                                                               inverted_distance_transform, dynamically_expand_to_fill_holes,
+                                                               box_counting_dimension, prepare_box_counting,
+                                                               keep_one_connected_component, cc)
+ from cellects.image_analysis.network_functions import *
+ from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
+ from cellects.image_analysis.shape_descriptors import ShapeDescriptors, from_shape_descriptors_class
+ from cellects.utils.utilitarian import PercentAndTimeTracker, smallest_memory_array
+ 
+ 
+ class MotionAnalysis:
+ 
+     def __init__(self, l):
+         """
+         :param l: a list containing, in order: the arena index (l[0]), the
+             arena number (l[1]), the analysis parameter dict, including the
+             linear combination of color channels (rgb_hsv_lab) to use (l[2]),
+             the detect_shape flag (l[3]), the analyse_shape flag (l[4]), the
+             show_seg flag (l[5]) and, optionally, videos already in RAM (l[6]).
+         """
+         self.one_descriptor_per_arena = {}
+         self.one_descriptor_per_arena['arena'] = l[1]
+         vars = l[2]
+         detect_shape = l[3]
+         analyse_shape = l[4]
+         show_seg = l[5]
+         videos_already_in_ram = l[6]
+         self.visu = None
+         self.binary = None
+         self.origin_idx = None
+         self.smoothing_flag: bool = False
+         logging.info(f"Start the motion analysis of the arena n°{self.one_descriptor_per_arena['arena']}")
+ 
+         self.vars = vars
+         # self.origin = self.vars['first_image'][self.vars['top'][l[0]]:(
+         #     self.vars['bot'][l[0]] + 1),
+         #     self.vars['left'][l[0]]:(self.vars['right'][l[0]] + 1)]
+         self.load_images_and_videos(videos_already_in_ram, l[0])
+ 
+         self.dims = self.converted_video.shape
+         self.segmentation = np.zeros(self.dims, dtype=np.uint8)
+ 
+         self.covering_intensity = np.zeros(self.dims[1:], dtype=np.float64)
+         self.mean_intensity_per_frame = np.mean(self.converted_video, (1, 2))
+ 
+         self.borders = image_borders(self.dims[1:], shape=self.vars['arena_shape'])
+         # if self.vars['arena_shape'] == "circle":
+         #     self.borders = Ellipse(self.dims[1:]).create()
+         #     img_contours = image_borders(self.dims[1:])
+         #     self.borders = self.borders * img_contours
+         # else:
+         #     self.borders = image_borders(self.dims[1:])
+         self.pixel_ring_depth = 9
+         self.step = 10
+         self.lost_frames = 10
+         self.update_ring_width()
+ 
+         self.start = None
+         if detect_shape:
+             # self = self.motion
+             # self.drift_correction()
+             self.start = None
+             # Two conditional layers allow detecting whether an expansion/exploration occurred.
+             self.get_origin_shape()
+             # The first, user-defined, is 'first_move_threshold'; the second is the detection of the
+             # substantial image: if either fails, the program considers that no expansion occurred.
+             if self.dims[0] >= 40:
+                 step = self.dims[0] // 20
+             else:
+                 step = 1
+             if self.start >= (self.dims[0] - step - 1):
+                 self.start = None
+             else:
+                 self.get_covering_duration(step)
+             if self.start is not None:
+                 # self.vars['fading'] = -0.5
+                 # self.vars['do_threshold_segmentation']: bool = False
+                 # self.vars['do_slope_segmentation'] = True
+                 # self.vars['true_if_use_light_AND_slope_else_OR']: bool = False
+                 self.detection()
+                 self.initialize_post_processing()
+                 self.t = self.start
+                 while self.t < self.binary.shape[0]:
+                     self.update_shape(show_seg)
+ 
+             if self.start is None:
+                 self.binary = np.repeat(np.expand_dims(self.origin, 0), self.converted_video.shape[0], axis=0)
+ 
+         if analyse_shape:
+             self.get_descriptors_from_binary()
+             self.detect_growth_transitions()
+             self.networks_detection(show_seg)
+             self.study_cytoscillations(show_seg)
+             self.fractal_descriptions()
+             self.get_descriptors_summary()
+         if videos_already_in_ram is None:
+             self.save_results()
+ 
+     def load_images_and_videos(self, videos_already_in_ram, i):
+         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Load images and videos")
+         self.origin = self.vars['origin_list'][i]  # self.vars['origins_list'][i]
+         if videos_already_in_ram is None:
+             true_frame_width = self.origin.shape[1]
+             vid_name = f"ind_{self.one_descriptor_per_arena['arena']}.npy"
+             if len(self.vars['background_list']) == 0:
+                 self.background = None
+             else:
+                 self.background = self.vars['background_list'][i]
+             if len(self.vars['background_list2']) == 0:
+                 self.background2 = None
+             else:
+                 self.background2 = self.vars['background_list2'][i]
+ 
+             if self.vars['already_greyscale']:
+                 self.converted_video = video2numpy(
+                     vid_name, None, self.background, true_frame_width)
+                 if len(self.converted_video.shape) == 4:
+                     self.converted_video = self.converted_video[:, :, :, 0]
+             else:
+                 self.visu = video2numpy(
+                     vid_name, None, self.background, true_frame_width)
+                 self.get_converted_video()
+         else:
+             if self.vars['already_greyscale']:
+                 self.converted_video = videos_already_in_ram
+             else:
+                 if self.vars['convert_for_motion']['logical'] == 'None':
+                     self.visu, self.converted_video = videos_already_in_ram
+                 else:
+                     (self.visu,
+                      self.converted_video,
+                      self.converted_video2) = videos_already_in_ram
+ 
+     def get_converted_video(self):
+         if not self.vars['already_greyscale']:
+             logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Convert the RGB visu video into a greyscale image using the color space combination: {self.vars['convert_for_motion']}")
+             first_dict = TDict()
+             second_dict = TDict()
+             c_spaces = []
+             for k, v in self.vars['convert_for_motion'].items():
+                 if k != 'logical' and v.sum() > 0:
+                     if k[-1] != '2':
+                         first_dict[k] = v
+                         c_spaces.append(k)
+                     else:
+                         second_dict[k[:-1]] = v
+                         c_spaces.append(k[:-1])
+             if self.vars['lose_accuracy_to_save_memory']:
+                 self.converted_video = np.zeros(self.visu.shape[:3], dtype=np.uint8)
+             else:
+                 self.converted_video = np.zeros(self.visu.shape[:3], dtype=np.float64)
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 if self.vars['lose_accuracy_to_save_memory']:
+                     self.converted_video2 = np.zeros(self.visu.shape[:3], dtype=np.uint8)
+                 else:
+                     self.converted_video2 = np.zeros(self.visu.shape[:3], dtype=np.float64)
+ 
+             # Subtracting the first image from itself would be meaningless, so when doing
+             # background subtraction, the first and second images are treated as equal
+             for counter in np.arange(self.visu.shape[0]):
+                 if self.vars['subtract_background'] and counter == 0:
+                     img = self.visu[1, ...]
+                 else:
+                     img = self.visu[counter, ...]
+                 greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces, first_dict,
+                                                                                      second_dict, self.background,
+                                                                                      self.background2,
+                                                                                      self.vars['lose_accuracy_to_save_memory'])
+                 if self.vars['filter_spec'] is not None and self.vars['filter_spec']['filter1_type'] != "":
+                     greyscale_image = apply_filter(greyscale_image, self.vars['filter_spec']['filter1_type'],
+                                                    self.vars['filter_spec']['filter1_param'],
+                                                    self.vars['lose_accuracy_to_save_memory'])
+                 if greyscale_image2 is not None and self.vars['filter_spec']['filter2_type'] != "":
+                     greyscale_image2 = apply_filter(greyscale_image2, self.vars['filter_spec']['filter2_type'],
+                                                     self.vars['filter_spec']['filter2_param'],
+                                                     self.vars['lose_accuracy_to_save_memory'])
+ 
+                 self.converted_video[counter, ...] = greyscale_image
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     self.converted_video2[counter, ...] = greyscale_image2
+ 
+     def get_origin_shape(self):
+         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Make sure of origin shape")
+         if self.vars['origin_state'] == "constant":
+             self.start = 1
+             self.origin_idx = np.nonzero(self.origin)
+             if self.vars['lighter_background']:
+                 # Initialize the covering_intensity matrix as a reference for pixel fading
+                 self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = 200
+             self.substantial_growth = 1.2 * self.origin.sum()
+         else:
+             self.start = 0
+             analysisi = OneImageAnalysis(self.converted_video[0, :, :])
+             analysisi.binary_image = 0
+             if self.vars['drift_already_corrected']:
+                 mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
+                 for frame_i in np.arange(self.dims[0]):
+                     true_pixels = np.nonzero(self.converted_video[frame_i, ...])
+                     mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]), np.min(true_pixels[1]), np.max(
+                         true_pixels[1])
+             else:
+                 mask_coord = None
+             while np.logical_and(np.sum(analysisi.binary_image) < self.vars['first_move_threshold'], self.start < self.dims[0]):
+                 analysisi = self.frame_by_frame_segmentation(self.start, mask_coord)
+                 self.start += 1
+ 
+             # frame_i = OneImageAnalysis(self.converted_video[self.start, :, :])
+             # frame_i.thresholding(self.vars['luminosity_threshold'], self.vars['lighter_background'])
+             # self.start += 1
+ 
+             # Use connected components to find which shape is the nearest to the image center.
+             if self.vars['several_blob_per_arena']:
+                 self.origin = analysisi.binary_image
+             else:
+                 nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(analysisi.binary_image,
+                                                                                            connectivity=8)
+                 if self.vars['appearance_detection_method'] == 'most_central':
+                     center = np.array((self.dims[2] // 2, self.dims[1] // 2))
+                     stats = np.zeros(nb_components - 1)
+                     for shape_i in np.arange(1, nb_components):
+                         stats[shape_i - 1] = eudist(center, centroids[shape_i, :])
+                     # The shape having the minimal euclidean distance from the center will be the original shape
+                     self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
+                     self.origin[output == (np.argmin(stats) + 1)] = 1
+                 elif self.vars['appearance_detection_method'] == 'largest':
+                     self.origin = np.zeros((self.dims[1], self.dims[2]), dtype=np.uint8)
+                     # The +1 offset maps the argmax over stats[1:, 4] back to a component label
+                     self.origin[output == (np.argmax(stats[1:, 4]) + 1)] = 1
+             self.origin_idx = np.nonzero(self.origin)
+             self.substantial_growth = self.origin.sum() + 250
+         ##
+ 
+     def get_covering_duration(self, step):
+         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Find a frame with a significant growth/motion and determine the number of frames necessary for a pixel to get covered")
+         ## Find the time at which the shape reached a substantial growth.
+         self.substantial_time = self.start
+         # To prevent noisy images from having deleterious effects, make sure that the area reaches the threshold three times.
+         occurrence = 0
+         if self.vars['drift_already_corrected']:
+             mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
+             for frame_i in np.arange(self.dims[0]):
+                 true_pixels = np.nonzero(self.converted_video[frame_i, ...])
+                 mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]), np.min(true_pixels[1]), np.max(
+                     true_pixels[1])
+         else:
+             mask_coord = None
+         while np.logical_and(occurrence < 3, self.substantial_time < (self.dims[0] - step - 1)):
+             self.substantial_time += step
+             growth_vision = self.frame_by_frame_segmentation(self.substantial_time, mask_coord)
+ 
+             # growth_vision = OneImageAnalysis(self.converted_video[self.substantial_time, :, :])
+             # # growth_vision.thresholding()
+             # if self.vars['convert_for_motion']['logical'] != 'None':
+             #     growth_vision.image2 = self.converted_video2[self.substantial_time, ...]
+             #
+             # growth_vision.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'],
+             #                            bio_label=self.vars["bio_label"], bio_label2=self.vars["bio_label2"],
+             #                            grid_segmentation=self.vars['grid_segmentation'],
+             #                            lighter_background=self.vars['lighter_background'])
+ 
+             surfarea = np.sum(growth_vision.binary_image * self.borders)
+             if surfarea > self.substantial_growth:
+                 occurrence += 1
+         # Get a rough idea of the area covered during this time
+         if (self.substantial_time - self.start) > 20:
+             if self.vars['lighter_background']:
+                 growth = (np.sum(self.converted_video[self.start:(self.start + 10), :, :], 0) / 10) - (np.sum(self.converted_video[(self.substantial_time - 10):self.substantial_time, :, :], 0) / 10)
+             else:
+                 growth = (np.sum(self.converted_video[(self.substantial_time - 10):self.substantial_time, :, :], 0) / 10) - (
+                     np.sum(self.converted_video[self.start:(self.start + 10), :, :], 0) / 10)
+         else:
+             if self.vars['lighter_background']:
+                 growth = self.converted_video[self.start, ...] - self.converted_video[self.substantial_time, ...]
+             else:
+                 growth = self.converted_video[self.substantial_time, ...] - self.converted_video[self.start, ...]
+         intensity_extent = np.ptp(self.converted_video[self.start:self.substantial_time, :, :], axis=0)
+         growth[np.logical_or(growth < 0, intensity_extent < np.median(intensity_extent))] = 0
+         growth = bracket_to_uint8_image_contrast(growth)
+         growth *= self.borders
+         growth_vision = OneImageAnalysis(growth)
+         growth_vision.thresholding()
+         self.substantial_image = cv2.erode(growth_vision.binary_image, cross_33, iterations=2)
+ 
+         if np.any(self.substantial_image):
+             natural_noise = np.nonzero(intensity_extent == np.min(intensity_extent))
+             natural_noise = self.converted_video[self.start:self.substantial_time, natural_noise[0][0], natural_noise[1][0]]
+             natural_noise = moving_average(natural_noise, 5)
+             natural_noise = np.ptp(natural_noise)
+             subst_idx = np.nonzero(self.substantial_image)
+             cover_lengths = np.zeros(len(subst_idx[0]), dtype=np.uint32)
+             for index in np.arange(len(subst_idx[0])):
+                 vector = self.converted_video[self.start:self.substantial_time, subst_idx[0][index], subst_idx[1][index]]
+                 left, right = find_major_incline(vector, natural_noise)
+                 # If find_major_incline did find a major incline (otherwise it sets left to 0 and right to 1):
+                 if not np.logical_and(left == 0, right == 1):
+                     cover_lengths[index] = len(vector[left:-right])
+             # If this analysis fails, use a deterministic step
+             if len(cover_lengths[cover_lengths > 0]) > 0:
+                 self.step = (np.round(np.mean(cover_lengths[cover_lengths > 0])).astype(np.uint32) // 2) + 1
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Pre-processing detection: the time for a pixel to get covered is set to {self.step}")
+             else:
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Pre-processing detection: could not automatically find the time for a pixel to get covered. Default value is 1 for video length < 40 and 10 otherwise")
+ 
+             # Make sure to avoid a step overestimation
+             if self.step > self.dims[0] // 20:
+                 self.step = self.dims[0] // 20
+ 
+             if self.step == 0:
+                 self.step = 1
+         # When the first_move_threshold is not stringent enough, the program may detect a movement due to noise.
+         # In that case, the substantial_image is empty and there is no reason to proceed further.
+         else:
+             self.start = None
+         ##
+ 
+     def detection(self, compute_all_possibilities=False):
+         # self.lost_frames = (self.step - 1) * self.vars['repeat_video_smoothing']  # relevant when smoothing did not use padding.
+         self.lost_frames = self.step
+         # I/ Image by image segmentation algorithms
+         # If images contain a drift correction (zeros at the borders of the image),
+         # replace these zeros with normal background values before segmenting
+         if self.vars['frame_by_frame_segmentation'] or compute_all_possibilities:
+             logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect cell motion and growth using the frame by frame segmentation algorithm")
+             self.segmentation = np.zeros(self.dims, dtype=np.uint8)
+             if self.vars['drift_already_corrected']:
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Adjust images to drift correction and segment them")
+                 # 1. Get the mask valid for a number of images around it (step).
+                 mask_coord = np.zeros((self.dims[0], 4), dtype=np.uint32)
+                 for frame_i in np.arange(self.dims[0]):
+                     true_pixels = np.nonzero(self.converted_video[frame_i, ...])
+                     mask_coord[frame_i, :] = np.min(true_pixels[0]), np.max(true_pixels[0]), np.min(true_pixels[1]), np.max(true_pixels[1])
+             else:
+                 mask_coord = None
+ 
+             for t in np.arange(self.dims[0]):
+                 analysisi = self.frame_by_frame_segmentation(t, mask_coord)
+                 self.segmentation[t, ...] = analysisi.binary_image
+ 
+                 if self.vars['lose_accuracy_to_save_memory']:
+                     self.converted_video[t, ...] = bracket_to_uint8_image_contrast(analysisi.image)
+                 else:
+                     self.converted_video[t, ...] = analysisi.image
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     if self.vars['lose_accuracy_to_save_memory']:
+                         self.converted_video2[t, ...] = bracket_to_uint8_image_contrast(analysisi.image2)
+                     else:
+                         self.converted_video2[t, ...] = analysisi.image2
+ 
+         if self.vars['color_number'] == 2:
+             luminosity_segmentation, l_threshold_over_time = self.lum_value_segmentation(self.converted_video, do_threshold_segmentation=self.vars['do_threshold_segmentation'] or compute_all_possibilities)
+             self.converted_video = self.smooth_pixel_slopes(self.converted_video)
+             if self.vars['do_slope_segmentation'] or compute_all_possibilities:
+                 gradient_segmentation = self.lum_slope_segmentation(self.converted_video)
+                 gradient_segmentation[-self.lost_frames:, ...] = np.repeat(gradient_segmentation[-self.lost_frames, :, :][np.newaxis, :, :], self.lost_frames, axis=0)
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 if self.vars['do_threshold_segmentation'] or compute_all_possibilities:
+                     luminosity_segmentation2, l_threshold_over_time2 = self.lum_value_segmentation(self.converted_video2, do_threshold_segmentation=True)
+                     if self.vars['convert_for_motion']['logical'] == 'Or':
+                         luminosity_segmentation = np.logical_or(luminosity_segmentation, luminosity_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'And':
+                         luminosity_segmentation = np.logical_and(luminosity_segmentation, luminosity_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'Xor':
+                         luminosity_segmentation = np.logical_xor(luminosity_segmentation, luminosity_segmentation2)
+                 self.converted_video2 = self.smooth_pixel_slopes(self.converted_video2)
+                 if self.vars['do_slope_segmentation'] or compute_all_possibilities:
+                     gradient_segmentation2 = self.lum_slope_segmentation(self.converted_video2)
+                     gradient_segmentation2[-self.lost_frames:, ...] = np.repeat(gradient_segmentation2[-self.lost_frames, :, :][np.newaxis, :, :], self.lost_frames, axis=0)
+                     if self.vars['convert_for_motion']['logical'] == 'Or':
+                         gradient_segmentation = np.logical_or(gradient_segmentation, gradient_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'And':
+                         gradient_segmentation = np.logical_and(gradient_segmentation, gradient_segmentation2)
+                     elif self.vars['convert_for_motion']['logical'] == 'Xor':
+                         gradient_segmentation = np.logical_xor(gradient_segmentation, gradient_segmentation2)
+ 
+             if compute_all_possibilities:
+                 logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Compute all options to detect cell motion and growth. Maximal growth per frame: {self.vars['maximal_growth_factor']}")
+                 self.luminosity_segmentation = np.nonzero(luminosity_segmentation)
+                 self.gradient_segmentation = np.nonzero(gradient_segmentation)
+                 self.logical_and = np.nonzero(np.logical_and(luminosity_segmentation, gradient_segmentation))
+                 self.logical_or = np.nonzero(np.logical_or(luminosity_segmentation, gradient_segmentation))
+             elif not self.vars['frame_by_frame_segmentation']:
+                 if self.vars['do_threshold_segmentation'] and not self.vars['do_slope_segmentation']:
+                     logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity threshold segmentation algorithm")
+                     self.segmentation = luminosity_segmentation
+                 if self.vars['do_slope_segmentation']:  # and not self.vars['do_threshold_segmentation']: NEW
+                     logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detect with luminosity slope segmentation algorithm")
+                     # gradient_segmentation[:(self.lost_frames + 1), ...] = luminosity_segmentation[:(self.lost_frames + 1), ...]
+                     if not self.vars['do_threshold_segmentation']:  # NEW
+                         self.segmentation = gradient_segmentation
+                 if np.logical_and(self.vars['do_threshold_segmentation'], self.vars['do_slope_segmentation']):
+                     if self.vars['true_if_use_light_AND_slope_else_OR']:
+                         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detection results from threshold AND slope segmentation algorithms")
+                         self.segmentation = np.logical_and(luminosity_segmentation, gradient_segmentation)
+                     else:
+                         logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Detection results from threshold OR slope segmentation algorithms")
+                         self.segmentation = np.logical_or(luminosity_segmentation, gradient_segmentation)
+                 self.segmentation = self.segmentation.astype(np.uint8)
+         self.converted_video2 = None
+ 
+     def frame_by_frame_segmentation(self, t, mask_coord=None):
+         contrasted_im = bracket_to_uint8_image_contrast(self.converted_video[t, :, :])
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             contrasted_im2 = bracket_to_uint8_image_contrast(self.converted_video2[t, :, :])
+         # 1. Get the mask valid for a number of images around it (step).
+         if self.vars['drift_already_corrected']:
+             if t < self.step // 2:
+                 t_start = 0
+                 t_end = self.step
+             elif t > (self.dims[0] - self.step // 2):
+                 t_start = self.dims[0] - self.step
+                 t_end = self.dims[0]
+             else:
+                 t_start = t - (self.step // 2)
+                 t_end = t + (self.step // 2)
+             min_y, max_y = np.max(mask_coord[t_start:t_end, 0]), np.min(mask_coord[t_start:t_end, 1])
+             min_x, max_x = np.max(mask_coord[t_start:t_end, 2]), np.min(mask_coord[t_start:t_end, 3])
+             # 2. Bracket the focal image
+             image_i = contrasted_im[min_y:(max_y + 1), min_x:(max_x + 1)].astype(np.float64)
+             image_i /= np.mean(image_i)
+             image_i = OneImageAnalysis(image_i)
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 image_i2 = contrasted_im2[min_y:(max_y + 1), min_x:(max_x + 1)].astype(np.float64)
+                 image_i2 /= np.mean(image_i2)
+                 image_i.image2 = image_i2
+             mask = (self.converted_video[t, ...] > 0).astype(np.uint8)
+         else:
+             mask = None
+         # 3. Bracket the focal image
+         if self.vars['grid_segmentation']:
+             int_variation_thresh = 100 - (np.ptp(contrasted_im) * 90 / 255)
+         else:
+             int_variation_thresh = None
+         analysisi = OneImageAnalysis(bracket_to_uint8_image_contrast(contrasted_im / np.mean(contrasted_im)))
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             analysisi.image2 = bracket_to_uint8_image_contrast(contrasted_im2 / np.mean(contrasted_im2))
+ 
+         if t == 0:
+             analysisi.previous_binary_image = self.origin
+         else:
+             analysisi.previous_binary_image = deepcopy(self.segmentation[t - 1, ...])
+ 
+         analysisi.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'],
+                                bio_label=self.vars["bio_label"], bio_label2=self.vars["bio_label2"],
+                                grid_segmentation=self.vars['grid_segmentation'],
+                                lighter_background=self.vars['lighter_background'],
+                                side_length=20, step=5, int_variation_thresh=int_variation_thresh, mask=mask,
+                                filter_spec=None)  # filtering already done when creating converted_video
+ 
+         return analysisi
+ 
+     def lum_value_segmentation(self, converted_video, do_threshold_segmentation):
+         shape_motion_failed: bool = False
+         if self.vars['lighter_background']:
+             covering_l_values = np.min(converted_video[:self.substantial_time, :, :],
+                                        0) * self.substantial_image
+         else:
+             covering_l_values = np.max(converted_video[:self.substantial_time, :, :],
+                                        0) * self.substantial_image
+         # Avoid errors by checking whether the covering values are nonzero
+         covering_l_values = covering_l_values[covering_l_values != 0]
+         if len(covering_l_values) == 0:
+             shape_motion_failed = True
+         if not shape_motion_failed:
+             value_segmentation_thresholds = np.arange(0.8, -0.7, -0.1)
+             validated_thresholds = np.zeros(value_segmentation_thresholds.shape, dtype=bool)
+             counter = 0
+             while_condition = True
+             max_motion_per_frame = (self.dims[1] * self.dims[2]) * self.vars['maximal_growth_factor'] * 2
+             if self.vars['lighter_background']:
+                 basic_bckgrnd_values = np.quantile(converted_video[:(self.lost_frames + 1), ...], 0.9, axis=(1, 2))
+             else:
+                 basic_bckgrnd_values = np.quantile(converted_video[:(self.lost_frames + 1), ...], 0.1, axis=(1, 2))
+             # Try different threshold values and keep those that do not
+             # segment more than a given fraction of the image
+             while counter <= 14:
+                 value_threshold = value_segmentation_thresholds[counter]
+                 if self.vars['lighter_background']:
+                     l_threshold = (1 + value_threshold) * np.max(covering_l_values)
+                 else:
+                     l_threshold = (1 - value_threshold) * np.min(covering_l_values)
+                 starting_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video[:(self.lost_frames + 1), ...],
+                                                                                       basic_bckgrnd_values, l_threshold,
+                                                                                       self.vars['lighter_background'])
+ 
+                 changing_pixel_number = np.sum(np.absolute(np.diff(starting_segmentation.astype(np.int8), 1, 0)), (1, 2))
+                 validation = np.max(np.sum(starting_segmentation, (1, 2))) < max_motion_per_frame and (
+                     np.max(changing_pixel_number) < max_motion_per_frame)
+                 validated_thresholds[counter] = validation
+                 if np.any(validated_thresholds):
+                     if not validation:
+                         break
+                 counter += 1
+             # If any thresholds are accepted, use their average to perform the final thresholding
+             valid_number = validated_thresholds.sum()
+             if valid_number > 0:
+                 if valid_number > 2:
+                     index_to_keep = 2
+                 else:
+                     index_to_keep = valid_number - 1
+                 value_threshold = value_segmentation_thresholds[
+                     np.uint8(np.floor(np.mean(np.nonzero(validated_thresholds)[0][index_to_keep])))]
+             else:
+                 value_threshold = 0
+ 
+             if self.vars['lighter_background']:
+                 l_threshold = (1 + value_threshold) * np.max(covering_l_values)
+             else:
+                 l_threshold = (1 - value_threshold) * np.min(covering_l_values)
+             if do_threshold_segmentation:
+                 if self.vars['lighter_background']:
+                     basic_bckgrnd_values = np.quantile(converted_video, 0.9, axis=(1, 2))
+                 else:
+                     basic_bckgrnd_values = np.quantile(converted_video, 0.1, axis=(1, 2))
+                 luminosity_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video, basic_bckgrnd_values,
+                                                                                         l_threshold, self.vars['lighter_background'])
+             else:
+                 luminosity_segmentation, l_threshold_over_time = segment_with_lum_value(converted_video[:(self.lost_frames + 1), ...],
+                                                                                         basic_bckgrnd_values, l_threshold,
+                                                                                         self.vars['lighter_background'])
+         else:
+             luminosity_segmentation, l_threshold_over_time = None, None
+ 
+         return luminosity_segmentation, l_threshold_over_time
+ 
+     def smooth_pixel_slopes(self, converted_video):
+         # smoothed_video = np.zeros(
+         #     (self.dims[0] - self.lost_frames, self.dims[1], self.dims[2]),
+         #     dtype=np.float64)
+         try:
+             if self.vars['lose_accuracy_to_save_memory']:
+                 smoothed_video = np.zeros(self.dims, dtype=np.float16)
+                 smooth_kernel = np.ones(self.step) / self.step
+                 for i in np.arange(converted_video.shape[1]):
+                     for j in np.arange(converted_video.shape[2]):
+                         padded = np.pad(converted_video[:, i, j] / self.mean_intensity_per_frame,
+                                         (self.step // 2, self.step - 1 - self.step // 2), mode='edge')
+                         moving_average = np.convolve(padded, smooth_kernel, mode='valid')
+                         if self.vars['repeat_video_smoothing'] > 1:
+                             for it in np.arange(1, self.vars['repeat_video_smoothing']):
+                                 padded = np.pad(moving_average,
+                                                 (self.step // 2, self.step - 1 - self.step // 2), mode='edge')
+                                 moving_average = np.convolve(padded, smooth_kernel, mode='valid')
+                         smoothed_video[:, i, j] = moving_average.astype(np.float16)
+             else:
+                 smoothed_video = np.zeros(self.dims, dtype=np.float64)
+                 smooth_kernel = np.ones(self.step) / self.step
+                 for i in np.arange(converted_video.shape[1]):
+                     for j in np.arange(converted_video.shape[2]):
+                         padded = np.pad(converted_video[:, i, j] / self.mean_intensity_per_frame,
+                                         (self.step // 2, self.step - 1 - self.step // 2), mode='edge')
+                         moving_average = np.convolve(padded, smooth_kernel, mode='valid')
+                         if self.vars['repeat_video_smoothing'] > 1:
+                             for it in np.arange(1, self.vars['repeat_video_smoothing']):
+                                 padded = np.pad(moving_average,
+                                                 (self.step // 2, self.step - 1 - self.step // 2), mode='edge')
+                                 moving_average = np.convolve(padded, smooth_kernel, mode='valid')
+                         smoothed_video[:, i, j] = moving_average
+             return smoothed_video
+ 
+         except MemoryError:
+             logging.error("Not enough RAM available to smooth pixel curves. Detection may fail.")
+             smoothed_video = converted_video
+             return smoothed_video
+ 
+     def lum_slope_segmentation(self, converted_video):
+         shape_motion_failed: bool = False
+         gradient_segmentation = np.zeros(self.dims, np.uint8)
+         # 2) Contrast increase
+         oridx = np.nonzero(self.origin)
+         notoridx = np.nonzero(1 - self.origin)
+         do_increase_contrast = np.mean(converted_video[0, oridx[0], oridx[1]]) * 10 > np.mean(
+             converted_video[0, notoridx[0], notoridx[1]])
+         necessary_memory = self.dims[0] * self.dims[1] * self.dims[2] * 64 * 2 * 1.16415e-10
+         available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+         if self.vars['lose_accuracy_to_save_memory']:
+             derive = converted_video.astype(np.float16)
+         else:
+             derive = converted_video.astype(np.float64)
+         if necessary_memory > available_memory:
+             converted_video = None
+ 
+         if do_increase_contrast:
+             derive = np.square(derive)
+ 
+         # 3) Get the gradient
+         necessary_memory = derive.size * 64 * 4 * 1.16415e-10
+         available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+         if necessary_memory > available_memory:
+             for cy in np.arange(self.dims[1]):
+                 for cx in np.arange(self.dims[2]):
+                     if self.vars['lose_accuracy_to_save_memory']:
+                         derive[:, cy, cx] = np.gradient(derive[:, cy, cx], self.step).astype(np.float16)
+                     else:
+                         derive[:, cy, cx] = np.gradient(derive[:, cy, cx], self.step)
+         else:
+             if self.vars['lose_accuracy_to_save_memory']:
+                 derive = np.gradient(derive, self.step, axis=0).astype(np.float16)
+             else:
+                 derive = np.gradient(derive, self.step, axis=0)
+ 
+         # 4) Segment
+         if self.vars['lighter_background']:
+             covering_slopes = np.min(derive[:self.substantial_time, :, :], 0) * self.substantial_image
+         else:
+             covering_slopes = np.max(derive[:self.substantial_time, :, :], 0) * self.substantial_image
+         covering_slopes = covering_slopes[covering_slopes != 0]
+         if len(covering_slopes) == 0:
+             shape_motion_failed = True
+ 
+         if not shape_motion_failed:
+             ####
+             # ease_slope_segmentation = 0.8
+             value_segmentation_thresholds = np.arange(0.8, -0.7, -0.1)
+             validated_thresholds = np.zeros(value_segmentation_thresholds.shape, dtype=bool)
+             counter = 0
+             while_condition = True
+             max_motion_per_frame = (self.dims[1] * self.dims[2]) * self.vars['maximal_growth_factor']
+             # Try different slope threshold values and keep those that do not
+             # segment more than a given fraction of the image
+             while counter <= 14:
+                 ease_slope_segmentation = value_segmentation_thresholds[counter]
+                 if self.vars['lighter_background']:
+                     gradient_threshold = (1 + ease_slope_segmentation) * np.max(covering_slopes)
+                     sample = np.less(derive[:self.substantial_time], gradient_threshold)
+                 else:
+                     gradient_threshold = (1 - ease_slope_segmentation) * np.min(covering_slopes)
+                     sample = np.greater(derive[:self.substantial_time], gradient_threshold)
+                 changing_pixel_number = np.sum(np.absolute(np.diff(sample.astype(np.int8), 1, 0)), (1, 2))
+                 validation = np.max(np.sum(sample, (1, 2))) < max_motion_per_frame and (
+                     np.max(changing_pixel_number) < max_motion_per_frame)
+                 validated_thresholds[counter] = validation
+                 if np.any(validated_thresholds):
+                     if not validation:
+                         break
+                 counter += 1
+             # If any thresholds are accepted, use their average to perform the final thresholding
+             valid_number = validated_thresholds.sum()
+             if valid_number > 0:
+                 if valid_number > 2:
+                     index_to_keep = 2
+                 else:
+                     index_to_keep = valid_number - 1
+                 ease_slope_segmentation = value_segmentation_thresholds[
+                     np.uint8(np.floor(np.mean(np.nonzero(validated_thresholds)[0][index_to_keep])))]
+             else:
+                 ease_slope_segmentation = 0
+ 
+             if self.vars['lighter_background']:
+                 gradient_threshold = (1 - ease_slope_segmentation) * np.max(covering_slopes)
+                 gradient_segmentation[:-self.lost_frames, :, :] = np.less(derive, gradient_threshold)[self.lost_frames:, :, :]
+             else:
+                 gradient_threshold = (1 - ease_slope_segmentation) * np.min(covering_slopes)
+                 gradient_segmentation[:-self.lost_frames, :, :] = np.greater(derive, gradient_threshold)[self.lost_frames:, :, :]
+         else:
+             gradient_segmentation = None
+         return gradient_segmentation
+ 
+     def update_ring_width(self):
+         # Make sure that self.pixel_ring_depth is odd and at least 3
+         if self.pixel_ring_depth <= 3:
+             self.pixel_ring_depth = 3
+         if self.pixel_ring_depth % 2 == 0:
+             self.pixel_ring_depth = self.pixel_ring_depth + 1
+         self.erodila_disk = Ellipse((self.pixel_ring_depth, self.pixel_ring_depth)).create().astype(np.uint8)
+         self.max_distance = self.pixel_ring_depth * self.vars['detection_range_factor']
+ 
778
+ def initialize_post_processing(self):
779
+ ## Initialization
780
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting Post_processing. Fading detection: {self.vars['do_fading']}: {self.vars['fading']}, Subtract background: {self.vars['subtract_background']}, Correct errors around initial shape: {self.vars['correct_errors_around_initial']}, Connect distant shapes: {self.vars['detection_range_factor'] > 0}, How to select appearing cell(s): {self.vars['appearance_detection_method']}")
781
+
782
+ self.binary = np.zeros(self.dims[:3], dtype=np.uint8)
783
+ if self.origin.shape[0] != self.binary[self.start - 1, :, :].shape[0] or self.origin.shape[1] != self.binary[self.start - 1, :, :].shape[1]:
784
+ logging.error("Unaltered videos deprecated, they have been created with different settings.\nDelete .npy videos and Data to run Cellects quickly.pkl and re-run")
785
+
786
+ if self.vars['origin_state'] == "invisible":
787
+ self.binary[self.start - 1, :, :] = deepcopy(self.origin)
788
+ self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = self.converted_video[self.start, self.origin_idx[0], self.origin_idx[1]]
789
+ else:
790
+ if self.vars['origin_state'] == "fluctuating":
791
+ self.covering_intensity[self.origin_idx[0], self.origin_idx[1]] = np.median(self.converted_video[:self.start, self.origin_idx[0], self.origin_idx[1]], axis=0)
792
+
793
+ self.binary[:self.start, :, :] = np.repeat(np.expand_dims(self.origin, 0), self.start, axis=0)
794
+ if self.start < self.step:
795
+ frames_to_assess = self.step
796
+ self.segmentation[self.start - 1, ...] = self.binary[self.start - 1, :, :]
797
+ for t in np.arange(self.start, self.lost_frames):
798
+ # Only keep pixels that are always detected
799
+ always_found = np.sum(self.segmentation[t:(t + frames_to_assess), ...], 0)
800
+ always_found = always_found == frames_to_assess
801
+ # Remove too small shapes
802
+ without_small, stats, centro = cc(always_found.astype(np.uint8))
803
+ large_enough = np.nonzero(stats[1:, 4] > ((self.vars['first_move_threshold'] + 1) // 2))[0]
804
+ if len(large_enough) > 0:
805
+ always_found *= np.isin(always_found, large_enough + 1)
806
+ always_found = np.logical_or(always_found, self.segmentation[t - 1, ...])
807
+ self.segmentation[t, ...] *= always_found
808
+ else:
809
+ self.segmentation[t, ...] = 0
810
+ self.segmentation[t, ...] = np.logical_or(self.segmentation[t - 1, ...], self.segmentation[t, ...])
811
+ self.mean_distance_per_frame = None
812
+ self.surfarea = np.zeros(self.dims[0], dtype =np.uint64)
813
+ self.surfarea[:self.start] = np.sum(self.binary[:self.start, :, :], (1, 2))
814
+ self.gravity_field = inverted_distance_transform(self.binary[(self.start - 1), :, :],
815
+ np.sqrt(np.sum(self.binary[(self.start - 1), :, :])))
816
+ if self.vars['correct_errors_around_initial']:
817
+ self.rays, self.sun = draw_me_a_sun(self.binary[(self.start - 1), :, :], ray_length_coef=1.25) # plt.imshow(sun)
818
+ self.holes = np.zeros(self.dims[1:], dtype=np.uint8)
819
+ self.pixel_ring_depth += 2
820
+ self.update_ring_width()
821
+
822
+ if self.vars['prevent_fast_growth_near_periphery']:
823
+ self.near_periphery = np.zeros(self.dims[1:])
824
+ if self.vars['arena_shape'] == 'circle':
825
+ periphery_width = self.vars['periphery_width'] * 2
826
+ elliperiphery = Ellipse((self.dims[1] - periphery_width, self.dims[2] - periphery_width)).create()
827
+ half_width = periphery_width // 2
828
+ if periphery_width % 2 == 0:
829
+ self.near_periphery[half_width:-half_width, half_width:-half_width] = elliperiphery
830
+ else:
831
+ self.near_periphery[half_width:-half_width - 1, half_width:-half_width - 1] = elliperiphery
832
+ self.near_periphery = 1 - self.near_periphery
833
+ else:
834
+ self.near_periphery[:self.vars['periphery_width'], :] = 1
835
+ self.near_periphery[-self.vars['periphery_width']:, :] = 1
836
+ self.near_periphery[:, :self.vars['periphery_width']] = 1
837
+ self.near_periphery[:, -self.vars['periphery_width']:] = 1
838
+ self.near_periphery = np.nonzero(self.near_periphery)
839
+ # near_periphery = np.zeros(self.dims[1:])
840
+ # near_periphery[self.near_periphery] = 1
841
+
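+ # Illustrative sketch (not part of Cellects): the stabilisation above keeps a
+ # pixel only when it is detected in every one of the next 'window' frames, then
+ # drops connected components that are too small to be real. Hypothetical names:
+ # def sketch_stabilise(segmentation, t, window, min_size):
+ #     always_found = segmentation[t:t + window].sum(0) == window
+ #     n, labels, stats, _ = cv2.connectedComponentsWithStats(always_found.astype(np.uint8))
+ #     keep = np.nonzero(stats[1:, 4] >= min_size)[0] + 1  # column 4 holds the area
+ #     return np.isin(labels, keep)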
842
+ def update_shape(self, show_seg):
843
+
844
+ # Get from gradients, a 2D matrix of potentially covered pixels
845
+ # I/ dilate the shape made with covered pixels to assess for covering
846
+
847
+ # I/ 1) Only keep pixels that have been detected at least two times in the three previous frames
848
+ if self.dims[0] < 100:
849
+ new_potentials = self.segmentation[self.t, :, :]
850
+ else:
851
+ if self.t > 1:
852
+ new_potentials = np.sum(self.segmentation[(self.t - 2): (self.t + 1), :, :], 0, dtype=np.uint8)
853
+ else:
854
+ new_potentials = np.sum(self.segmentation[: (self.t + 1), :, :], 0, dtype=np.uint8)
855
+ new_potentials[new_potentials == 1] = 0
856
+ new_potentials[new_potentials > 1] = 1
857
+
858
+ # I/ 2) If the candidate pixels cover more than 50% of the image,
859
+ # the summed frames are considered noisy and we fall back to using a single frame.
860
+ frame_counter = -1
861
+ maximal_size = 0.5 * new_potentials.size
862
+ if (self.vars["do_threshold_segmentation"] or self.vars["frame_by_frame_segmentation"]) and self.t > np.max((self.start + self.step, 6)):
863
+ maximal_size = np.min((np.max(self.binary[:self.t].sum((1, 2))) * (1 + self.vars['maximal_growth_factor']), self.borders.sum()))
864
+ while np.logical_and(np.sum(new_potentials) > maximal_size,
865
+ frame_counter <= 5): # np.logical_and(np.sum(new_potentials > 0) > 5 * np.sum(dila_ring), frame_counter <= 5):
866
+ frame_counter += 1
867
+ if frame_counter > self.t:
868
+ break
869
+ else:
870
+ if frame_counter < 5:
871
+ new_potentials = self.segmentation[self.t - frame_counter, :, :]
872
+ else:
873
+ # If taking a single image is not enough, only keep pixels detected in all six most recent frames as new_potentials
874
+ # Since this does not rely on any slope calculation, it should be less noisy
875
+ new_potentials = np.sum(self.segmentation[(self.t - 5): (self.t + 1), :, :], 0, dtype=np.uint8)
876
+ new_potentials[new_potentials < 6] = 0
877
+ new_potentials[new_potentials == 6] = 1
878
+
879
+
880
+ new_shape = deepcopy(self.binary[self.t - 1, :, :])
881
+ new_potentials = cv2.morphologyEx(new_potentials, cv2.MORPH_CLOSE, cross_33)
882
+ new_potentials = cv2.morphologyEx(new_potentials, cv2.MORPH_OPEN, cross_33) * self.borders
883
+ new_shape = np.logical_or(new_shape, new_potentials).astype(np.uint8)
884
+ # Add distant shapes within a radius, scoring every added pixel according to its distance
885
+ if not self.vars['several_blob_per_arena']:
886
+ if new_shape.sum() == 0:
887
+ new_shape = deepcopy(new_potentials)
888
+ else:
889
+ pads = ProgressivelyAddDistantShapes(new_potentials, new_shape, self.max_distance)
890
+ r = weakref.ref(pads)
891
+ # If max_distance is nonzero, look for distant shapes
892
+ pads.consider_shapes_sizes(self.vars['min_size_for_connection'],
893
+ self.vars['max_size_for_connection'])
894
+ pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=True)
895
+
896
+ new_shape = deepcopy(pads.expanded_shape)
897
+ new_shape[new_shape > 1] = 1
898
+ if np.logical_and(self.t > self.step, self.t < self.dims[0]):
899
+ if np.any(pads.expanded_shape > 5):
900
+ # Add distant shapes back in time at the covering speed of neighbors
901
+ self.binary[self.t][np.nonzero(new_shape)] = 1
902
+ self.binary[(self.step):(self.t + 1), :, :] = \
903
+ pads.modify_past_analysis(self.binary[(self.step):(self.t + 1), :, :],
904
+ self.segmentation[(self.step):(self.t + 1), :, :])
905
+ new_shape = deepcopy(self.binary[self.t, :, :])
906
+ pads = None
907
+
908
+ # Fill holes
909
+ new_shape = cv2.morphologyEx(new_shape, cv2.MORPH_CLOSE, cross_33)
910
+
911
+ if self.vars['do_fading'] and (self.t > self.step + self.lost_frames):
912
+ # Shape Erosion
913
+ # I/ After a substantial growth, erode the shape made with covered pixels to assess for fading
914
+ # Use the newly covered pixels to calculate their mean covering intensity
915
+ new_idx = np.nonzero(np.logical_xor(new_shape, self.binary[self.t - 1, :, :]))
916
+ start_intensity_monitoring = self.t - self.lost_frames - self.step
917
+ end_intensity_monitoring = self.t - self.lost_frames
918
+ self.covering_intensity[new_idx[0], new_idx[1]] = np.median(self.converted_video[start_intensity_monitoring:end_intensity_monitoring, new_idx[0], new_idx[1]], axis=0)
919
+ previous_binary = self.binary[self.t - 1, :, :]
920
+ greyscale_image = self.converted_video[self.t - self.lost_frames, :, :]
921
+ protect_from_fading = None
922
+ if self.vars['origin_state'] == 'constant':
923
+ protect_from_fading = self.origin
924
+ new_shape, self.covering_intensity = cell_leaving_detection(new_shape, self.covering_intensity, previous_binary, greyscale_image, self.vars['fading'], self.vars['lighter_background'], self.vars['several_blob_per_arena'], self.erodila_disk, protect_from_fading)
925
+
926
+ self.covering_intensity *= new_shape
927
+ self.binary[self.t, :, :] = new_shape * self.borders
928
+ self.surfarea[self.t] = np.sum(self.binary[self.t, :, :])
929
+
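+ # Illustrative sketch (not part of Cellects): the covering intensity of newly
+ # covered pixels is their per-pixel median over the monitoring window
+ # [t - lost_frames - step, t - lost_frames). Hypothetical names:
+ # ys, xs = np.nonzero(np.logical_xor(new_shape, previous_shape))
+ # covering_intensity[ys, xs] = np.median(video[t0:t1, ys, xs], axis=0)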
930
+ # Calculate the mean distance covered per frame and correct for a ring of not really fading pixels
931
+ if self.mean_distance_per_frame is None:
932
+ if self.vars['correct_errors_around_initial'] and not self.vars['several_blob_per_arena']:
933
+ if np.logical_and((self.t % 20) == 0,
934
+ np.logical_and(self.surfarea[self.t] > self.substantial_growth,
935
+ self.surfarea[self.t] < self.substantial_growth * 2)):
936
+ shape = self.binary[self.t, :, :] * self.sun
937
+ back = (1 - self.binary[self.t, :, :]) * self.sun
938
+ for ray in self.rays:
939
+ # For each sun's ray, see how they cross the shape/back and
940
+ # store the gravity_field value of these pixels (distance to the original shape).
941
+ ray_through_shape = (shape == ray) * self.gravity_field
942
+ ray_through_back = (back == ray) * self.gravity_field
943
+ if np.any(ray_through_shape):
944
+ if np.any(ray_through_back):
945
+ # If at least one back pixel is nearer to the original shape than a shape pixel,
946
+ # there is a hole to fill.
947
+ if np.any(ray_through_back > np.min(ray_through_shape[ray_through_shape > 0])):
948
+ # Check whether the nearest pixels are shape; if so, suppress them until the nearest pixel
949
+ # becomes back
950
+ while np.max(ray_through_back) <= np.max(ray_through_shape):
951
+ ray_through_shape[ray_through_shape == np.max(ray_through_shape)] = 0
952
+ # Now, all back pixels that are nearer than the closest shape pixel should get filled
953
+ # To do so, replace back pixels further than the nearest shape pixel by 0
954
+ ray_through_back[ray_through_back < np.max(ray_through_shape)] = 0
955
+ self.holes[np.nonzero(ray_through_back)] = 1
956
+ else:
957
+ self.rays = np.concatenate((self.rays[:(ray - 2)], self.rays[(ray - 1):]))
958
+ ray_through_shape = None
959
+ ray_through_back = None
960
+ if np.any(self.surfarea[:self.t] > self.substantial_growth * 2):
961
+
962
+ if self.vars['correct_errors_around_initial'] and not self.vars['several_blob_per_arena']:
963
+ # Apply the hole correction
964
+ self.holes = cv2.morphologyEx(self.holes, cv2.MORPH_CLOSE, cross_33, iterations=10)
965
+ # If some holes are not covered by now
966
+ if np.any(self.holes * (1 - self.binary[self.t, :, :])):
967
+ self.binary[:(self.t + 1), :, :], holes_time_end, distance_against_time = \
968
+ dynamically_expand_to_fill_holes(self.binary[:(self.t + 1), :, :], self.holes)
969
+ if holes_time_end is not None:
970
+ self.binary[holes_time_end:(self.t + 1), :, :] += self.binary[holes_time_end, :, :]
971
+ self.binary[holes_time_end:(self.t + 1), :, :][
972
+ self.binary[holes_time_end:(self.t + 1), :, :] > 1] = 1
973
+ self.surfarea[:(self.t + 1)] = np.sum(self.binary[:(self.t + 1), :, :], (1, 2))
974
+
975
+ else:
976
+ distance_against_time = [1, 2]
977
+ else:
978
+ distance_against_time = [1, 2]
979
+ distance_against_time = np.diff(distance_against_time)
980
+ if len(distance_against_time) > 0:
981
+ self.mean_distance_per_frame = np.mean(- distance_against_time)
982
+ else:
983
+ self.mean_distance_per_frame = 1
984
+
985
+ if self.vars['prevent_fast_growth_near_periphery']:
986
+ # growth_near_periphery = np.diff(self.binary[self.t-1:self.t+1, :, :] * self.near_periphery, axis=0)
987
+ growth_near_periphery = np.diff(self.binary[self.t-1:self.t+1, self.near_periphery[0], self.near_periphery[1]], axis=0)
988
+ if (growth_near_periphery == 1).sum() > self.vars['max_periphery_growth']:
989
+ # self.binary[self.t, self.near_periphery[0], self.near_periphery[1]] = self.binary[self.t - 1, self.near_periphery[0], self.near_periphery[1]]
990
+ periphery_to_remove = np.zeros(self.dims[1:], dtype=np.uint8)
991
+ periphery_to_remove[self.near_periphery[0], self.near_periphery[1]] = self.binary[self.t, self.near_periphery[0], self.near_periphery[1]]
992
+ shapes, stats, centers = cc(periphery_to_remove)
993
+ periphery_to_remove = np.nonzero(np.isin(shapes, np.nonzero(stats[:, 4] > self.vars['max_periphery_growth'])[0][1:]))
994
+ self.binary[self.t, periphery_to_remove[0], periphery_to_remove[1]] = self.binary[self.t - 1, periphery_to_remove[0], periphery_to_remove[1]]
995
+ if not self.vars['several_blob_per_arena']:
996
+ shapes, stats, centers = cc(self.binary[self.t, ...])
997
+ shapes[shapes != 1] = 0
998
+ self.binary[self.t, ...] = shapes
999
+
1000
+ # Display
1001
+
1002
+ if show_seg:
1003
+ if self.visu is not None:
1004
+ im_to_display = deepcopy(self.visu[self.t, ...])
1005
+ contours = np.nonzero(cv2.morphologyEx(self.binary[self.t, :, :], cv2.MORPH_GRADIENT, cross_33))
1006
+ if self.vars['lighter_background']:
1007
+ im_to_display[contours[0], contours[1]] = 0
1008
+ else:
1009
+ im_to_display[contours[0], contours[1]] = 255
1010
+ else:
1011
+ im_to_display = self.binary[self.t, :, :] * 255
1012
+ imtoshow = cv2.resize(im_to_display, (540, 540))
1013
+ cv2.imshow("shape_motion", imtoshow)
1014
+ cv2.waitKey(1)
1015
+ self.t += 1
1016
+
1017
+ def save_coord_specimen_and_contour(self):
1018
+ if self.vars['save_coord_specimen']:
1019
+ np.save(f"coord_specimen{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1020
+ smallest_memory_array(np.nonzero(self.binary), "uint"))
1021
+ if self.vars['save_coord_contour']:
1022
+ contours = np.zeros(self.dims[:3], np.uint8)
1023
+ for frame in range(self.dims[0]):
1024
+ eroded_binary = cv2.erode(self.binary[frame, ...], cross_33, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1025
+ contours[frame, ...] = self.binary[frame, ...] - eroded_binary
1026
+ np.save(f"coord_contour{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1027
+ smallest_memory_array(np.nonzero(contours), "uint"))
1028
+
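+ # Illustrative sketch (not part of Cellects): the one-pixel contour saved above
+ # is the mask minus its erosion (a morphological gradient). For example:
+ # mask = np.zeros((5, 5), np.uint8); mask[1:4, 1:4] = 1
+ # contour = mask - cv2.erode(mask, cross_33)  # ring of 8 border pixels
+ # np.nonzero(contour) then yields the (y, x) coordinates that get saved.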
1029
+ def get_descriptors_from_binary(self, release_memory=True):
1030
+ ##
1031
+ if release_memory:
1032
+ self.substantial_image = None
1033
+ self.covering_intensity = None
1034
+ self.segmentation = None
1035
+ self.gravity_field = None
1036
+ self.sun = None
1037
+ self.rays = None
1038
+ self.holes = None
1039
+ collect()
1040
+ self.save_coord_specimen_and_contour()
1041
+ if self.vars['do_fading']:
1042
+ self.newly_explored_area = np.zeros(self.dims[0], dtype=np.uint64)
1043
+ self.already_explored_area = deepcopy(self.origin)
1044
+ for self.t in range(self.dims[0]):
1045
+ self.newly_explored_area[self.t] = ((self.binary[self.t, :, :] - self.already_explored_area) == 1).sum()
1046
+ self.already_explored_area = np.logical_or(self.already_explored_area, self.binary[self.t, :, :])
1047
+
1048
+ self.surfarea = self.binary.sum((1, 2))
1049
+ timings = self.vars['exif']
1050
+ if len(timings) < self.dims[0]:
1051
+ timings = np.arange(self.dims[0])
1052
+ if np.any(timings > 0):
1053
+ self.time_interval = np.mean(np.diff(timings))
1054
+ timings = timings[:self.dims[0]]
1055
+ available_descriptors_in_sd = list(from_shape_descriptors_class.keys())
1056
+ # ["area", "perimeter", "circularity", "rectangularity", "total_hole_area", "solidity",
1057
+ # "convexity", "eccentricity", "euler_number", "standard_deviation_y",
1058
+ # "standard_deviation_x", "skewness_y", "skewness_x", "kurtosis_y", "kurtosis_x",
1059
+ # "major_axis_len", "minor_axis_len", "axes_orientation"]
1060
+ all_descriptors = []
1061
+ to_compute_from_sd = []
1062
+ for name, do_compute in self.vars['descriptors'].items():
1063
+ if do_compute:
1064
+ all_descriptors.append(name)
1065
+ if np.isin(name, available_descriptors_in_sd):
1066
+ to_compute_from_sd.append(name)
1067
+ self.compute_solidity_separately: bool = self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not np.isin("solidity", to_compute_from_sd)
1068
+ if self.compute_solidity_separately:
1069
+ self.solidity = np.zeros(self.dims[0], dtype=np.float64)
1070
+ if not self.vars['several_blob_per_arena']:
1071
+ self.one_row_per_frame = pd.DataFrame(np.zeros((self.dims[0], 2 + len(all_descriptors))),
1072
+ columns=['arena', 'time'] + all_descriptors)
1073
+ self.one_row_per_frame['arena'] = [self.one_descriptor_per_arena['arena']] * self.dims[0]
1074
+ self.one_row_per_frame['time'] = timings
1075
+ # Solidity must be computed if growth-transition detection is enabled
1076
+ origin = self.binary[0, :, :]
1077
+ self.one_descriptor_per_arena["first_move"] = pd.NA
1078
+
1079
+ for t in np.arange(self.dims[0]):
1080
+ SD = ShapeDescriptors(self.binary[t, :, :], to_compute_from_sd)
1081
+
1082
+
1083
+ # NEW
1084
+ for descriptor in to_compute_from_sd:
1085
+ self.one_row_per_frame.loc[t, descriptor] = SD.descriptors[descriptor]
1086
+ # Old
1087
+ # self.one_row_per_frame.iloc[t, 2: 2 + len(descriptors)] = SD.descriptors.values()
1088
+
1089
+
1090
+ if self.compute_solidity_separately:
1091
+ solidity = ShapeDescriptors(self.binary[t, :, :], ["solidity"])
1092
+ self.solidity[t] = solidity.descriptors["solidity"]
1093
+ # self.solidity[t] = list(solidity.descriptors.values())[0]
1094
+ # I) Find a first pseudopod [aim: time]
1095
+ if pd.isna(self.one_descriptor_per_arena["first_move"]):
1096
+ if self.surfarea[t] >= (origin.sum() + self.vars['first_move_threshold']):
1097
+ self.one_descriptor_per_arena["first_move"] = t
1098
+
1099
+ # Apply the scale to the variables
1100
+ if self.vars['output_in_mm']:
1101
+ if np.isin('area', to_compute_from_sd):
1102
+ self.one_row_per_frame['area'] *= self.vars['average_pixel_size']
1103
+ if np.isin('total_hole_area', to_compute_from_sd):
1104
+ self.one_row_per_frame['total_hole_area'] *= self.vars['average_pixel_size']
1105
+ if np.isin('perimeter', to_compute_from_sd):
1106
+ self.one_row_per_frame['perimeter'] *= np.sqrt(self.vars['average_pixel_size'])
1107
+ if np.isin('major_axis_len', to_compute_from_sd):
1108
+ self.one_row_per_frame['major_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1109
+ if np.isin('minor_axis_len', to_compute_from_sd):
1110
+ self.one_row_per_frame['minor_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1111
+ else:
1112
+ # Objective: create a matrix with 4 columns (time, y, x, colony) containing the coordinates of all colonies
1113
+ # against time
1114
+ self.one_descriptor_per_arena["first_move"] = 1
1115
+ max_colonies = 0
1116
+ for t in np.arange(self.dims[0]):
1117
+ nb, shapes = cv2.connectedComponents(self.binary[t, :, :])
1118
+ max_colonies = np.max((max_colonies, nb))
1119
+
1120
+ time_descriptor_colony = np.zeros((self.dims[0], len(to_compute_from_sd) * max_colonies * self.dims[0]),
1121
+ dtype=np.float32) # Adjust max_colonies
1122
+ colony_number = 0
1123
+ colony_id_matrix = np.zeros(self.dims[1:], dtype=np.uint64)
1124
+ coord_colonies = []
1125
+ centroids = []
1126
+
1127
+ pat_tracker = PercentAndTimeTracker(self.dims[0], compute_with_elements_number=True)
1128
+ for t in np.arange(self.dims[0]): #21):#
1129
+ # t=0
1130
+ # t+=1
1131
+ # Colonies are ranked in increasing order so that the largest colony resulting from a
1132
+ # colony division keeps the previous colony's name.
1133
+ shapes, stats, centers = cc(self.binary[t, :, :])
1134
+
1135
+ # Shapes below 4 pixels are considered noise. The loop stops at nb and does not compute them
1136
+ nb = stats[stats[:, 4] >= 4].shape[0]
1137
+
1138
+ # nb = stats.shape[0]
1139
+ current_percentage, eta = pat_tracker.get_progress(t, element_number=nb)
1140
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}, Colony descriptors computation: {current_percentage}%{eta}")
1141
+
1142
+ updated_colony_names = np.zeros(1, dtype=np.uint32)
1143
+ for colony in (np.arange(nb - 1) + 1): # 120)):# #92
1144
+ # colony = 1
1145
+ # colony+=1
1146
+ # logging.info(f'Colony number {colony}')
1147
+ current_colony_img = (shapes == colony).astype(np.uint8)
1148
+
1149
+ # I/ Find out which names the current colony had at t-1
1150
+ colony_previous_names = np.unique(current_colony_img * colony_id_matrix)
1151
+ colony_previous_names = colony_previous_names[colony_previous_names != 0]
1152
+ # II/ Find out if the current colony name had already been analyzed at t
1153
+ # If there is no match with the saved colony_id_matrix, assign a new colony ID
1154
+ if t == 0 or len(colony_previous_names) == 0:
1155
+ # logging.info("New colony")
1156
+ colony_number += 1
1157
+ colony_names = [colony_number]
1158
+ # If there is at least 1 match with the saved colony_id_matrix, we keep the colony_previous_name(s)
1159
+ else:
1160
+ colony_names = colony_previous_names.tolist()
1161
+ # Handle colony division if necessary
1162
+ if np.any(np.isin(updated_colony_names, colony_names)):
1163
+ colony_number += 1
1164
+ colony_names = [colony_number]
1165
+
1166
+ # Update colony ID matrix for the current frame
1167
+ coords = np.nonzero(current_colony_img)
1168
+ colony_id_matrix[coords[0], coords[1]] = colony_names[0]
1169
+
1170
+ # Add coordinates to coord_colonies
1171
+ time_column = np.full(coords[0].shape, t, dtype=np.uint32)
1172
+ colony_column = np.full(coords[0].shape, colony_names[0], dtype=np.uint32)
1173
+ coord_colonies.append(np.column_stack((time_column, colony_column, coords[0], coords[1])))
1174
+
1175
+ # Calculate centroid and add to centroids list
1176
+ centroid_x, centroid_y = centers[colony, :]
1177
+ centroids.append((t, colony_names[0], centroid_y, centroid_x))
1178
+
1179
+ # Compute shape descriptors
1180
+ SD = ShapeDescriptors(current_colony_img, to_compute_from_sd)
1181
+ descriptors = SD.descriptors  # keep the dict so entries can be rescaled by name below
1182
+ # Adjust descriptors if output_in_mm is specified
1183
+ if self.vars['output_in_mm']:
1184
+ if 'area' in to_compute_from_sd:
1185
+ descriptors['area'] *= self.vars['average_pixel_size']
1186
+ if 'total_hole_area' in to_compute_from_sd:
1187
+ descriptors['total_hole_area'] *= self.vars['average_pixel_size']
1188
+ if 'perimeter' in to_compute_from_sd:
1189
+ descriptors['perimeter'] *= np.sqrt(self.vars['average_pixel_size'])
1190
+ if 'major_axis_len' in to_compute_from_sd:
1191
+ descriptors['major_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1192
+ if 'minor_axis_len' in to_compute_from_sd:
1193
+ descriptors['minor_axis_len'] *= np.sqrt(self.vars['average_pixel_size'])
1194
+
1195
+ # Store descriptors in time_descriptor_colony
1196
+ descriptor_index = (colony_names[0] - 1) * len(to_compute_from_sd)
1197
+ time_descriptor_colony[t, descriptor_index:(descriptor_index + len(descriptors))] = list(descriptors.values())
1198
+
1199
+ updated_colony_names = np.append(updated_colony_names, colony_names)
1200
+
1201
+ # Reset colony_id_matrix for the next frame
1202
+ colony_id_matrix *= self.binary[t, :, :]
1203
+
1204
+ coord_colonies = np.vstack(coord_colonies)
1205
+ centroids = np.array(centroids, dtype=np.float32)
1206
+ time_descriptor_colony = time_descriptor_colony[:, :(colony_number*len(to_compute_from_sd))]
1207
+
1208
+ if self.vars['save_coord_specimen']:
1209
+ coord_colonies = pd.DataFrame(coord_colonies, columns=["time", "colony", "y", "x"])
1210
+ coord_colonies.to_csv(f"coord_colonies{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_col{colony_number}_y{self.dims[1]}_x{self.dims[2]}.csv", sep=';', index=False, lineterminator='\n')
1211
+
1212
+ centroids = pd.DataFrame(centroids, columns=["time", "colony", "y", "x"])
1213
+ centroids.to_csv(f"colony_centroids{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_col{colony_number}_y{self.dims[1]}_x{self.dims[2]}.csv", sep=';', index=False, lineterminator='\n')
1214
+
1215
+ # Format the final dataframe to have one row per time frame, and one column per descriptor_colony_name
1216
+ self.one_row_per_frame = pd.DataFrame({'arena': self.one_descriptor_per_arena['arena'], 'time': timings, 'area_total': self.surfarea.astype(np.float64)})
1217
+ if self.vars['output_in_mm']:
1218
+ self.one_row_per_frame['area_total'] *= self.vars['average_pixel_size']
1219
+ column_names = np.char.add(np.repeat(to_compute_from_sd, colony_number),
1220
+ np.tile((np.arange(colony_number) + 1).astype(str), len(to_compute_from_sd)))
1221
+ time_descriptor_colony = pd.DataFrame(time_descriptor_colony, columns=column_names)
1222
+ self.one_row_per_frame = pd.concat([self.one_row_per_frame, time_descriptor_colony], axis=1)
1223
+
1224
+
1225
+ if self.vars['do_fading']:
1226
+ self.one_row_per_frame['newly_explored_area'] = self.newly_explored_area
1227
+ if self.vars['output_in_mm']:
1228
+ self.one_row_per_frame['newly_explored_area'] *= self.vars['average_pixel_size']
1229
+
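+ # Illustrative note (not part of Cellects): 'average_pixel_size' is an area per
+ # pixel, so areas scale linearly with it while lengths scale with its square root.
+ # For example, with average_pixel_size = 0.01 mm2 per pixel:
+ # area: 2500 px * 0.01 = 25.0 mm2
+ # perimeter: 200 px * sqrt(0.01) = 200 * 0.1 = 20.0 mm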
1230
+ def detect_growth_transitions(self):
1231
+ ##
1232
+ if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena']:
1233
+ self.one_descriptor_per_arena["iso_digi_transi"] = pd.NA
1234
+ if not pd.isna(self.one_descriptor_per_arena["first_move"]):
1235
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting growth transition analysis.")
1236
+
1237
+ # II) Once a pseudopod is deployed, look for a disk of growth around the original shape
1238
+ growth_beginning = self.surfarea < ((self.surfarea[0] * 1.2) + ((self.dims[1] / 4) * (self.dims[2] / 4)))
1239
+ dilated_origin = cv2.dilate(self.binary[self.one_descriptor_per_arena["first_move"], :, :], kernel=cross_33, iterations=10, borderType=cv2.BORDER_CONSTANT, borderValue=0)
1240
+ isisotropic = np.sum(self.binary[:, :, :] * dilated_origin, (1, 2))
1241
+ isisotropic *= growth_beginning
1242
+ # Ask if the dilated origin area is 90% covered during the growth beginning
1243
+ isisotropic = isisotropic > 0.9 * dilated_origin.sum()
1244
+ if np.any(isisotropic):
1245
+ self.one_descriptor_per_arena["is_growth_isotropic"] = 1
1246
+ # Determine a solidity reference to look for a potential breaking of the isotropic growth
1247
+ if self.compute_solidity_separately:
1248
+ solidity_reference = np.mean(self.solidity[:self.one_descriptor_per_arena["first_move"]])
1249
+ different_solidity = self.solidity < (0.9 * solidity_reference)
1250
+ del self.solidity
1251
+ else:
1252
+ solidity_reference = np.mean(
1253
+ self.one_row_per_frame.iloc[:(self.one_descriptor_per_arena["first_move"]), :]["solidity"])
1254
+ different_solidity = self.one_row_per_frame["solidity"].values < (0.9 * solidity_reference)
1255
+ # Make sure that the isotropy breaking does not occur before the isotropic growth
1256
+ if np.any(different_solidity):
1257
+ self.one_descriptor_per_arena["iso_digi_transi"] = np.nonzero(different_solidity)[0][0] * self.time_interval
1258
+ else:
1259
+ self.one_descriptor_per_arena["is_growth_isotropic"] = 0
1260
+ else:
1261
+ self.one_descriptor_per_arena["is_growth_isotropic"] = pd.NA
1262
+
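+ # Illustrative sketch (not part of Cellects): the isotropy test above asks
+ # whether, early in growth, the shape keeps covering at least 90% of a disk
+ # dilated around its first-move footprint. Hypothetical names:
+ # dilated = cv2.dilate(first_move_mask, cross_33, iterations=10)
+ # is_isotropic_at_t = (binary_t * dilated).sum() > 0.9 * dilated.sum()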
1263
+
1264
+ def check_converted_video_type(self):
1265
+ if self.converted_video.dtype != "uint8":
1266
+ self.converted_video -= np.min(self.converted_video)
1267
+ self.converted_video = np.round((255 * (self.converted_video / np.max(self.converted_video)))).astype(np.uint8)
1268
+
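+ # Illustrative sketch (not part of Cellects): the conversion above is a min-max
+ # rescale of the video into the 8-bit range:
+ # v = video.astype(np.float64)
+ # v -= v.min()
+ # video_uint8 = np.round(255 * v / v.max()).astype(np.uint8)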
1269
+
1270
+ def networks_detection(self, show_seg=False):
1271
+ if not pd.isna(self.one_descriptor_per_arena["first_move"]) and not self.vars['several_blob_per_arena'] and (self.vars['save_coord_network'] or self.vars['network_analysis']):
1272
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting network detection.")
1273
+ smooth_segmentation_over_time = True
1274
+ detect_pseudopods = True
1275
+ pseudopod_min_size = 50
1276
+ self.check_converted_video_type()
1277
+ if detect_pseudopods:
1278
+ pseudopod_vid = np.zeros_like(self.binary, dtype=bool)
1279
+ potential_network = np.zeros_like(self.binary, dtype=bool)
1280
+ self.network_dynamics = np.zeros_like(self.binary, dtype=np.uint8)
1281
+ greyscale = self.visu[-1, ...].mean(axis=-1)
1282
+ NetDet = NetworkDetection(greyscale, possibly_filled_pixels=self.binary[-1, ...],
1283
+ origin_to_add=self.origin)
1284
+ NetDet.get_best_network_detection_method()
1285
+ NetDet.change_greyscale(self.visu[-1, ...], c_space_dict=self.vars['convert_for_motion'])
1286
+ lighter_background = NetDet.greyscale_image[self.binary[-1, ...] > 0].mean() < NetDet.greyscale_image[self.binary[-1, ...] == 0].mean()
1287
+
1288
+
1289
+ for t in np.arange(self.one_descriptor_per_arena["first_move"], self.dims[0]): # 20):#
1290
+ greyscale = self.visu[t, ...].mean(axis=-1)
1291
+ NetDet_fast = NetworkDetection(greyscale, possibly_filled_pixels=self.binary[t, ...],
1292
+ origin_to_add=self.origin, best_result=NetDet.best_result)
1293
+ NetDet_fast.detect_network()
1294
+ if detect_pseudopods:
1295
+ NetDet_fast.detect_pseudopods(lighter_background, pseudopod_min_size=pseudopod_min_size)
1296
+ NetDet_fast.merge_network_with_pseudopods()
1297
+ pseudopod_vid[t, ...] = NetDet_fast.pseudopods
1298
+ potential_network[t, ...] = NetDet_fast.complete_network
1299
+ for t in np.arange(self.one_descriptor_per_arena["first_move"], self.dims[0]): # 20):#
1300
+ if smooth_segmentation_over_time:
1301
+ if 2 <= t <= (self.dims[0] - 2):
1302
+ computed_network = potential_network[(t - 2):(t + 3), :, :].sum(axis=0)
1303
+ computed_network[computed_network == 1] = 0
1304
+ computed_network[computed_network > 1] = 1
1305
+ else:
1306
+ if t < 2:
1307
+ computed_network = potential_network[:2, :, :].sum(axis=0)
1308
+ else:
1309
+ computed_network = potential_network[-2:, :, :].sum(axis=0)
1310
+ computed_network[computed_network > 0] = 1
1311
+ else:
1312
+ computed_network = potential_network[t, :, :].copy()
1313
+
1314
+ if self.origin is not None:
1315
+ computed_network = computed_network * (1 - self.origin)
1316
+ origin_contours = get_contours(self.origin)
1317
+ complete_network = np.logical_or(origin_contours, computed_network).astype(np.uint8)
1318
+ complete_network = keep_one_connected_component(complete_network)
1319
+
1320
+ if detect_pseudopods:
1321
+ # Make sure that removing pseudopods does not cut the network:
1322
+ without_pseudopods = complete_network * (1 - pseudopod_vid[t])
1323
+ only_connected_network = keep_one_connected_component(without_pseudopods)
1324
+ # # Option A: To add these cutting regions to the pseudopods do:
1325
+ pseudopods = (1 - only_connected_network) * complete_network
1326
+ pseudopod_vid[t] = pseudopods
1327
+ self.network_dynamics[t] = complete_network
1328
+
1329
+ # # Option B: To add these cutting regions to the network:
1330
+ # # Differentiate pseudopods that cut the network from the 'true ones'
1331
+ # # Dilate pseudopods and restrict them to the detected shape
1332
+ # pseudopods = cv2.dilate(pseudopod_vid[t], kernel=Ellipse((15, 15)).create().astype(np.uint8),
1333
+ # iterations=1) * self.binary[t, :, :]
1334
+ # nb, numbered_pseudopods = cv2.connectedComponents(pseudopods)
1335
+ # pseudopods = np.zeros_like(pseudopod_vid[t])
1336
+ # for p_i in range(1, nb + 1):
1337
+ # pseudo_i = numbered_pseudopods == p_i
1338
+ # nb_i, remainings, stats, centro = cv2.connectedComponentsWithStats(
1339
+ # complete_network * (1 - pseudo_i.astype(np.uint8)))
1340
+ # if (stats[:, 4] > pseudopod_min_size).sum() == 2:
1341
+ # pseudopods[pseudo_i] = 1
1342
+ # fragmented = np.nonzero(stats[:, 4] <= pseudopod_min_size)[0]
1343
+ # pseudopods[np.isin(remainings, fragmented)] = 1
1344
+ # pseudopod_vid[t] = pseudopods
1345
+ # complete_network[pseudopods > 0] = 1
1346
+ # self.network_dynamics[t] = complete_network
1347
+
1348
+
1349
+ imtoshow = self.visu[t, ...]
1350
+ eroded_binary = cv2.erode(self.network_dynamics[t, ...], cross_33)
1351
+ net_coord = np.nonzero(self.network_dynamics[t, ...] - eroded_binary)
1352
+ imtoshow[net_coord[0], net_coord[1], :] = (34, 34, 158)
1353
+ if show_seg:
1354
+ cv2.imshow("", cv2.resize(imtoshow, (1000, 1000)))
1355
+ cv2.waitKey(1)
1356
+ else:
1357
+ self.visu[t, ...] = imtoshow
1358
+ if show_seg:
1359
+ cv2.destroyAllWindows()
1360
+
1361
+ network_coord = smallest_memory_array(np.nonzero(self.network_dynamics), "uint")
1362
+
1363
+ if detect_pseudopods:
1364
+ self.network_dynamics[pseudopod_vid > 0] = 2
1365
+ pseudopod_coord = smallest_memory_array(np.nonzero(pseudopod_vid), "uint")
1366
+ if self.vars['save_coord_network']:
1367
+ np.save(
1368
+ f"coord_tubular_network{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1369
+ network_coord)
1370
+
1371
+ if detect_pseudopods:
1372
+ np.save(
1373
+ f"coord_pseudopods{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy",
1374
+ pseudopod_coord)
1375
+
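+ # Illustrative sketch (not part of Cellects): keep_one_connected_component is
+ # internal to Cellects; a classic equivalent keeps only the largest foreground
+ # component of a binary image:
+ # n, labels, stats, _ = cv2.connectedComponentsWithStats(binary_image)
+ # largest = 1 + np.argmax(stats[1:, 4])  # skip label 0, the background
+ # one_component = (labels == largest).astype(np.uint8)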
1376
+ def graph_extraction(self):
1377
+ if self.vars['graph_extraction'] and not self.vars['network_analysis'] and not self.vars['save_coord_network']:
1378
+ self.network_dynamics = self.binary
1379
+ _, _, _, origin_centroid = cv2.connectedComponentsWithStats(self.origin)
1380
+ origin_centroid = np.round((origin_centroid[1, 1], origin_centroid[1, 0])).astype(np.uint64)
1381
+ for t in np.arange(self.one_descriptor_per_arena["first_move"], self.dims[0]): # 20):#
1382
+
1383
+
1384
+ if self.origin is not None:
1385
+ computed_network = self.network_dynamics[t, ...] * (1 - self.origin)
1386
+ origin_contours = get_contours(self.origin)
1387
+ computed_network = np.logical_or(origin_contours, computed_network).astype(np.uint8)
1388
+ else:
1389
+ origin_contours = None
1390
+ computed_network = self.network_dynamics[t, ...].astype(np.uint8)
1391
+ computed_network = keep_one_connected_component(computed_network)
1392
+ pad_network, pad_origin = add_padding([computed_network, self.origin])
1393
+ pad_origin_centroid = origin_centroid + 1
1394
+ pad_skeleton, pad_distances, pad_origin_contours = get_skeleton_and_widths(pad_network, pad_origin,
1395
+ pad_origin_centroid)
1396
+ edge_id = EdgeIdentification(pad_skeleton, pad_distances)
1397
+ edge_id.run_edge_identification()
1398
+ if pad_origin_contours is not None:
1399
+ origin_contours = remove_padding([pad_origin_contours])[0]
1400
+ edge_id.make_vertex_table(origin_contours, self.network_dynamics[t, ...] == 2)
1401
+ edge_id.make_edge_table(self.converted_video[:, t])
1402
+
1403
+
1404
+ edge_id.vertex_table = np.hstack((np.repeat(t, edge_id.vertex_table.shape[0])[:, None], edge_id.vertex_table))
1405
+ edge_id.edge_table = np.hstack((np.repeat(t, edge_id.edge_table.shape[0])[:, None], edge_id.edge_table))
1406
+ if t == self.one_descriptor_per_arena["first_move"]:
1407
+ vertex_table = edge_id.vertex_table.copy()
1408
+ edge_table = edge_id.edge_table.copy()
1409
+ else:
1410
+ vertex_table = np.vstack((vertex_table, edge_id.vertex_table))
1411
+ edge_table = np.vstack((edge_table, edge_id.edge_table))
1412
+
1413
+ vertex_table = pd.DataFrame(vertex_table, columns=["t", "y", "x", "vertex_id", "is_tip", "origin",
1414
+ "vertex_connected"])
1415
+ edge_table = pd.DataFrame(edge_table,
1416
+ columns=["t", "edge_id", "vertex1", "vertex2", "length", "average_width", "intensity", "betweenness_centrality"])
1417
+ vertex_table.to_csv(
1418
+ f"vertex_table{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.csv")
1419
+ edge_table.to_csv(
1420
+ f"edge_table{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.csv")
1421
+
1422
+
1423
+ def memory_allocation_for_cytoscillations(self):
1424
+ try:
1425
+ period_in_frame_nb = int(self.vars['expected_oscillation_period'] / self.time_interval)
1426
+ if period_in_frame_nb < 2:
1427
+ period_in_frame_nb = 2
1428
+ necessary_memory = self.converted_video.shape[0] * self.converted_video.shape[1] * \
1429
+ self.converted_video.shape[2] * 64 * 4 * 1.16415e-10
1430
+ available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
1431
+ if len(self.converted_video.shape) == 4:
1432
+ self.converted_video = self.converted_video[:, :, :, 0]
1433
+ average_intensities = np.mean(self.converted_video, (1, 2))
1434
+ if self.vars['lose_accuracy_to_save_memory'] or (necessary_memory > available_memory):
1435
+ oscillations_video = np.zeros(self.converted_video.shape, dtype=np.float16)
1436
+ for cy in np.arange(self.converted_video.shape[1]):
1437
+ for cx in np.arange(self.converted_video.shape[2]):
1438
+ oscillations_video[:, cy, cx] = np.round(np.gradient(self.converted_video[:, cy, cx, ...]/average_intensities,
1439
+ period_in_frame_nb), 3).astype(np.float16)
1440
+ else:
1441
+ oscillations_video = np.gradient(self.converted_video / average_intensities[:, None, None], period_in_frame_nb, axis=0)
1442
+ # check if conv change here
1443
+ self.check_converted_video_type()
1444
+ if len(self.converted_video.shape) == 3:
1445
+ self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video), axis=3)
1446
+ oscillations_video = np.sign(oscillations_video)
1447
+ return oscillations_video
1448
+ except Exception as exc:
1449
+ logging.error(f"{exc}. Retrying to allocate for 10 minutes before crashing. ")
1450
+ return None
1451
+
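+ # Illustrative sketch (not part of Cellects): the oscillation signal built above
+ # is the sign of the temporal gradient of each pixel's intensity, normalised by
+ # the per-frame mean intensity, with a spacing matching the expected period:
+ # avg = video.mean(axis=(1, 2))  # one mean intensity per frame
+ # osc = np.sign(np.gradient(video / avg[:, None, None], period, axis=0))
+ # osc is +1 where intensity rises (thickening) and -1 where it falls (slimming).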
1452
+
1453
+ def study_cytoscillations(self, show_seg):
1454
+ if pd.isna(self.one_descriptor_per_arena["first_move"]):
1455
+ if not self.vars['lose_accuracy_to_save_memory']:
1456
+ self.check_converted_video_type()
1457
+ if self.vars['oscilacyto_analysis']:
1458
+ self.one_row_per_frame['mean_cluster_area'] = pd.NA
1459
+ self.one_row_per_frame['cluster_number'] = pd.NA
1460
+ else:
1461
+ if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
1462
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting oscillation analysis.")
1463
+ oscillations_video = None
1464
+ starting_time = default_timer()
1465
+ current_time = starting_time
1466
+ while oscillations_video is None and (current_time - starting_time) < 600:
1467
+ oscillations_video = self.memory_allocation_for_cytoscillations()
1468
+ if oscillations_video is None:
1469
+ sleep(30)
1470
+ current_time = default_timer()
1471
+
1472
+ within_range = (1 - self.binary[0, :, :]) * self.borders
1473
+ within_range = self.binary * within_range
1474
+ oscillations_video *= within_range
1475
+ del within_range
1476
+ oscillations_video += 1
1477
+ oscillations_video = oscillations_video.astype(np.uint8)
1478
+
1479
+ dotted_image = np.ones(self.converted_video.shape[1:3], np.uint8)
1480
+ for cy in np.arange(dotted_image.shape[0]):
1481
+ if cy % 2 != 0:
1482
+ dotted_image[cy, :] = 0
1483
+ for cx in np.arange(dotted_image.shape[1]):
1484
+ if cx % 2 != 0:
1485
+ dotted_image[:, cx] = 0
1486
+
1487
+ if self.start is None:
1488
+ self.start = 0
1489
+
1490
+ for t in np.arange(self.dims[0]):
1491
+ eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
1492
+ contours = self.binary[t, :, :] - eroded_binary
1493
+ contours_idx = np.nonzero(contours)
1494
+ imtoshow = deepcopy(self.converted_video[t, ...])
1495
+ imtoshow[contours_idx[0], contours_idx[1], :] = self.vars['contour_color']
1496
+ if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not pd.isna(self.one_descriptor_per_arena["iso_digi_transi"]):
1497
+ if self.one_descriptor_per_arena["is_growth_isotropic"] == 1:
1498
+ if t < self.one_descriptor_per_arena["iso_digi_transi"]:
1499
+ imtoshow[contours_idx[0], contours_idx[1], 2] = 255
1500
+ oscillations_image = np.zeros(self.dims[1:], np.uint8)
1501
+ if t >= self.start:
1502
+ # Assign a pixel to influx (in) or efflux (ef) if enough of its 8 neighbors are in or ef
1503
+ neigh_comp = CompareNeighborsWithValue(oscillations_video[t, :, :], connectivity=8, data_type=np.int8)
1504
+ neigh_comp.is_inf(1, and_itself=False)
1505
+ neigh_comp.is_sup(1, and_itself=False)
1506
+ # Whether 'influx' truly corresponds to an influx (resp. efflux) is not verified
1507
+ influx = neigh_comp.sup_neighbor_nb
1508
+ efflux = neigh_comp.inf_neighbor_nb
1509
+
1510
+ # Only keep pixels having more than 4 positive (resp. negative) neighbors
1511
+ influx[influx <= 4] = 0
1512
+ efflux[efflux <= 4] = 0
1513
+ influx[influx > 4] = 1
1514
+ efflux[efflux > 4] = 1
1515
+ if np.any(influx) or np.any(efflux):
1516
+ influx, in_stats, in_centroids = cc(influx)
1517
+ efflux, ef_stats, ef_centroids = cc(efflux)
1518
+ # Only keep clusters of at least 'minimal_oscillating_cluster_size' pixels (smaller ones are considered noise)
1519
+ in_smalls = np.nonzero(in_stats[:, 4] < self.vars['minimal_oscillating_cluster_size'])[0]
1520
+ if len(in_smalls) > 0:
1521
+ influx[np.isin(influx, in_smalls)] = 0
1522
+ in_stats = in_stats[:in_smalls[0], :]
1523
+ in_centroids = in_centroids[:in_smalls[0], :]
1524
+ ef_smalls = np.nonzero(ef_stats[:, 4] < self.vars['minimal_oscillating_cluster_size'])[0]
1525
+ if len(ef_smalls) > 0:
1526
+ efflux[np.isin(efflux, ef_smalls)] = 0
1527
+ ef_stats = ef_stats[:(ef_smalls[0]), :]
1528
+ ef_centroids = ef_centroids[:(ef_smalls[0]), :]
1529
+ in_idx = np.nonzero(influx) # NEW
1530
+ ef_idx = np.nonzero(efflux) # NEW
1531
+ oscillations_image[in_idx[0], in_idx[1]] = 1 # NEW
1532
+ oscillations_image[ef_idx[0], ef_idx[1]] = 2 # NEW
1533
+ # Prepare the image for display
1534
+ influx *= dotted_image
1535
+ efflux *= dotted_image
1536
+ in_idx = np.nonzero(influx)
1537
+ ef_idx = np.nonzero(efflux)
1538
+ imtoshow[in_idx[0], in_idx[1], :2] = 153 # Green: influx, intensity increase
1539
+ imtoshow[in_idx[0], in_idx[1], 2] = 0
1540
+ imtoshow[ef_idx[0], ef_idx[1], 1:] = 0 # Blue: efflux, intensity decrease
1541
+ imtoshow[ef_idx[0], ef_idx[1], 0] = 204
1542
+ oscillations_video[t, :, :] = oscillations_image
1543
+ self.converted_video[t, ...] = deepcopy(imtoshow)
1544
+ if show_seg:
1545
+ im_to_show = cv2.resize(imtoshow, (540, 540))
1546
+ cv2.imshow("shape_motion", im_to_show)
1547
+ cv2.waitKey(1)
1548
+ if show_seg:
1549
+ cv2.destroyAllWindows()
1550
+ if self.vars['save_coord_thickening_slimming']:
1551
+ np.save(f"coord_thickening{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy", smallest_memory_array(np.nonzero(oscillations_video == 1), "uint"))
1552
+ np.save(f"coord_slimming{self.one_descriptor_per_arena['arena']}_t{self.dims[0]}_y{self.dims[1]}_x{self.dims[2]}.npy", smallest_memory_array(np.nonzero(oscillations_video == 2), "uint"))
1553
+
1554
+
1555
+ if self.vars['oscilacyto_analysis']:
1556
+ # To get the median oscillation period of each oscillating cluster,
1557
+ # we create a dict containing two lists (one for influx, one for efflux).
1558
+ # Each list element corresponds to a cluster and stores:
1559
+ # all pixel coordinates of that cluster, their corresponding lifespans, and their times of disappearance.
1560
+ # The row count gives the cluster size; the Euclidean distance between pixel coordinates gives the wave distance.
1561
+ self.clusters_final_data = np.empty((0, 6),
1562
+ dtype=np.float32) # ["mean_pixel_period", "phase", "total_size", "edge_distance", cy, cx]
1563
+ period_tracking = np.zeros(self.converted_video.shape[1:3], dtype=np.uint32)
1564
+ efflux_study = ClusterFluxStudy(self.converted_video.shape[:3])
1565
+ influx_study = ClusterFluxStudy(self.converted_video.shape[:3])
1566
+
1567
+ if self.start is None:
1568
+ self.start = 0
1569
+ if self.vars['fractal_analysis']:
1570
+ if os.path.exists(f"oscillating_clusters_temporal_dynamics.h5"):
1571
+ remove_h5_key(f"oscillating_clusters_temporal_dynamics.h5",
1572
+ f"arena{self.one_descriptor_per_arena['arena']}")
1573
+ cluster_id_matrix = np.zeros(self.dims[1:], dtype=np.uint64)
1574
+ named_cluster_number = 0
1575
+ mean_cluster_area = np.zeros(oscillations_video.shape[0])
1576
+ pat_tracker = PercentAndTimeTracker(self.dims[0], compute_with_elements_number=True)
1577
+ for t in np.arange(np.max((self.start, self.lost_frames)), self.dims[0]): # np.arange(21): #
1578
+ eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
1579
+ contours = self.binary[t, :, :] - eroded_binary
1580
+ oscillations_image = oscillations_video[t, ...]
1581
+ influx = (oscillations_image == 1).astype(np.uint8)
1582
+ efflux = (oscillations_image == 2).astype(np.uint8)
1583
+ in_idx = np.nonzero(influx) # NEW
1584
+ ef_idx = np.nonzero(efflux)
1585
+ influx, in_stats, in_centroids = cc(influx)
1586
+ efflux, ef_stats, ef_centroids = cc(efflux)
1587
+ in_stats = in_stats[1:]
1588
+ in_centroids = in_centroids[1:]
1589
+ ef_stats = ef_stats[1:]
1590
+ ef_centroids = ef_centroids[1:]
1591
+ # Sum the number of connected components minus the background to get the number of clusters
1592
+ oscillating_cluster_number = in_stats.shape[0] + ef_stats.shape[0]
1593
+ updated_cluster_names = [0]
1594
+ if oscillating_cluster_number > 0:
1595
+ current_percentage, eta = pat_tracker.get_progress(t, element_number=oscillating_cluster_number)
1596
+ logging.info(
1597
+ f"Arena n°{self.one_descriptor_per_arena['arena']}, Oscillatory cluster computation: {current_percentage}%{eta}")
1598
+ if self.vars['fractal_analysis']:
1599
+ # New analysis to get the surface dynamics of every oscillatory cluster: Part 2, opening:
1600
+ network_at_t = np.zeros(self.dims[1:], dtype=np.uint8)
1601
+ network_idx = self.network_dynamics[:, self.network_dynamics[0, :] == t]
1602
+ network_at_t[network_idx[1, :], network_idx[2, :]] = 1
1603
+ shapes = np.zeros(self.dims[1:], dtype=np.uint32)
1604
+ shapes[in_idx[0], in_idx[1]] = influx[in_idx[0], in_idx[1]]
1605
+ max_in = in_stats.shape[0]
1606
+ shapes[ef_idx[0], ef_idx[1]] = max_in + efflux[ef_idx[0], ef_idx[1]]
1607
+ centers = np.vstack((in_centroids, ef_centroids))
1608
+ cluster_dynamic = np.zeros((int(oscillating_cluster_number) - 1, 13), dtype=np.float64)
1609
+ for clust_i in np.arange(oscillating_cluster_number - 1, dtype=np.uint32): # 120)):# #92
1610
+ cluster = clust_i + 1
1611
+ # cluster = 1
1612
+ # print(cluster)
1613
+ current_cluster_img = (shapes == cluster).astype(np.uint8)
1614
+ # I/ Find out which names the current cluster had at t-1
1615
+ cluster_previous_names = np.unique(current_cluster_img * cluster_id_matrix)
1616
+ cluster_previous_names = cluster_previous_names[cluster_previous_names != 0]
1617
+ # II/ Find out if the current cluster name had already been analyzed at t
1618
+ # If there is no match with the saved cluster_id_matrix, assign a new cluster ID
1619
+ if t == 0 or len(cluster_previous_names) == 0:
1620
+ # logging.info("New cluster")
1621
+ named_cluster_number += 1
1622
+ cluster_names = [named_cluster_number]
1623
+ # If there is at least 1 match with the saved cluster_id_matrix, we keep the cluster_previous_name(s)
1624
+ else:
1625
+ cluster_names = cluster_previous_names.tolist()
1626
+ # Handle cluster division if necessary
1627
+ if np.any(np.isin(updated_cluster_names, cluster_names)):
1628
+ named_cluster_number += 1
1629
+ cluster_names = [named_cluster_number]
1630
+
1631
+ # Get flow direction:
1632
+ if np.unique(oscillations_image * current_cluster_img)[1] == 1:
1633
+ flow = 1
1634
+ else:
1635
+ flow = - 1
1636
+ # Update cluster ID matrix for the current frame
1637
+ coords = np.nonzero(current_cluster_img)
1638
+ cluster_id_matrix[coords[0], coords[1]] = cluster_names[0]
1639
+
1640
+ # Save the current cluster areas:
1641
+ inner_network = current_cluster_img * network_at_t
1642
+ inner_network_area = inner_network.sum()
1643
+ zoomed_binary, side_lengths = prepare_box_counting(current_cluster_img,
1644
+ side_threshold=self.vars[
1645
+ 'fractal_box_side_threshold'],
1646
+ zoom_step=self.vars[
1647
+ 'fractal_zoom_step'],
1648
+ contours=True)
1649
+ box_count_dim, r_value, box_nb = box_counting_dimension(zoomed_binary, side_lengths)
1650
+
1651
+ if np.any(inner_network):
1652
+ zoomed_binary, side_lengths = prepare_box_counting(inner_network,
1653
+ side_threshold=self.vars[
1654
+ 'fractal_box_side_threshold'],
1655
+ zoom_step=self.vars[
1656
+ 'fractal_zoom_step'],
1657
+ contours=False)
1658
+ inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb = box_counting_dimension(
1659
+ zoomed_binary, side_lengths)
1660
+ else:
1661
+ inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb = 0, 0, 0
1662
+ # Calculate centroid and add to centroids list
1663
+ centroid_x, centroid_y = centers[cluster, :]
1664
+ if self.vars['output_in_mm']:
1665
+ cluster_dynamic[clust_i, :] = np.array(
1666
+ (t * self.time_interval, cluster_names[0], flow, centroid_y, centroid_x,
1667
+ current_cluster_img.sum() * self.vars['average_pixel_size'],
1668
+ inner_network_area * self.vars['average_pixel_size'], box_count_dim, r_value,
1669
+ box_nb, inner_network_box_count_dim, inner_net_r_value, inner_net_box_nb),
1670
+ dtype=np.float64)
1671
+ else:
1672
+ cluster_dynamic[clust_i, :] = np.array((t, cluster_names[0], flow, centroid_y,
1673
+ centroid_x, current_cluster_img.sum(),
1674
+ inner_network_area, box_count_dim, r_value,
1675
+ box_nb, inner_network_box_count_dim,
1676
+ inner_net_r_value, inner_net_box_nb),
1677
+ dtype=np.float64)
1678
+
1679
+ updated_cluster_names = np.append(updated_cluster_names, cluster_names)
1680
+ vstack_h5_array(f"oscillating_clusters_temporal_dynamics.h5",
1681
+ cluster_dynamic, key=f"arena{self.one_descriptor_per_arena['arena']}")
1682
+
1683
+ # Reset cluster_id_matrix for the next frame
1684
+ cluster_id_matrix *= self.binary[t, :, :]
1685
+
1686
+ period_tracking, self.clusters_final_data = efflux_study.update_flux(t, contours, efflux,
1687
+ period_tracking,
1688
+ self.clusters_final_data)
1689
+ period_tracking, self.clusters_final_data = influx_study.update_flux(t, contours, influx,
1690
+ period_tracking,
1691
+ self.clusters_final_data)
1692
+
1693
+ mean_cluster_area[t] = np.mean(np.concatenate((in_stats[:, 4], ef_stats[:, 4])))
1694
+ if self.vars['output_in_mm']:
1695
+ self.clusters_final_data[:, 1] *= self.time_interval # phase
1696
+ self.clusters_final_data[:, 2] *= self.vars['average_pixel_size'] # size
1697
+ self.clusters_final_data[:, 3] *= np.sqrt(self.vars['average_pixel_size']) # distance
1698
+ self.one_row_per_frame['mean_cluster_area'] = mean_cluster_area * self.vars['average_pixel_size']
1699
+ self.one_row_per_frame['cluster_number'] = named_cluster_number
1700
+
1701
+ del oscillations_video
1702
+
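+ # Illustrative sketch (not part of Cellects): CompareNeighborsWithValue is
+ # internal; an equivalent neighbour count uses a 3x3 convolution that excludes
+ # the central pixel, keeping pixels with more than 4 matching neighbours:
+ # kernel = np.ones((3, 3), np.float32); kernel[1, 1] = 0
+ # pos_neighbours = cv2.filter2D((osc > 1).astype(np.float32), -1, kernel)
+ # influx = (pos_neighbours > 4).astype(np.uint8)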
1703
+
1704
+ def fractal_descriptions(self):
1705
+ if not pd.isna(self.one_descriptor_per_arena["first_move"]) and self.vars['fractal_analysis']:
1706
+ logging.info(f"Arena n°{self.one_descriptor_per_arena['arena']}. Starting fractal analysis.")
1707
+
1708
+ if self.vars['network_analysis']:
1709
+ box_counting_dimensions = np.zeros((self.dims[0], 7), dtype=np.float64)
1710
+ else:
1711
+ box_counting_dimensions = np.zeros((self.dims[0], 3), dtype=np.float64)
1712
+
1713
+ for t in np.arange(self.dims[0]):
1714
+ if self.vars['network_analysis']:
1715
+ box_counting_dimensions[t, 0] = self.network_dynamics[t, ...].sum()
1716
+ zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...], side_threshold=self.vars[
1717
+ 'fractal_box_side_threshold'], zoom_step=self.vars['fractal_zoom_step'], contours=True)
1718
+ box_counting_dimensions[t, 1], box_counting_dimensions[t, 2], box_counting_dimensions[
1719
+ t, 3] = box_counting_dimension(zoomed_binary, side_lengths)
1720
+ zoomed_binary, side_lengths = prepare_box_counting(self.network_dynamics[t, ...],
1721
+ side_threshold=self.vars[
1722
+ 'fractal_box_side_threshold'],
1723
+ zoom_step=self.vars['fractal_zoom_step'],
1724
+ contours=False)
1725
+ box_counting_dimensions[t, 4], box_counting_dimensions[t, 5], box_counting_dimensions[
1726
+ t, 6] = box_counting_dimension(zoomed_binary, side_lengths)
1727
+ else:
1728
+ zoomed_binary, side_lengths = prepare_box_counting(self.binary[t, ...],
1729
+ side_threshold=self.vars['fractal_box_side_threshold'],
1730
+ zoom_step=self.vars['fractal_zoom_step'], contours=True)
1731
+ box_counting_dimensions[t, :] = box_counting_dimension(zoomed_binary, side_lengths)
1732
+
1733
+ if self.vars['network_analysis']:
1734
+ self.one_row_per_frame["inner_network_size"] = box_counting_dimensions[:, 0]
1735
+ self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 1]
1736
+ self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
1737
+ self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 3]
1738
+ self.one_row_per_frame["inner_network_fractal_dimension"] = box_counting_dimensions[:, 4]
1739
+ self.one_row_per_frame["inner_network_fractal_r_value"] = box_counting_dimensions[:, 5]
1740
+ self.one_row_per_frame["inner_network_fractal_box_nb"] = box_counting_dimensions[:, 6]
1741
+ if self.vars['output_in_mm']:
1742
+ self.one_row_per_frame["inner_network_size"] *= self.vars['average_pixel_size']
1743
+ else:
1744
+ self.one_row_per_frame["fractal_dimension"] = box_counting_dimensions[:, 0]
1745
+ self.one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[:, 1]
1746
+ self.one_row_per_frame["fractal_r_value"] = box_counting_dimensions[:, 2]
1747
+
1748
+ if self.vars['network_analysis'] or self.vars['save_coord_network']:
1749
+ del self.network_dynamics
1750
+
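+ # Illustrative sketch (not part of Cellects): prepare_box_counting and
+ # box_counting_dimension are internal; the classic box-counting estimate fits
+ # log(occupied box count) against log(1 / box side) and returns the slope.
+ # def sketch_box_counting(binary_image, sides=(2, 4, 8, 16)):
+ #     counts = []
+ #     for s in sides:
+ #         h, w = binary_image.shape
+ #         blocks = binary_image[:h - h % s, :w - w % s].reshape(h // s, s, w // s, s)
+ #         counts.append(np.count_nonzero(blocks.any(axis=(1, 3))))
+ #     slope, _ = np.polyfit(np.log(1 / np.asarray(sides, float)), np.log(counts), 1)
+ #     return slope  # estimated box-counting (fractal) dimension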
1751
+ def get_descriptors_summary(self):
1752
+ potential_descriptors = ["area", "perimeter", "circularity", "rectangularity", "total_hole_area", "solidity",
1753
+ "convexity", "eccentricity", "euler_number", "standard_deviation_y",
1754
+ "standard_deviation_x", "skewness_y", "skewness_x", "kurtosis_y", "kurtosis_x",
1755
+ "major_axis_len", "minor_axis_len", "axes_orientation"]
1756
+
1757
+ self.one_descriptor_per_arena["final_area"] = self.binary[-1, :, :].sum()
1758
+
1759
+ def save_efficiency_tests(self):
1760
+ # Provide images for assessing the analysis efficiency
1761
+ if self.dims[0] > 1:
1762
+ after_one_tenth_of_time = np.ceil(self.dims[0] / 10).astype(np.uint64)
1763
+ else:
1764
+ after_one_tenth_of_time = 0
1765
+
1766
+ last_good_detection = self.dims[0] - 1
1767
+ if self.dims[0] > self.lost_frames:
1768
+ if self.vars['do_threshold_segmentation']:
1769
+ last_good_detection -= self.lost_frames
1770
+ else:
1771
+ last_good_detection = 0
1772
+ if self.visu is None:
1773
+ if len(self.converted_video.shape) == 3:
1774
+ self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
1775
+ axis=3)
1776
+ self.efficiency_test_1 = deepcopy(self.converted_video[after_one_tenth_of_time, ...])
1777
+ self.efficiency_test_2 = deepcopy(self.converted_video[last_good_detection, ...])
1778
+ else:
1779
+ self.efficiency_test_1 = deepcopy(self.visu[after_one_tenth_of_time, :, :, :])
1780
+ self.efficiency_test_2 = deepcopy(self.visu[last_good_detection, :, :, :])
1781
+
1782
+ position = (25, self.dims[1] // 2)
1783
+ text = str(self.one_descriptor_per_arena['arena'])
1784
+ eroded_binary = cv2.erode(self.binary[after_one_tenth_of_time, :, :], cross_33)
1785
+ contours = np.nonzero(self.binary[after_one_tenth_of_time, :, :] - eroded_binary)
1786
+ self.efficiency_test_1[contours[0], contours[1], :] = self.vars['contour_color']
1787
+ self.efficiency_test_1 = cv2.putText(self.efficiency_test_1, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
1788
+ (self.vars["contour_color"], self.vars["contour_color"],
1789
+ self.vars["contour_color"], 255), 3)
1790
+
1791
+ eroded_binary = cv2.erode(self.binary[last_good_detection, :, :], cross_33)
1792
+ contours = np.nonzero(self.binary[last_good_detection, :, :] - eroded_binary)
1793
+ self.efficiency_test_2[contours[0], contours[1], :] = self.vars['contour_color']
1794
+ self.efficiency_test_2 = cv2.putText(self.efficiency_test_2, text, position, cv2.FONT_HERSHEY_SIMPLEX, 1,
1795
+ (self.vars["contour_color"], self.vars["contour_color"],
1796
+ self.vars["contour_color"], 255), 3)
1797
+
1798
+ def save_video(self):
1799
+
1800
+ if self.vars['save_processed_videos']:
1801
+ self.check_converted_video_type()
1802
+ if len(self.converted_video.shape) == 3:
1803
+ self.converted_video = np.stack((self.converted_video, self.converted_video, self.converted_video),
1804
+ axis=3)
1805
+ for t in np.arange(self.dims[0]):
1806
+
1807
+ eroded_binary = cv2.erode(self.binary[t, :, :], cross_33)
1808
+ contours = np.nonzero(self.binary[t, :, :] - eroded_binary)
1809
+ self.converted_video[t, contours[0], contours[1], :] = self.vars['contour_color']
1810
+ if "iso_digi_transi" in self.one_descriptor_per_arena.keys():
1811
+ if self.vars['iso_digi_analysis'] and not self.vars['several_blob_per_arena'] and not pd.isna(self.one_descriptor_per_arena["iso_digi_transi"]):
1812
+ if self.one_descriptor_per_arena["is_growth_isotropic"] == 1:
1813
+ if t < self.one_descriptor_per_arena["iso_digi_transi"]:
1814
+ self.converted_video[t, contours[0], contours[1], :] = 0, 0, 255
1815
+ del self.binary
1816
+ del self.surfarea
1817
+ del self.borders
1818
+ del self.origin
1819
+ del self.origin_idx
1820
+ del self.mean_intensity_per_frame
1821
+ del self.erodila_disk
1822
+ collect()
1823
+ if self.visu is None:
1824
+ true_frame_width = self.dims[2]
1825
+ if len(self.vars['background_list']) == 0:
1826
+ self.background = None
1827
+ else:
1828
+ self.background = self.vars['background_list'][self.one_descriptor_per_arena['arena'] - 1]
1829
+ self.visu = video2numpy(f"ind_{self.one_descriptor_per_arena['arena']}.npy", None, self.background, true_frame_width)
1830
+ if len(self.visu.shape) == 3:
1831
+ self.visu = np.stack((self.visu, self.visu, self.visu), axis=3)
1832
+ self.converted_video = np.concatenate((self.visu, self.converted_video), axis=2)
1833
+ # self.visu = None
1834
+
1835
+ if np.any(self.one_row_per_frame['time'] > 0):
1836
+ position = (5, self.dims[1] - 5)
1837
+ for t in np.arange(self.dims[0]):
1838
+ image = self.converted_video[t, ...]
1839
+ text = str(self.one_row_per_frame['time'][t]) + " min"
1840
+ image = cv2.putText(image, # numpy array on which text is written
1841
+ text, # text
1842
+ position, # position at which writing has to start
1843
+ cv2.FONT_HERSHEY_SIMPLEX, # font family
1844
+ 1, # font size
1845
+ (self.vars["contour_color"], self.vars["contour_color"], self.vars["contour_color"], 255), #(209, 80, 0, 255),
1846
+ 2) # font stroke
1847
+ self.converted_video[t, ...] = image
1848
+ vid_name = f"ind_{self.one_descriptor_per_arena['arena']}{self.vars['videos_extension']}"
1849
+ write_video(self.converted_video, vid_name, is_color=True, fps=self.vars['video_fps'])
1850
+ # self.converted_video = None
1851
+
1852
+ def save_results(self):
1853
+ self.save_efficiency_tests()
1854
+ self.save_video()
1855
+ if self.vars['several_blob_per_arena']:
1856
+ try:
1857
+ with open(f"one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv", 'w') as file:
1858
+ self.one_row_per_frame.to_csv(file, sep=';', index=False, lineterminator='\n')
1859
+ except PermissionError:
1860
+ logging.error(f"Never let one_row_per_frame_arena{self.one_descriptor_per_arena['arena']}.csv open when Cellects runs")
1861
+
1862
+         create_new_csv: bool = False
+         if os.path.isfile("one_row_per_arena.csv"):
+             stats = None
+             try:
+                 with open("one_row_per_arena.csv", 'r') as file:
+                     stats = pd.read_csv(file, header=0, sep=";")
+             except PermissionError:
+                 logging.error("Never let one_row_per_arena.csv open when Cellects runs")
+             if stats is not None and len(self.one_descriptor_per_arena) == len(stats.columns) - 1:
+                 try:
+                     with open("one_row_per_arena.csv", 'w') as file:
+                         # Arena numbers are 1-based, hence row index arena - 1;
+                         # the first column of the existing file is left untouched.
+                         stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), 1:] = list(self.one_descriptor_per_arena.values())
+                         stats.to_csv(file, sep=';', index=False, lineterminator='\n')
+                 except PermissionError:
+                     logging.error("Never let one_row_per_arena.csv open when Cellects runs")
+             else:
+                 create_new_csv = True
+         else:
+             create_new_csv = True
+         if create_new_csv:
+             with open("one_row_per_arena.csv", 'w') as file:
+                 stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
+                                      columns=list(self.one_descriptor_per_arena.keys()))
+                 stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
+                 stats.to_csv(file, sep=';', index=False, lineterminator='\n')
+         if not self.vars['keep_unaltered_videos'] and os.path.isfile(f"ind_{self.one_descriptor_per_arena['arena']}.npy"):
+             os.remove(f"ind_{self.one_descriptor_per_arena['arena']}.npy")
+
+     def change_results_of_one_arena(self):
+         self.save_video()
+         # I/ Update/Create one_row_per_arena.csv
+         create_new_csv: bool = False
+         if os.path.isfile("one_row_per_arena.csv"):
+             try:
+                 with open("one_row_per_arena.csv", 'r') as file:
+                     stats = pd.read_csv(file, header=0, sep=";")
+                 # Only overwrite the columns that already exist in the file.
+                 for stat_name, stat_value in self.one_descriptor_per_arena.items():
+                     if stat_name in stats.columns:
+                         stats.loc[(self.one_descriptor_per_arena['arena'] - 1), stat_name] = np.uint32(stat_value)
+                 with open("one_row_per_arena.csv", 'w') as file:
+                     stats.to_csv(file, sep=';', index=False, lineterminator='\n')
+             except PermissionError:
+                 logging.error("Never let one_row_per_arena.csv open when Cellects runs")
+             except Exception as e:
+                 logging.error(f"{e}")
+                 create_new_csv = True
+         else:
+             create_new_csv = True
+         if create_new_csv:
+             logging.info("Create a new one_row_per_arena.csv file")
+             try:
+                 with open("one_row_per_arena.csv", 'w') as file:
+                     stats = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(self.one_descriptor_per_arena))),
+                                          columns=list(self.one_descriptor_per_arena.keys()))
+                     stats.iloc[(self.one_descriptor_per_arena['arena'] - 1), :] = np.array(list(self.one_descriptor_per_arena.values()), dtype=np.uint32)
+                     stats.to_csv(file, sep=';', index=False, lineterminator='\n')
+             except PermissionError:
+                 logging.error("Never let one_row_per_arena.csv open when Cellects runs")
+
+         # II/ Update/Create one_row_per_frame.csv
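+         # Row bookkeeping sketch (indices taken from the slice below): arena k
+         # occupies rows (k - 1) * T to k * T - 1 of one_row_per_frame.csv,
+         # where T = self.dims[0] is the frame count; .loc slicing is
+         # end-inclusive, hence the "- 1" on the upper bound.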
+         create_new_csv = False
+         if os.path.isfile("one_row_per_frame.csv"):
+             try:
+                 with open("one_row_per_frame.csv", 'r') as file:
+                     descriptors = pd.read_csv(file, header=0, sep=";")
+                 # Only overwrite the columns that already exist in the file.
+                 for stat_name, stat_value in self.one_row_per_frame.items():
+                     if stat_name in descriptors.columns:
+                         descriptors.loc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0] - 1), stat_name] = stat_value.values
+                 with open("one_row_per_frame.csv", 'w') as file:
+                     descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
+             except PermissionError:
+                 logging.error("Never let one_row_per_frame.csv open when Cellects runs")
+             except Exception as e:
+                 logging.error(f"{e}")
+                 create_new_csv = True
+         else:
+             create_new_csv = True
+         if create_new_csv:
+             logging.info("Create a new one_row_per_frame.csv file")
+             try:
+                 with open("one_row_per_frame.csv", 'w') as file:
+                     descriptors = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']) * self.dims[0], len(self.one_row_per_frame.columns))),
+                                                columns=list(self.one_row_per_frame.keys()))
+                     descriptors.iloc[((self.one_descriptor_per_arena['arena'] - 1) * self.dims[0]):((self.one_descriptor_per_arena['arena']) * self.dims[0]), :] = self.one_row_per_frame
+                     descriptors.to_csv(file, sep=';', index=False, lineterminator='\n')
+             except PermissionError:
+                 logging.error("Never let one_row_per_frame.csv open when Cellects runs")
+
+         # III/ Update/Create one_row_per_oscillating_cluster.csv
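+         # clusters_final_data holds one row per oscillating cluster; the
+         # column names below give its assumed layout (mean pixel period,
+         # phase, size, distance to the edge, and the cluster's y/x
+         # coordinates), with np.c_ prepending the arena number to each row.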
+         if not pd.isna(self.one_descriptor_per_arena["first_move"]) and self.vars['oscilacyto_analysis']:
+             oscil_i = pd.DataFrame(
+                 np.c_[np.repeat(self.one_descriptor_per_arena['arena'], self.clusters_final_data.shape[0]), self.clusters_final_data],
+                 columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
+             if os.path.isfile("one_row_per_oscillating_cluster.csv"):
+                 try:
+                     with open("one_row_per_oscillating_cluster.csv", 'r') as file:
+                         one_row_per_oscillating_cluster = pd.read_csv(file, header=0, sep=";")
+                     # Splice the new rows in place of this arena's previous
+                     # ones, keeping the rows of all other arenas in order.
+                     one_row_per_oscillating_cluster_before = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] < self.one_descriptor_per_arena['arena']]
+                     one_row_per_oscillating_cluster_after = one_row_per_oscillating_cluster[one_row_per_oscillating_cluster['arena'] > self.one_descriptor_per_arena['arena']]
+                     one_row_per_oscillating_cluster = pd.concat((one_row_per_oscillating_cluster_before, oscil_i, one_row_per_oscillating_cluster_after))
+                     with open("one_row_per_oscillating_cluster.csv", 'w') as file:
+                         one_row_per_oscillating_cluster.to_csv(file, sep=';', index=False, lineterminator='\n')
+                 except PermissionError:
+                     logging.error("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
+             else:
+                 try:
+                     with open("one_row_per_oscillating_cluster.csv", 'w') as file:
+                         oscil_i.to_csv(file, sep=';', index=False, lineterminator='\n')
+                 except PermissionError:
+                     logging.error("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
+