cellects-0.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +155 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +31 -0
  7. cellects/core/cellects_threads.py +1451 -0
  8. cellects/core/motion_analysis.py +2010 -0
  9. cellects/core/one_image_analysis.py +1061 -0
  10. cellects/core/one_video_per_blob.py +540 -0
  11. cellects/core/program_organizer.py +1316 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +790 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +2066 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/image_segmentation.py +706 -0
  29. cellects/image_analysis/morphological_operations.py +1635 -0
  30. cellects/image_analysis/network_functions.py +1757 -0
  31. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  32. cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
  33. cellects/image_analysis/shape_descriptors.py +1016 -0
  34. cellects/utils/__init__.py +0 -0
  35. cellects/utils/decorators.py +14 -0
  36. cellects/utils/formulas.py +637 -0
  37. cellects/utils/load_display_save.py +1054 -0
  38. cellects/utils/utilitarian.py +490 -0
  39. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  40. cellects-0.1.2.dist-info/METADATA +132 -0
  41. cellects-0.1.2.dist-info/RECORD +44 -0
  42. cellects-0.1.2.dist-info/WHEEL +5 -0
  43. cellects-0.1.2.dist-info/entry_points.txt +2 -0
  44. cellects-0.1.2.dist-info/top_level.txt +1 -0
cellects/core/cellects_threads.py
@@ -0,0 +1,1451 @@
+ #!/usr/bin/env python3
+ """
+ The Cellects graphical user interface interacts with the computational scripts through threads.
+ In particular, each thread calls one or several methods of the class named "program_organizer",
+ which gathers all the computations available in the software.
+ These threads are started from a child of WindowType, run methods of program_organizer, and send
+ messages and results to the corresponding child of WindowType, allowing, for instance, a result
+ to be displayed in the interface.
+ """
+
+ import logging
+ import weakref
+ from multiprocessing import Queue, Process, Manager
+ import os
+ import time
+ from glob import glob
+ from timeit import default_timer
+ from copy import deepcopy
+ import cv2
+ from numba.typed import Dict as TDict
+ import numpy as np
+ import pandas as pd
+ from PySide6 import QtCore
+ from cellects.image_analysis.morphological_operations import cross_33, Ellipse
+ from cellects.image_analysis.image_segmentation import generate_color_space_combination, apply_filter
+ from cellects.utils.load_display_save import read_and_rotate, write_video
+ from cellects.utils.formulas import bracket_to_uint8_image_contrast
+ from cellects.utils.utilitarian import PercentAndTimeTracker, reduce_path_len
+ from cellects.core.one_video_per_blob import OneVideoPerBlob
+ from cellects.core.motion_analysis import MotionAnalysis
+
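The pattern described in the module docstring is the same for every thread in this file: a QtCore.QThread subclass holds a reference to its window through parent(), does the heavy work in run(), and reports back through class-level Signal attributes. A minimal, self-contained sketch of that pattern, assuming only that PySide6 is installed; the window and method names below are hypothetical stand-ins, not Cellects classes:

    import sys
    from PySide6 import QtCore, QtWidgets

    class ExampleThread(QtCore.QThread):
        message_from_thread = QtCore.Signal(str)  # declared at class level, like the threads below

        def run(self):
            # the heavy computation would happen here, typically via self.parent().po
            self.message_from_thread.emit("computation finished")

    class ExampleWindow(QtWidgets.QWidget):
        def __init__(self):
            super().__init__()
            self.thread = ExampleThread(self)                        # parent() will return this window
            self.thread.message_from_thread.connect(self.on_message)
            self.thread.start()                                      # runs run() in a worker thread

        def on_message(self, text):
            print(text)  # a real window would update a label or an image widget instead

    if __name__ == "__main__":
        app = QtWidgets.QApplication(sys.argv)
        window = ExampleWindow()
        QtCore.QTimer.singleShot(200, app.quit)  # leave the worker time to emit, then quit
        sys.exit(app.exec())

Connecting the signal before start() is what lets the worker thread update the GUI safely: the slot runs in the GUI thread, never in the worker.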
+ class LoadDataToRunCellectsQuicklyThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(LoadDataToRunCellectsQuicklyThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.look_for_data()
+         self.parent().po.load_data_to_run_cellects_quickly()
+         if self.parent().po.first_exp_ready_to_run:
+             self.message_from_thread.emit("Data found, Video tracking window and Run all directly are available")
+         else:
+             self.message_from_thread.emit("")
+
+
+ class LookForDataThreadInFirstW(QtCore.QThread):
+     def __init__(self, parent=None):
+         super(LookForDataThreadInFirstW, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.look_for_data()
+
+
+ class LoadFirstFolderIfSeveralThread(QtCore.QThread):
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(LoadFirstFolderIfSeveralThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.load_data_to_run_cellects_quickly()
+         if not self.parent().po.first_exp_ready_to_run:
+             self.parent().po.get_first_image()
+         self.message_when_thread_finished.emit(self.parent().po.first_exp_ready_to_run)
+
+
+ class GetFirstImThread(QtCore.QThread):
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         """
+         This class reads the first image of the (first of the) selected analyses.
+         According to the first_detection_frame value, it can be another image.
+         If this is the first time a first image is read, it also gathers the following variables:
+             - img_number
+             - dims (video dimensions: time, y, x)
+             - raw_images (whether images are in a raw format)
+         If the selected analysis contains videos instead of images, it opens the first video
+         and reads the first_detection_frame-th image.
+         :param parent: An object containing all necessary variables.
+         """
+         super(GetFirstImThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.get_first_image()
+         self.message_when_thread_finished.emit(True)
+
+
+ class GetLastImThread(QtCore.QThread):
+     def __init__(self, parent=None):
+         super(GetLastImThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.get_last_image()
+
+
+ class UpdateImageThread(QtCore.QThread):
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(UpdateImageThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         # I/ If this thread runs from user input, get the right coordinates
+         # and convert them to fit the displayed image size
+         user_input = len(self.parent().imageanalysiswindow.saved_coord) > 0 or len(self.parent().imageanalysiswindow.temporary_mask_coord) > 0
+         if user_input:
+             if len(self.parent().imageanalysiswindow.temporary_mask_coord) > 0:
+                 idx = self.parent().imageanalysiswindow.temporary_mask_coord
+             else:
+                 idx = self.parent().imageanalysiswindow.saved_coord
+             if len(idx) < 2:
+                 user_input = False
+             else:
+                 # Convert coordinates:
+                 self.parent().imageanalysiswindow.display_image.update_image_scaling_factors()
+                 sf = self.parent().imageanalysiswindow.display_image.scaling_factors
+                 idx = np.array(((np.round(idx[0][0] * sf[0]), np.round(idx[0][1] * sf[1])), (np.round(idx[1][0] * sf[0]), np.round(idx[1][1] * sf[1]))), dtype=np.int64)
+                 min_y = np.min(idx[:, 0])
+                 max_y = np.max(idx[:, 0])
+                 min_x = np.min(idx[:, 1])
+                 max_x = np.max(idx[:, 1])
+                 if max_y > self.parent().imageanalysiswindow.drawn_image.shape[0]:
+                     max_y = self.parent().imageanalysiswindow.drawn_image.shape[0] - 1
+                 if max_x > self.parent().imageanalysiswindow.drawn_image.shape[1]:
+                     max_x = self.parent().imageanalysiswindow.drawn_image.shape[1] - 1
+                 if min_y < 0:
+                     min_y = 0
+                 if min_x < 0:
+                     min_x = 0
+
+         if len(self.parent().imageanalysiswindow.temporary_mask_coord) == 0:  # not load
+             # II/ If this thread aims at saving the last user input and displaying all user inputs:
+             # Update the drawn_image according to every saved mask
+             # 1) The segmentation mask
+             # 2) The back_mask and bio_mask
+             # 3) The automatically detected video contours
+             # (re-)Initialize the drawn image
+             self.parent().imageanalysiswindow.drawn_image = deepcopy(self.parent().po.current_image)
+             if self.parent().imageanalysiswindow.drawn_image.size < 1000000:
+                 contour_width = 3
+             else:
+                 contour_width = 6
+             # 1) The segmentation mask
+             logging.info('Add the segmentation mask to the image')
+             if self.parent().imageanalysiswindow.is_first_image_flag:
+                 im_combinations = self.parent().po.first_image.im_combinations
+                 im_mean = self.parent().po.first_image.image.mean()
+             else:
+                 im_combinations = self.parent().po.last_image.im_combinations
+                 im_mean = self.parent().po.last_image.bgr.mean()
+             # If there are image combinations, get the current corresponding binary image
+             if im_combinations is not None and len(im_combinations) != 0:
+                 binary_idx = im_combinations[self.parent().po.current_combination_id]["binary_image"]
+                 # If it concerns the last image, only keep the contour coordinates
+                 eroded_binary = cv2.erode(binary_idx, cross_33)
+                 binary_idx = binary_idx - eroded_binary
+                 binary_idx = cv2.dilate(binary_idx, kernel=cross_33, iterations=contour_width)
+                 binary_idx = np.nonzero(binary_idx)
+                 # Color these coordinates in magenta on bright images, and in pink on dark images
+                 if im_mean > 126:
+                     # logging.info('Color the segmentation mask in magenta')
+                     self.parent().imageanalysiswindow.drawn_image[binary_idx[0], binary_idx[1], :] = np.array((20, 0, 150), dtype=np.uint8)
+                 else:
+                     # logging.info('Color the segmentation mask in pink')
+                     self.parent().imageanalysiswindow.drawn_image[binary_idx[0], binary_idx[1], :] = np.array((94, 0, 213), dtype=np.uint8)
+             if user_input:  # save
+                 mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                 if self.parent().imageanalysiswindow.back1_bio2 == 0:
+                     logging.info("Save the user drawn mask of the current arena")
+                     if self.parent().po.vars['arena_shape'] == 'circle':
+                         ellipse = Ellipse((max_y - min_y, max_x - min_x)).create().astype(np.uint8)
+                         mask[min_y:max_y, min_x:max_x, ...] = ellipse
+                     else:
+                         mask[min_y:max_y, min_x:max_x] = 1
+                 else:
+                     logging.info("Save the user drawn mask of Cell or Back")
+                     if self.parent().imageanalysiswindow.back1_bio2 == 2:
+                         if self.parent().po.all['starting_blob_shape'] == 'circle':
+                             ellipse = Ellipse((max_y - min_y, max_x - min_x)).create().astype(np.uint8)
+                             mask[min_y:max_y, min_x:max_x, ...] = ellipse
+                         else:
+                             mask[min_y:max_y, min_x:max_x] = 1
+                     else:
+                         mask[min_y:max_y, min_x:max_x] = 1
+                 mask = np.nonzero(mask)
+
+                 if self.parent().imageanalysiswindow.back1_bio2 == 1:
+                     self.parent().imageanalysiswindow.back_masks_number += 1
+                     self.parent().imageanalysiswindow.back_mask[mask[0], mask[1]] = self.parent().imageanalysiswindow.available_back_names[0]
+                 elif self.parent().imageanalysiswindow.back1_bio2 == 2:
+                     self.parent().imageanalysiswindow.bio_masks_number += 1
+                     self.parent().imageanalysiswindow.bio_mask[mask[0], mask[1]] = self.parent().imageanalysiswindow.available_bio_names[0]
+                 elif self.parent().imageanalysiswindow.manual_delineation_flag:
+                     self.parent().imageanalysiswindow.arena_masks_number += 1
+                     self.parent().imageanalysiswindow.arena_mask[mask[0], mask[1]] = self.parent().imageanalysiswindow.available_arena_names[0]
+             # 2)a) Apply all these masks to the drawn image:
+             back_coord = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+             bio_coord = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+             if self.parent().imageanalysiswindow.arena_mask is not None:
+                 arena_coord = np.nonzero(self.parent().imageanalysiswindow.arena_mask)
+                 self.parent().imageanalysiswindow.drawn_image[arena_coord[0], arena_coord[1], :] = np.repeat(self.parent().po.vars['contour_color'], 3).astype(np.uint8)
+             self.parent().imageanalysiswindow.drawn_image[back_coord[0], back_coord[1], :] = np.array((224, 160, 81), dtype=np.uint8)
+             self.parent().imageanalysiswindow.drawn_image[bio_coord[0], bio_coord[1], :] = np.array((17, 160, 212), dtype=np.uint8)
+             image = self.parent().imageanalysiswindow.drawn_image
+             # 3) The automatically detected video contours
+             if self.parent().imageanalysiswindow.delineation_done:  # add a mask of the video contour
+                 # logging.info("Draw the delineation mask of each arena")
+                 for contour_i in range(len(self.parent().po.top)):
+                     mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                     min_cy = self.parent().po.top[contour_i]
+                     max_cy = self.parent().po.bot[contour_i]
+                     min_cx = self.parent().po.left[contour_i]
+                     max_cx = self.parent().po.right[contour_i]
+                     text = f"{contour_i + 1}"
+                     position = (self.parent().po.left[contour_i] + 25, self.parent().po.top[contour_i] + (self.parent().po.bot[contour_i] - self.parent().po.top[contour_i]) // 2)
+                     image = cv2.putText(image,  # numpy array on which text is written
+                                         text,  # text
+                                         position,  # position at which writing has to start
+                                         cv2.FONT_HERSHEY_SIMPLEX,  # font family
+                                         1,  # font size
+                                         (138, 95, 18, 255),
+                                         # (209, 80, 0, 255),  # font color
+                                         2)  # font stroke
+                     if (max_cy - min_cy) < 0 or (max_cx - min_cx) < 0:
+                         self.parent().imageanalysiswindow.message.setText("Error: the shape number or the detection is wrong")
+                     if self.parent().po.vars['arena_shape'] == 'circle':
+                         ellipse = Ellipse((max_cy - min_cy, max_cx - min_cx)).create().astype(np.uint8)
+                         ellipse = cv2.morphologyEx(ellipse, cv2.MORPH_GRADIENT, cross_33)
+                         mask[min_cy:max_cy, min_cx:max_cx, ...] = ellipse
+                     else:
+                         mask[(min_cy, max_cy), min_cx:max_cx] = 1
+                         mask[min_cy:max_cy, (min_cx, max_cx)] = 1
+                     mask = cv2.dilate(mask, kernel=cross_33, iterations=contour_width)
+                     mask = np.nonzero(mask)
+                     image[mask[0], mask[1], :] = np.array((138, 95, 18), dtype=np.uint8)  # self.parent().po.vars['contour_color']
+
+         else:  # load
+             if user_input:
+                 # III/ If this thread runs from user input: update the drawn_image according to the current user input
+                 # Just add the mask to drawn_image as quickly as possible
+                 # Take the drawn image and add the temporary mask to it
+                 image = deepcopy(self.parent().imageanalysiswindow.drawn_image)
+                 if self.parent().imageanalysiswindow.back1_bio2 == 0:
+                     # logging.info("Dynamic drawing of the arena outline")
+                     if self.parent().po.vars['arena_shape'] == 'circle':
+                         ellipse = Ellipse((max_y - min_y, max_x - min_x)).create()
+                         ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
+                         image[min_y:max_y, min_x:max_x, ...] *= (1 - ellipse)
+                         image[min_y:max_y, min_x:max_x, ...] += ellipse
+                     else:
+                         mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                         mask[min_y:max_y, min_x:max_x] = 1
+                         mask = np.nonzero(mask)
+                         image[mask[0], mask[1], :] = np.array((0, 0, 0), dtype=np.uint8)
+                 else:
+                     # logging.info("Dynamic drawing of Cell or Back")
+                     if self.parent().imageanalysiswindow.back1_bio2 == 2:
+                         if self.parent().po.all['starting_blob_shape'] == 'circle':
+                             ellipse = Ellipse((max_y - min_y, max_x - min_x)).create()
+                             ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
+                             image[min_y:max_y, min_x:max_x, ...] *= (1 - ellipse)
+                             ellipse[:, :, :] *= np.array((17, 160, 212), dtype=np.uint8)
+                             image[min_y:max_y, min_x:max_x, ...] += ellipse
+                         else:
+                             mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                             mask[min_y:max_y, min_x:max_x] = 1
+                             mask = np.nonzero(mask)
+                             image[mask[0], mask[1], :] = np.array((17, 160, 212), dtype=np.uint8)
+                     else:
+                         mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                         mask[min_y:max_y, min_x:max_x] = 1
+                         mask = np.nonzero(mask)
+                         image[mask[0], mask[1], :] = np.array((224, 160, 81), dtype=np.uint8)
+
+         self.parent().imageanalysiswindow.display_image.update_image(image)
+         self.message_when_thread_finished.emit(True)
+
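UpdateImageThread's segmentation overlay relies on a classic morphology trick: erode the binary mask, subtract the erosion to keep a one-pixel outline, then dilate the outline to the desired contour width. A minimal numpy/OpenCV sketch of the same steps; the filled disk is a stand-in mask, and cross_33 is rebuilt here rather than imported from Cellects:

    import cv2
    import numpy as np

    cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    binary = np.zeros((100, 100), dtype=np.uint8)
    cv2.circle(binary, (50, 50), 30, 1, -1)                       # a filled disk as the mask

    eroded = cv2.erode(binary, cross_33)
    contour = binary - eroded                                     # 1-pixel-wide outline
    contour = cv2.dilate(contour, kernel=cross_33, iterations=3)  # thicken to contour_width

    image = np.full((100, 100, 3), 255, dtype=np.uint8)
    ys, xs = np.nonzero(contour)
    image[ys, xs, :] = np.array((20, 0, 150), dtype=np.uint8)     # magenta overlay, as above

Subtracting the erosion (rather than running an edge detector) is cheap and guarantees the outline sits exactly on the mask border.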
+
+ class FirstImageAnalysisThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(FirstImageAnalysisThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         tic = default_timer()
+         biomask = None
+         backmask = None
+         if self.parent().imageanalysiswindow.bio_masks_number != 0:
+             shape_nb, ordered_image = cv2.connectedComponents((self.parent().imageanalysiswindow.bio_mask > 0).astype(np.uint8))
+             shape_nb -= 1
+             biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+         else:
+             shape_nb = 0
+         if self.parent().imageanalysiswindow.back_masks_number != 0:
+             backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+         if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2 or shape_nb == self.parent().po.sample_number:
+             self.message_from_thread.emit("Image segmentation, wait 30 seconds at most")
+             if not self.parent().imageanalysiswindow.asking_first_im_parameters_flag and self.parent().po.all['scale_with_image_or_cells'] == 0 and self.parent().po.all["set_spot_size"]:
+                 self.parent().po.get_average_pixel_size()
+                 spot_size = self.parent().po.starting_blob_hsize_in_pixels
+             else:
+                 spot_size = None
+             self.parent().po.all["bio_mask"] = biomask
+             self.parent().po.all["back_mask"] = backmask
+             self.parent().po.fast_image_segmentation(is_first_image=True, biomask=biomask, backmask=backmask, spot_size=spot_size)
+             if shape_nb == self.parent().po.sample_number and self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] != self.parent().po.sample_number:
+                 self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] = shape_nb
+                 self.parent().po.first_image.shape_number = shape_nb
+                 self.parent().po.first_image.validated_shapes = (self.parent().imageanalysiswindow.bio_mask > 0).astype(np.uint8)
+                 self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['binary_image'] = self.parent().po.first_image.validated_shapes
+         else:
+             self.message_from_thread.emit("Generating analysis options, wait...")
+             if self.parent().po.vars["color_number"] > 2:
+                 kmeans_clust_nb = self.parent().po.vars["color_number"]
+                 if self.parent().po.carefully:
+                     self.message_from_thread.emit("Generating analysis options, wait less than 30 minutes")
+                 else:
+                     self.message_from_thread.emit("Generating analysis options, a few minutes")
+             else:
+                 kmeans_clust_nb = None
+                 if self.parent().po.carefully:
+                     self.message_from_thread.emit("Generating analysis options, wait a few minutes")
+                 else:
+                     self.message_from_thread.emit("Generating analysis options, around 1 minute")
+             if self.parent().imageanalysiswindow.asking_first_im_parameters_flag:
+                 self.parent().po.first_image.find_first_im_csc(sample_number=self.parent().po.sample_number,
+                                                                several_blob_per_arena=None,
+                                                                spot_shape=None, spot_size=None,
+                                                                kmeans_clust_nb=kmeans_clust_nb,
+                                                                biomask=self.parent().po.all["bio_mask"],
+                                                                backmask=self.parent().po.all["back_mask"],
+                                                                color_space_dictionaries=None,
+                                                                carefully=self.parent().po.carefully)
+             else:
+                 if self.parent().po.all['scale_with_image_or_cells'] == 0:
+                     self.parent().po.get_average_pixel_size()
+                 else:
+                     self.parent().po.starting_blob_hsize_in_pixels = None
+                 self.parent().po.first_image.find_first_im_csc(sample_number=self.parent().po.sample_number,
+                                                                several_blob_per_arena=self.parent().po.vars['several_blob_per_arena'],
+                                                                spot_shape=self.parent().po.all['starting_blob_shape'],
+                                                                spot_size=self.parent().po.starting_blob_hsize_in_pixels,
+                                                                kmeans_clust_nb=kmeans_clust_nb,
+                                                                biomask=self.parent().po.all["bio_mask"],
+                                                                backmask=self.parent().po.all["back_mask"],
+                                                                color_space_dictionaries=None,
+                                                                carefully=self.parent().po.carefully)
+
+         logging.info(f"Image analysis lasted {default_timer() - tic} seconds")
+         logging.info(f"Image analysis lasted {np.round((default_timer() - tic) / 60)} minutes")
+         self.message_when_thread_finished.emit(True)
+
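FirstImageAnalysisThread compares the number of user-drawn blobs against the expected sample number with cv2.connectedComponents, which always labels the background as component 0; that is why the code subtracts 1 from the returned count. A short, self-contained illustration on a toy mask:

    import cv2
    import numpy as np

    mask = np.zeros((60, 60), dtype=np.uint8)
    mask[5:15, 5:15] = 1                       # first blob
    mask[30:40, 30:40] = 1                     # second blob

    shape_nb, labels = cv2.connectedComponents(mask)
    shape_nb -= 1                              # discard the background component
    expected_sample_number = 2
    print(shape_nb == expected_sample_number)  # True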
+
+ class LastImageAnalysisThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(LastImageAnalysisThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.cropping(False)
+         self.parent().po.get_background_to_subtract()
+         biomask = None
+         backmask = None
+         if self.parent().imageanalysiswindow.bio_masks_number != 0:
+             biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+         if self.parent().imageanalysiswindow.back_masks_number != 0:
+             backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+         if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2:
+             self.message_from_thread.emit("Image segmentation, wait...")
+             self.parent().po.fast_image_segmentation(is_first_image=False, biomask=biomask, backmask=backmask)
+         else:
+             self.message_from_thread.emit("Generating analysis options, wait...")
+             if self.parent().po.vars['several_blob_per_arena']:
+                 concomp_nb = [self.parent().po.sample_number, self.parent().po.first_image.size // 50]
+                 max_shape_size = .75 * self.parent().po.first_image.size
+                 total_surfarea = .99 * self.parent().po.first_image.size
+             else:
+                 concomp_nb = [self.parent().po.sample_number, self.parent().po.sample_number * 200]
+                 if self.parent().po.all['are_zigzag'] == "columns":
+                     inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.y_boundaries)))
+                 elif self.parent().po.all['are_zigzag'] == "rows":
+                     inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.x_boundaries)))
+                 else:
+                     dist1 = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.y_boundaries)))
+                     dist2 = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.x_boundaries)))
+                     inter_dist = max(dist1, dist2)
+                 if self.parent().po.all['starting_blob_shape'] == "circle":
+                     max_shape_size = np.pi * np.square(inter_dist)
+                 else:
+                     max_shape_size = np.square(2 * inter_dist)
+                 total_surfarea = max_shape_size * self.parent().po.sample_number
+             out_of_arenas = None
+             if self.parent().po.all['are_gravity_centers_moving'] != 1:
+                 out_of_arenas = np.ones_like(self.parent().po.videos.first_image.validated_shapes)
+                 for blob_i in np.arange(len(self.parent().po.vars['analyzed_individuals'])):
+                     out_of_arenas[self.parent().po.top[blob_i]: (self.parent().po.bot[blob_i] + 1),
+                                   self.parent().po.left[blob_i]: (self.parent().po.right[blob_i] + 1)] = 0
+             ref_image = self.parent().po.first_image.validated_shapes
+             self.parent().po.first_image.generate_subtract_background(self.parent().po.vars['convert_for_motion'])
+             kmeans_clust_nb = None
+             self.parent().po.last_image.find_last_im_csc(concomp_nb, total_surfarea, max_shape_size, out_of_arenas,
+                                                          ref_image, self.parent().po.first_image.subtract_background,
+                                                          kmeans_clust_nb, biomask, backmask, color_space_dictionaries=None,
+                                                          carefully=self.parent().po.carefully)
+         self.message_when_thread_finished.emit(True)
+
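The size bounds that LastImageAnalysisThread passes to find_last_im_csc come from the mean distance between successive arena boundaries: a blob is not expected to exceed a disk of radius inter_dist (circular starting spots) or a square of side 2 * inter_dist. A sketch of that arithmetic on toy boundary data, assuming four samples:

    import numpy as np

    y_boundaries = np.zeros(100)
    y_boundaries[[10, 35, 60, 85]] = 1                           # four row boundaries

    inter_dist = np.mean(np.diff(np.nonzero(y_boundaries)[0]))   # 25.0 here
    max_disk_area = np.pi * np.square(inter_dist)                # bound for circular spots
    max_square_area = np.square(2 * inter_dist)                  # bound for square spots
    total_surfarea = max_disk_area * 4                           # overall cap, 4 samples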
+
+ class CropScaleSubtractDelineateThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     message_when_thread_finished = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(CropScaleSubtractDelineateThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         logging.info("Start cropping if required")
+         self.parent().po.cropping(is_first_image=True)
+         self.parent().po.cropping(is_first_image=False)
+         self.parent().po.get_average_pixel_size()
+         if os.path.isfile('Data to run Cellects quickly.pkl'):
+             os.remove('Data to run Cellects quickly.pkl')
+         logging.info("Save data to run Cellects quickly")
+         self.parent().po.data_to_save['first_image'] = True
+         self.parent().po.save_data_to_run_cellects_quickly()
+         self.parent().po.data_to_save['first_image'] = False
+         if not self.parent().po.vars['several_blob_per_arena']:
+             logging.info("Check whether the detected shape number is ok")
+             nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.parent().po.first_image.validated_shapes)
+             y_lim = self.parent().po.first_image.y_boundaries
+             if (nb - 1) != self.parent().po.sample_number or np.any(stats[:, 4] == 1):
+                 self.message_from_thread.emit("Image analysis failed to detect the right cell(s) number: restart the analysis.")
+             elif len(np.nonzero(y_lim == -1)[0]) != len(np.nonzero(y_lim == 1)[0]):
+                 self.message_from_thread.emit("Automatic arena delineation cannot work if one cell touches the image border.")
+                 self.parent().po.first_image.y_boundaries = None
+             else:
+                 logging.info("Start automatic video delineation")
+                 analysis_status = self.parent().po.delineate_each_arena()
+                 self.message_when_thread_finished.emit(analysis_status["message"])
+         else:
+             logging.info("Start automatic video delineation")
+             analysis_status = self.parent().po.delineate_each_arena()
+             self.message_when_thread_finished.emit(analysis_status["message"])
+
+
+ class SaveManualDelineationThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(SaveManualDelineationThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.left = np.arange(self.parent().po.sample_number)
+         self.parent().po.right = np.arange(self.parent().po.sample_number)
+         self.parent().po.top = np.arange(self.parent().po.sample_number)
+         self.parent().po.bot = np.arange(self.parent().po.sample_number)
+         for arena in np.arange(1, self.parent().po.sample_number + 1):
+             y, x = np.nonzero(self.parent().imageanalysiswindow.arena_mask == arena)
+             self.parent().po.left[arena - 1] = np.min(x)
+             self.parent().po.right[arena - 1] = np.max(x)
+             self.parent().po.top[arena - 1] = np.min(y)
+             self.parent().po.bot[arena - 1] = np.max(y)
+
+         logging.info("Save data to run Cellects quickly")
+         self.parent().po.data_to_save['coordinates'] = True
+         self.parent().po.save_data_to_run_cellects_quickly()
+         self.parent().po.data_to_save['coordinates'] = False
+
+         logging.info("Save manual video delineation")
+         self.parent().po.vars['analyzed_individuals'] = np.arange(self.parent().po.sample_number) + 1
+         self.parent().po.videos = OneVideoPerBlob(self.parent().po.first_image, self.parent().po.starting_blob_hsize_in_pixels, self.parent().po.all['raw_images'])
+         self.parent().po.videos.left = self.parent().po.left
+         self.parent().po.videos.right = self.parent().po.right
+         self.parent().po.videos.top = self.parent().po.top
+         self.parent().po.videos.bot = self.parent().po.bot
+
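SaveManualDelineationThread turns each user-drawn arena label into a crop window by taking the extreme nonzero coordinates of that label. The same logic on a small synthetic mask:

    import numpy as np

    sample_number = 2
    arena_mask = np.zeros((50, 50), dtype=np.uint8)
    arena_mask[5:15, 5:20] = 1                     # arena 1
    arena_mask[25:45, 30:48] = 2                   # arena 2

    left = np.zeros(sample_number, dtype=np.int64)
    right = np.zeros(sample_number, dtype=np.int64)
    top = np.zeros(sample_number, dtype=np.int64)
    bot = np.zeros(sample_number, dtype=np.int64)
    for arena in np.arange(1, sample_number + 1):
        y, x = np.nonzero(arena_mask == arena)
        left[arena - 1], right[arena - 1] = np.min(x), np.max(x)
        top[arena - 1], bot[arena - 1] = np.min(y), np.max(y)
    # left == [5, 30], right == [19, 47], top == [5, 25], bot == [14, 44]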
+
+ class GetExifDataThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(GetExifDataThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.extract_exif()
+
+
+ class FinalizeImageAnalysisThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(FinalizeImageAnalysisThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.get_background_to_subtract()
+         self.parent().po.get_origins_and_backgrounds_lists()
+         if self.parent().po.last_image is None:
+             self.parent().po.get_last_image()
+             self.parent().po.fast_image_segmentation(False)
+         self.parent().po.find_if_lighter_background()
+         logging.info("The current (or the first) folder is ready to run")
+         self.parent().po.first_exp_ready_to_run = True
+         self.parent().po.data_to_save['coordinates'] = True
+         self.parent().po.data_to_save['exif'] = True
+         self.parent().po.save_data_to_run_cellects_quickly()
+         self.parent().po.data_to_save['coordinates'] = False
+         self.parent().po.data_to_save['exif'] = False
+
+
+ class SaveAllVarsThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(SaveAllVarsThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.save_variable_dict()
+         self.set_current_folder()
+         self.parent().po.save_data_to_run_cellects_quickly(new_one_if_does_not_exist=False)
+
+     def set_current_folder(self):
+         if self.parent().po.all['folder_number'] > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][0]} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
+                                               self.parent().po.all['folder_list'][0])
+         else:
+             curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {curr_path} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+
+
+ class OneArenaThread(QtCore.QThread):
+     message_from_thread_starting = QtCore.Signal(str)
+     image_from_thread = QtCore.Signal(dict)
+     when_loading_finished = QtCore.Signal(bool)
+     when_detection_finished = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(OneArenaThread, self).__init__(parent)
+         self.setParent(parent)
+         self._isRunning = False
+
+     def run(self):
+         continue_analysis = True
+         self._isRunning = True
+         self.message_from_thread_starting.emit("Video loading, wait...")
+         self.set_current_folder()
+         print(self.parent().po.vars['convert_for_motion'])
+         if not self.parent().po.first_exp_ready_to_run:
+             self.parent().po.load_data_to_run_cellects_quickly()
+             if not self.parent().po.first_exp_ready_to_run:
+                 # A look_for_data call is needed when Data to run Cellects quickly.pkl exists
+                 # and one folder is selected among several
+                 continue_analysis = self.pre_processing()
+         if continue_analysis:
+             print(self.parent().po.vars['convert_for_motion'])
+             memory_diff = self.parent().po.update_available_core_nb()
+             if self.parent().po.cores == 0:
+                 self.message_from_thread_starting.emit(f"Analyzing one arena requires {memory_diff}GB of additional RAM to run")
+             else:
+                 if self.parent().po.motion is None or self.parent().po.load_quick_full == 0:
+                     self.load_one_arena()
+                 if self.parent().po.load_quick_full > 0:
+                     if self.parent().po.motion.start is not None:
+                         logging.info("One arena detection has started")
+                         self.detection()
+                         if self.parent().po.load_quick_full > 1:
+                             logging.info("One arena post-processing has started")
+                             self.post_processing()
+                         else:
+                             self.when_detection_finished.emit("Detection done, ready to see the result")
+                     else:
+                         self.message_from_thread_starting.emit("The current parameters failed to detect the cell(s) motion")
+
+     def stop(self):
+         self._isRunning = False
+
+     def set_current_folder(self):
+         if self.parent().po.all['folder_number'] > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][0]} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
+                                               self.parent().po.all['folder_list'][0])
+         else:
+             curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {curr_path} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+
+     def pre_processing(self):
+         logging.info("Pre-processing has started")
+         analysis_status = {"continue": True, "message": ""}
+         self.parent().po.get_first_image()
+         self.parent().po.fast_image_segmentation(is_first_image=True)
+         if len(self.parent().po.vars['analyzed_individuals']) != self.parent().po.first_image.shape_number:
+             self.message_from_thread_starting.emit("Wrong specimen number: (re)do the complete analysis.")
+             analysis_status["continue"] = False
+         else:
+             self.parent().po.cropping(is_first_image=True)
+             self.parent().po.get_average_pixel_size()
+             analysis_status = self.parent().po.delineate_each_arena()
+             if not analysis_status["continue"]:
+                 self.message_from_thread_starting.emit(analysis_status["message"])
+                 logging.error(analysis_status['message'])
+             else:
+                 self.parent().po.data_to_save['exif'] = True
+                 self.parent().po.save_data_to_run_cellects_quickly()
+                 self.parent().po.data_to_save['exif'] = False
+                 self.parent().po.get_background_to_subtract()
+                 if len(self.parent().po.vars['analyzed_individuals']) != len(self.parent().po.top):
+                     self.message_from_thread_starting.emit("Wrong specimen number: (re)do the complete analysis.")
+                     analysis_status["continue"] = False
+                 else:
+                     self.parent().po.get_origins_and_backgrounds_lists()
+                     self.parent().po.get_last_image()
+                     self.parent().po.fast_image_segmentation(False)
+                     self.parent().po.find_if_lighter_background()
+                     logging.info("The current (or the first) folder is ready to run")
+                     self.parent().po.first_exp_ready_to_run = True
+         return analysis_status["continue"]
+
+     def load_one_arena(self):
+         arena = self.parent().po.all['arena']
+         i = np.nonzero(self.parent().po.vars['analyzed_individuals'] == arena)[0][0]
+         save_loaded_video: bool = False
+         if not os.path.isfile(f'ind_{arena}.npy') or self.parent().po.all['overwrite_unaltered_videos']:
+             logging.info(f"Starting to load arena n°{arena} from images")
+             add_to_c = 1
+             self.parent().po.one_arenate_done = True
+             i = np.nonzero(self.parent().po.vars['analyzed_individuals'] == arena)[0][0]
+             if self.parent().po.vars['lose_accuracy_to_save_memory']:
+                 self.parent().po.converted_video = np.zeros(
+                     (len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c, self.parent().po.right[i] - self.parent().po.left[i] + add_to_c),
+                     dtype=np.uint8)
+             else:
+                 self.parent().po.converted_video = np.zeros(
+                     (len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c, self.parent().po.right[i] - self.parent().po.left[i] + add_to_c),
+                     dtype=float)
+             if not self.parent().po.vars['already_greyscale']:
+                 self.parent().po.visu = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
+                                                   self.parent().po.right[i] - self.parent().po.left[i] + add_to_c, 3), dtype=np.uint8)
+                 if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                     if self.parent().po.vars['lose_accuracy_to_save_memory']:
+                         self.parent().po.converted_video2 = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
+                                                                       self.parent().po.right[i] - self.parent().po.left[i] + add_to_c), dtype=np.uint8)
+                     else:
+                         self.parent().po.converted_video2 = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
+                                                                       self.parent().po.right[i] - self.parent().po.left[i] + add_to_c), dtype=float)
+                 first_dict = TDict()
+                 second_dict = TDict()
+                 c_spaces = []
+                 for k, v in self.parent().po.vars['convert_for_motion'].items():
+                     if k != 'logical' and v.sum() > 0:
+                         if k[-1] != '2':
+                             first_dict[k] = v
+                             c_spaces.append(k)
+                         else:
+                             second_dict[k[:-1]] = v
+                             c_spaces.append(k[:-1])
+             prev_img = None
+             background = None
+             background2 = None
+             pat_tracker = PercentAndTimeTracker(self.parent().po.vars['img_number'])
+             for image_i, image_name in enumerate(self.parent().po.data_list):
+                 current_percentage, eta = pat_tracker.get_progress()
+                 is_landscape = self.parent().po.first_image.image.shape[0] < self.parent().po.first_image.image.shape[1]
+                 img = read_and_rotate(image_name, prev_img, self.parent().po.all['raw_images'], is_landscape)
+                 prev_img = deepcopy(img)
+                 if self.parent().po.first_image.cropped:
+                     img = img[self.parent().po.first_image.crop_coord[0]:self.parent().po.first_image.crop_coord[1],
+                               self.parent().po.first_image.crop_coord[2]:self.parent().po.first_image.crop_coord[3], :]
+                 img = img[self.parent().po.top[arena - 1]: (self.parent().po.bot[arena - 1] + add_to_c),
+                           self.parent().po.left[arena - 1]: (self.parent().po.right[arena - 1] + add_to_c), :]
+                 self.image_from_thread.emit({"message": f"Video loading: {current_percentage}%{eta}", "current_image": img})
+                 if self.parent().po.vars['already_greyscale']:
+                     if self.parent().po.reduce_image_dim:
+                         self.parent().po.converted_video[image_i, ...] = img[:, :, 0]
+                     else:
+                         self.parent().po.converted_video[image_i, ...] = img
+                 else:
+                     self.parent().po.visu[image_i, ...] = img
+                     if self.parent().po.vars['subtract_background']:
+                         background = self.parent().po.vars['background_list'][i]
+                         if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                             background2 = self.parent().po.vars['background_list2'][i]
+                     greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces, first_dict, second_dict,
+                                                                                         background, background2,
+                                                                                         self.parent().po.vars['lose_accuracy_to_save_memory'])
+                     if self.parent().po.vars['filter_spec'] is not None and self.parent().po.vars['filter_spec']['filter1_type'] != "":
+                         greyscale_image = apply_filter(greyscale_image,
+                                                        self.parent().po.vars['filter_spec']['filter1_type'],
+                                                        self.parent().po.vars['filter_spec']['filter1_param'],
+                                                        self.parent().po.vars['lose_accuracy_to_save_memory'])
+                         if greyscale_image2 is not None and self.parent().po.vars['filter_spec']['filter2_type'] != "":
+                             greyscale_image2 = apply_filter(greyscale_image2,
+                                                             self.parent().po.vars['filter_spec']['filter2_type'],
+                                                             self.parent().po.vars['filter_spec']['filter2_param'],
+                                                             self.parent().po.vars['lose_accuracy_to_save_memory'])
+                     self.parent().po.converted_video[image_i, ...] = greyscale_image
+                     if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                         self.parent().po.converted_video2[image_i, ...] = greyscale_image2
+
+             save_loaded_video = True
+             if self.parent().po.vars['already_greyscale']:
+                 self.videos_in_ram = self.parent().po.converted_video
+             else:
+                 if self.parent().po.vars['convert_for_motion']['logical'] == 'None':
+                     self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video)]
+                 else:
+                     self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video), deepcopy(self.parent().po.converted_video2)]
+         else:
+             logging.info(f"Starting to load arena n°{arena} from .npy saved file")
+             self.videos_in_ram = None
+         l = [i, arena, self.parent().po.vars, False, False, False, self.videos_in_ram]
+         self.parent().po.motion = MotionAnalysis(l)
+         r = weakref.ref(self.parent().po.motion)
+         if self.videos_in_ram is None:
+             self.parent().po.converted_video = deepcopy(self.parent().po.motion.converted_video)
+             if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                 self.parent().po.converted_video2 = deepcopy(self.parent().po.motion.converted_video2)
+         self.parent().po.motion.get_origin_shape()
+         if self.parent().po.motion.dims[0] >= 40:
+             step = self.parent().po.motion.dims[0] // 20
+         else:
+             step = 1
+         if self.parent().po.motion.start >= (self.parent().po.motion.dims[0] - step - 1):
+             self.parent().po.motion.start = None
+         else:
+             self.parent().po.motion.get_covering_duration(step)
+         self.when_loading_finished.emit(save_loaded_video)
+         if self.parent().po.motion.visu is None:
+             visu = self.parent().po.motion.converted_video
+             visu -= np.min(visu)
+             visu = 255 * (visu / np.max(visu))
+             visu = np.round(visu).astype(np.uint8)
+             if len(visu.shape) == 3:
+                 visu = np.stack((visu, visu, visu), axis=3)
+             self.parent().po.motion.visu = visu
+
+     def detection(self):
+         self.message_from_thread_starting.emit("Quick video segmentation")
+         self.parent().po.motion.converted_video = deepcopy(self.parent().po.converted_video)
+         if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+             self.parent().po.motion.converted_video2 = deepcopy(self.parent().po.converted_video2)
+         self.parent().po.motion.detection(compute_all_possibilities=self.parent().po.all['compute_all_options'])
+         if self.parent().po.all['compute_all_options']:
+             self.parent().po.computed_video_options = np.ones(5, bool)
+         else:
+             self.parent().po.computed_video_options = np.zeros(5, bool)
+             self.parent().po.computed_video_options[self.parent().po.all['video_option']] = True
+
+     def post_processing(self):
+         self.parent().po.motion.smoothed_video = None
+         if self.parent().po.vars['color_number'] > 2:
+             analyses_to_compute = [0]
+         else:
+             if self.parent().po.all['compute_all_options']:
+                 analyses_to_compute = np.arange(5)
+             else:
+                 logging.info(f"option: {self.parent().po.all['video_option']}")
+                 analyses_to_compute = [self.parent().po.all['video_option']]
+         time_parameters = [self.parent().po.motion.start, self.parent().po.motion.step,
+                            self.parent().po.motion.lost_frames, self.parent().po.motion.substantial_growth]
+         args = [self.parent().po.all['arena'] - 1, self.parent().po.all['arena'], self.parent().po.vars,
+                 False, False, False, self.videos_in_ram]
+         if self.parent().po.vars['do_fading']:
+             self.parent().po.newly_explored_area = np.zeros((self.parent().po.motion.dims[0], 5), np.uint64)
+         for seg_i in analyses_to_compute:
+             analysis_i = MotionAnalysis(args)
+             r = weakref.ref(analysis_i)
+             analysis_i.segmentation = np.zeros(analysis_i.converted_video.shape[:3], dtype=np.uint8)
+             if self.parent().po.all['compute_all_options']:
+                 if seg_i == 0:
+                     analysis_i.segmentation = self.parent().po.motion.segmentation
+                 else:
+                     if seg_i == 1:
+                         mask = self.parent().po.motion.luminosity_segmentation
+                     elif seg_i == 2:
+                         mask = self.parent().po.motion.gradient_segmentation
+                     elif seg_i == 3:
+                         mask = self.parent().po.motion.logical_and
+                     elif seg_i == 4:
+                         mask = self.parent().po.motion.logical_or
+                     analysis_i.segmentation[mask[0], mask[1], mask[2]] = 1
+             else:
+                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
+                     analysis_i.segmentation = self.parent().po.motion.segmentation
+
+             analysis_i.start = time_parameters[0]
+             analysis_i.step = time_parameters[1]
+             analysis_i.lost_frames = time_parameters[2]
+             analysis_i.substantial_growth = time_parameters[3]
+             analysis_i.origin_idx = self.parent().po.motion.origin_idx
+             analysis_i.initialize_post_processing()
+             analysis_i.t = analysis_i.start
+
+             while self._isRunning and analysis_i.t < analysis_i.binary.shape[0]:
+                 analysis_i.update_shape(False)
+                 contours = np.nonzero(
+                     cv2.morphologyEx(analysis_i.binary[analysis_i.t - 1, :, :], cv2.MORPH_GRADIENT, cross_33))
+                 current_image = deepcopy(self.parent().po.motion.visu[analysis_i.t - 1, :, :, :])
+                 current_image[contours[0], contours[1], :] = self.parent().po.vars['contour_color']
+                 self.image_from_thread.emit(
+                     {"message": f"Tracking option n°{seg_i + 1}. Image number: {analysis_i.t - 1}",
+                      "current_image": current_image})
+             if analysis_i.start is None:
+                 analysis_i.binary = np.repeat(np.expand_dims(analysis_i.origin, 0),
+                                               analysis_i.converted_video.shape[0], axis=0)
+                 if self.parent().po.vars['color_number'] > 2:
+                     self.message_from_thread_starting.emit(
+                         "Failed to detect motion. Redo image analysis (with only 2 colors?)")
+                 else:
+                     self.message_from_thread_starting.emit(f"Tracking option n°{seg_i + 1} failed to detect motion")
+
+             if self.parent().po.all['compute_all_options']:
+                 if seg_i == 0:
+                     self.parent().po.motion.segmentation = analysis_i.binary
+                 elif seg_i == 1:
+                     self.parent().po.motion.luminosity_segmentation = np.nonzero(analysis_i.binary)
+                 elif seg_i == 2:
+                     self.parent().po.motion.gradient_segmentation = np.nonzero(analysis_i.binary)
+                 elif seg_i == 3:
+                     self.parent().po.motion.logical_and = np.nonzero(analysis_i.binary)
+                 elif seg_i == 4:
+                     self.parent().po.motion.logical_or = np.nonzero(analysis_i.binary)
+             else:
+                 self.parent().po.motion.segmentation = analysis_i.binary
+
+         self.when_detection_finished.emit("Post-processing done, ready to see the result")
+
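When OneArenaThread rebuilds a video from images, it splits the convert_for_motion dictionary into the two channel-weight dictionaries passed to generate_color_space_combination: keys ending in '2' drive the second conversion, the others the first, and the 'logical' entry says how the two results combine. A sketch of that split, with plain dicts standing in for numba's typed Dict and toy BGR weights:

    import numpy as np

    convert_for_motion = {
        'logical': 'Or',
        'lab': np.array([0, 0, 1]),    # feeds the first conversion
        'hsv2': np.array([0, 1, 0]),   # trailing '2': feeds the second conversion
    }

    first_dict, second_dict, c_spaces = {}, {}, []
    for k, v in convert_for_motion.items():
        if k != 'logical' and v.sum() > 0:
            if k[-1] != '2':
                first_dict[k] = v
                c_spaces.append(k)
            else:
                second_dict[k[:-1]] = v
                c_spaces.append(k[:-1])
    # first_dict == {'lab': ...}, second_dict == {'hsv': ...}, c_spaces == ['lab', 'hsv']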
+
+
+ class VideoReaderThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(dict)
+
+     def __init__(self, parent=None):
+         super(VideoReaderThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         video_analysis = deepcopy(self.parent().po.motion.visu)
+         self.message_from_thread.emit(
+             {"current_image": video_analysis[0, ...], "message": "Video preparation, wait..."})
+         if self.parent().po.load_quick_full > 0:
+             if self.parent().po.all['compute_all_options']:
+                 if self.parent().po.all['video_option'] == 0:
+                     video_mask = self.parent().po.motion.segmentation
+                 else:
+                     if self.parent().po.all['video_option'] == 1:
+                         mask = self.parent().po.motion.luminosity_segmentation
+                     elif self.parent().po.all['video_option'] == 2:
+                         mask = self.parent().po.motion.gradient_segmentation
+                     elif self.parent().po.all['video_option'] == 3:
+                         mask = self.parent().po.motion.logical_and
+                     elif self.parent().po.all['video_option'] == 4:
+                         mask = self.parent().po.motion.logical_or
+                     video_mask = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
+                     video_mask[mask[0], mask[1], mask[2]] = 1
+             else:
+                 video_mask = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
+                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
+                     video_mask = self.parent().po.motion.segmentation
+
+             if self.parent().po.load_quick_full == 1:
+                 video_mask = np.cumsum(video_mask.astype(np.uint32), axis=0)
+                 video_mask[video_mask > 0] = 1
+                 video_mask = video_mask.astype(np.uint8)
+             logging.info(f"sum: {video_mask.sum()}")
+             for t in np.arange(self.parent().po.motion.dims[0]):
+                 mask = cv2.morphologyEx(video_mask[t, ...], cv2.MORPH_GRADIENT, cross_33)
+                 mask = np.stack((mask, mask, mask), axis=2)
+                 current_image = deepcopy(video_analysis[t, ...])
+                 current_image[mask > 0] = self.parent().po.vars['contour_color']
+                 self.message_from_thread.emit(
+                     {"current_image": current_image, "message": f"Reading in progress... Image number: {t}"})
+                 time.sleep(1 / 50)
+             self.message_from_thread.emit({"current_image": current_image, "message": ""})
+
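The load_quick_full == 1 branch above displays the area ever covered rather than the instantaneous detection: a cumulative sum along the time axis keeps every pixel set from its first detection onward, even if the detection later flickers off. In isolation:

    import numpy as np

    video_mask = np.zeros((4, 2, 2), dtype=np.uint8)   # axes: time, y, x
    video_mask[1, 0, 0] = 1                            # detected at t=1 only
    video_mask[3, 1, 1] = 1                            # detected at t=3 only

    coverage = np.cumsum(video_mask.astype(np.uint32), axis=0)
    coverage[coverage > 0] = 1
    coverage = coverage.astype(np.uint8)
    # pixel (0, 0) is 1 for every frame from t=1 onward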
+
+
+ class ChangeOneRepResultThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(ChangeOneRepResultThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.message_from_thread.emit(
+             f"Arena n°{self.parent().po.all['arena']}: modifying its results...")
+         if self.parent().po.motion.start is None:
+             self.parent().po.motion.binary = np.repeat(np.expand_dims(self.parent().po.motion.origin, 0),
+                                                        self.parent().po.motion.converted_video.shape[0], axis=0).astype(np.uint8)
+         else:
+             if self.parent().po.all['compute_all_options']:
+                 if self.parent().po.all['video_option'] == 0:
+                     self.parent().po.motion.binary = self.parent().po.motion.segmentation
+                 else:
+                     if self.parent().po.all['video_option'] == 1:
+                         mask = self.parent().po.motion.luminosity_segmentation
+                     elif self.parent().po.all['video_option'] == 2:
+                         mask = self.parent().po.motion.gradient_segmentation
+                     elif self.parent().po.all['video_option'] == 3:
+                         mask = self.parent().po.motion.logical_and
+                     elif self.parent().po.all['video_option'] == 4:
+                         mask = self.parent().po.motion.logical_or
+                     self.parent().po.motion.binary = np.zeros(self.parent().po.motion.dims, dtype=np.uint8)
+                     self.parent().po.motion.binary[mask[0], mask[1], mask[2]] = 1
+             else:
+                 self.parent().po.motion.binary = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
+                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
+                     self.parent().po.motion.binary = self.parent().po.motion.segmentation
+
+         if self.parent().po.vars['do_fading']:
+             self.parent().po.motion.newly_explored_area = self.parent().po.newly_explored_area[:, self.parent().po.all['video_option']]
+         self.parent().po.motion.max_distance = 9 * self.parent().po.vars['detection_range_factor']
+         self.parent().po.motion.get_descriptors_from_binary(release_memory=False)
+         self.parent().po.motion.detect_growth_transitions()
+         self.parent().po.motion.networks_detection(False)
+         self.parent().po.motion.study_cytoscillations(False)
+         self.parent().po.motion.fractal_descriptions()
+         self.parent().po.motion.get_descriptors_summary()
+         self.parent().po.motion.change_results_of_one_arena()
+         self.parent().po.motion = None
+         self.message_from_thread.emit("")
+
+
+ class WriteVideoThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(WriteVideoThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         arena = self.parent().po.all['arena']
+         if not self.parent().po.vars['already_greyscale']:
+             write_video(self.parent().po.visu, f'ind_{arena}.npy')
+         else:
+             write_video(self.parent().po.converted_video, f'ind_{arena}.npy')
+
+
+ class RunAllThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     image_from_thread = QtCore.Signal(dict)
+
+     def __init__(self, parent=None):
+         super(RunAllThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         analysis_status = {"continue": True, "message": ""}
+         message = self.set_current_folder(0)
+         if self.parent().po.first_exp_ready_to_run:
+             self.message_from_thread.emit(message + ": Write videos...")
+             if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
+                 analysis_status["continue"] = False
+                 analysis_status["message"] = "Wrong specimen number: redo the first image analysis."
+                 self.message_from_thread.emit("Wrong specimen number: restart Cellects and do another analysis.")
+             else:
+                 analysis_status = self.run_video_writing(message)
+                 if analysis_status["continue"]:
+                     self.message_from_thread.emit(message + ": Analyse all videos...")
+                     analysis_status = self.run_motion_analysis(message)
+             if analysis_status["continue"]:
+                 if self.parent().po.all['folder_number'] > 1:
+                     self.parent().po.all['folder_list'] = self.parent().po.all['folder_list'][1:]
+                     self.parent().po.all['sample_number_per_folder'] = self.parent().po.all['sample_number_per_folder'][1:]
+                 else:
+                     self.parent().po.look_for_data()
+
+         if analysis_status["continue"] and (not self.parent().po.first_exp_ready_to_run or self.parent().po.all['folder_number'] > 1):
+             folder_number = np.max((len(self.parent().po.all['folder_list']), 1))
+             for exp_i in np.arange(folder_number):
+                 if len(self.parent().po.all['folder_list']) > 0:
+                     logging.info(self.parent().po.all['folder_list'][exp_i])
+                 self.parent().po.first_im = None
+                 self.parent().po.first_image = None
+                 self.parent().po.last_im = None
+                 self.parent().po.last_image = None
+                 self.parent().po.videos = None
+                 self.parent().po.top = None
+
+                 message = self.set_current_folder(exp_i)
+                 self.message_from_thread.emit(f'{message}, pre-processing...')
+                 self.parent().po.load_data_to_run_cellects_quickly()
+                 if not self.parent().po.first_exp_ready_to_run:
+                     analysis_status = self.pre_processing()
+                 if analysis_status["continue"]:
+                     self.message_from_thread.emit(message + ": Write videos from images before analysis...")
+                     if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
+                         self.message_from_thread.emit("Wrong specimen number: first image analysis is mandatory.")
+                         analysis_status["continue"] = False
+                         analysis_status["message"] = "Wrong specimen number: first image analysis is mandatory."
+                     else:
+                         analysis_status = self.run_video_writing(message)
+                         if analysis_status["continue"]:
+                             self.message_from_thread.emit(message + ": Starting analysis...")
+                             analysis_status = self.run_motion_analysis(message)
+                 if not analysis_status["continue"]:
+                     break
+         print(self.parent().po.vars['convert_for_motion'])
+         if analysis_status["continue"]:
+             if self.parent().po.all['folder_number'] > 1:
+                 self.message_from_thread.emit(f"Exp {self.parent().po.all['folder_list'][0]} to {self.parent().po.all['folder_list'][-1]} analyzed.")
+             else:
+                 curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+                 self.message_from_thread.emit(f'Exp {curr_path}, analyzed.')
+         else:
+             logging.error(message + " " + analysis_status["message"])
+             self.message_from_thread.emit(message + " " + analysis_status["message"])
+
+     def set_current_folder(self, exp_i):
+         if self.parent().po.all['folder_number'] > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][exp_i]} folder")
+             message = f"{str(self.parent().po.all['global_pathway'])[:6]} ... {self.parent().po.all['folder_list'][exp_i]}"
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][exp_i],
+                                               self.parent().po.all['folder_list'][exp_i])
+         else:
+             message = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {message} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+         return message
+
+     def pre_processing(self):
+         analysis_status = {"continue": True, "message": ""}
+         logging.info("Pre-processing has started")
+         if len(self.parent().po.data_list) > 0:
+             self.parent().po.get_first_image()
+             self.parent().po.fast_image_segmentation(True)
+             self.parent().po.cropping(is_first_image=True)
+             self.parent().po.get_average_pixel_size()
+             try:
+                 analysis_status = self.parent().po.delineate_each_arena()
+             except ValueError:
+                 analysis_status["message"] = "Failed to detect the right cell(s) number: the first image analysis is mandatory."
+                 analysis_status["continue"] = False
+
+             if analysis_status["continue"]:
+                 self.parent().po.data_to_save['exif'] = True
+                 self.parent().po.save_data_to_run_cellects_quickly()
+ self.message_from_thread.emit(message + ": Write videos...")
1051
+ if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
1052
+ analysis_status["continue"] = False
1053
+ analysis_status["message"] = f"Wrong specimen number: redo the first image analysis."
1054
+ self.message_from_thread.emit(f"Wrong specimen number: restart Cellects and do another analysis.")
1055
+ else:
1056
+ analysis_status = self.run_video_writing(message)
1057
+ if analysis_status["continue"]:
1058
+ self.message_from_thread.emit(message + ": Analyse all videos...")
1059
+ analysis_status = self.run_motion_analysis(message)
1060
+ if analysis_status["continue"]:
1061
+ if self.parent().po.all['folder_number'] > 1:
1062
+ self.parent().po.all['folder_list'] = self.parent().po.all['folder_list'][1:]
1063
+ self.parent().po.all['sample_number_per_folder'] = self.parent().po.all['sample_number_per_folder'][1:]
1064
+ else:
1065
+ self.parent().po.look_for_data()
1066
+
1067
+ if analysis_status["continue"] and (not self.parent().po.first_exp_ready_to_run or self.parent().po.all['folder_number'] > 1):
1068
+ folder_number = np.max((len(self.parent().po.all['folder_list']), 1))
1069
+
1070
+ for exp_i in np.arange(folder_number):
1071
+ if len(self.parent().po.all['folder_list']) > 0:
1072
+ logging.info(self.parent().po.all['folder_list'][exp_i])
1073
+ self.parent().po.first_im = None
1074
+ self.parent().po.first_image = None
1075
+ self.parent().po.last_im = None
1076
+ self.parent().po.last_image = None
1077
+ self.parent().po.videos = None
1078
+ self.parent().po.top = None
1079
+
1080
+ message = self.set_current_folder(exp_i)
1081
+ self.message_from_thread.emit(f'{message}, pre-processing...')
1082
+ self.parent().po.load_data_to_run_cellects_quickly()
1083
+ if not self.parent().po.first_exp_ready_to_run:
1084
+ analysis_status = self.pre_processing()
1085
+ if analysis_status["continue"]:
1086
+ self.message_from_thread.emit(message + ": Write videos from images before analysis...")
1087
+ if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
1088
+ self.message_from_thread.emit(f"Wrong specimen number: first image analysis is mandatory.")
1089
+ analysis_status["continue"] = False
1090
+ analysis_status["message"] = f"Wrong specimen number: first image analysis is mandatory."
1091
+ else:
1092
+ analysis_status = self.run_video_writing(message)
1093
+ if analysis_status["continue"]:
1094
+ self.message_from_thread.emit(message + ": Starting analysis...")
1095
+ analysis_status = self.run_motion_analysis(message)
1096
+
1097
+ if not analysis_status["continue"]:
1098
+ # self.message_from_thread.emit(analysis_status["message"])
1099
+ break
1100
+ # if not continue_analysis:
1101
+ # self.message_from_thread.emit(f"Error: wrong folder or parameters")
1102
+ # break
1103
+ # if not enough_memory:
1104
+ # self.message_from_thread.emit(f"Error: not enough memory")
1105
+ # break
1106
+ print(self.parent().po.vars['convert_for_motion'])
1107
+ if analysis_status["continue"]:
1108
+ if self.parent().po.all['folder_number'] > 1:
1109
+ self.message_from_thread.emit(f"Exp {self.parent().po.all['folder_list'][0]} to {self.parent().po.all['folder_list'][-1]} analyzed.")
1110
+ else:
1111
+ curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
1112
+ self.message_from_thread.emit(f'Exp {curr_path}, analyzed.')
1113
+ else:
1114
+ logging.error(message + " " + analysis_status["message"])
1115
+ self.message_from_thread.emit(message + " " + analysis_status["message"])
1116
+
1117
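+     # Selects the folder to analyse, updates the program organizer's folder
+     # bookkeeping and returns a shortened folder label used in progress messages.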
+     def set_current_folder(self, exp_i):
+         if self.parent().po.all['folder_number'] > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][exp_i]} folder")
+             message = f"{str(self.parent().po.all['global_pathway'])[:6]} ... {self.parent().po.all['folder_list'][exp_i]}"
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][exp_i],
+                                               self.parent().po.all['folder_list'][exp_i])
+         else:
+             message = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {message} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+         return message
+
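+     # Re-runs the minimal image analysis needed before video writing when a
+     # folder has no saved configuration: first/last image segmentation,
+     # cropping, arena delineation and background estimation.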
+     def pre_processing(self):
+         analysis_status = {"continue": True, "message": ""}
+         logging.info("Pre-processing has started")
+         if len(self.parent().po.data_list) > 0:
+             self.parent().po.get_first_image()
+             self.parent().po.fast_image_segmentation(True)
+             self.parent().po.cropping(is_first_image=True)
+             self.parent().po.get_average_pixel_size()
+             try:
+                 analysis_status = self.parent().po.delineate_each_arena()
+             except ValueError:
+                 analysis_status["message"] = "Failed to detect the right cell(s) number: the first image analysis is mandatory."
+                 analysis_status["continue"] = False
+
+             if analysis_status["continue"]:
+                 self.parent().po.data_to_save['exif'] = True
+                 self.parent().po.save_data_to_run_cellects_quickly()
+                 self.parent().po.data_to_save['exif'] = False
+                 self.parent().po.get_background_to_subtract()
+                 if self.parent().po.top is None and self.parent().imageanalysiswindow.manual_delineation_flag:
+                     analysis_status["message"] = "Auto video delineation failed, use the manual delineation tool"
+                     analysis_status["continue"] = False
+                 elif self.parent().po.top is None or len(self.parent().po.vars['analyzed_individuals']) != len(self.parent().po.top):
+                     analysis_status["message"] = "Failed to detect the right cell(s) number: the first image analysis is mandatory."
+                     analysis_status["continue"] = False
+                 else:
+                     self.parent().po.get_origins_and_backgrounds_lists()
+                     self.parent().po.get_last_image()
+                     self.parent().po.fast_image_segmentation(is_first_image=False)
+                     self.parent().po.find_if_lighter_background()
+             return analysis_status
+         else:
+             analysis_status["message"] = "Wrong folder or parameters"
+             analysis_status["continue"] = False
+             return analysis_status
+
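+     # Crops every image around each arena and saves one .npy video per arena.
+     # Writing is skipped when every ind_<arena>.npy file already exists and
+     # 'overwrite_unaltered_videos' is disabled.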
+     def run_video_writing(self, message):
+         analysis_status = {"continue": True, "message": ""}
+         look_for_existing_videos = glob('ind_*.npy')
+         there_already_are_videos = len(look_for_existing_videos) == len(self.parent().po.vars['analyzed_individuals'])
+         logging.info(f"{len(look_for_existing_videos)} .npy video files found for {len(self.parent().po.vars['analyzed_individuals'])} arenas to analyze")
+         do_write_videos = not there_already_are_videos or self.parent().po.all['overwrite_unaltered_videos']
+         if do_write_videos:
+             logging.info("Starting video writing")
+             in_colors = not self.parent().po.vars['already_greyscale']
+             self.parent().po.videos = OneVideoPerBlob(self.parent().po.first_image,
+                                                       self.parent().po.starting_blob_hsize_in_pixels,
+                                                       self.parent().po.all['raw_images'])
+             self.parent().po.videos.left = self.parent().po.left
+             self.parent().po.videos.right = self.parent().po.right
+             self.parent().po.videos.top = self.parent().po.top
+             self.parent().po.videos.bot = self.parent().po.bot
+             self.parent().po.videos.first_image.shape_number = self.parent().po.sample_number
+             bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining = self.parent().po.videos.prepare_video_writing(
+                 self.parent().po.data_list, self.parent().po.vars['min_ram_free'], in_colors)
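+             # Arena videos are written bunch by bunch: each bunch holds as many
+             # arena crops as fit in RAM, and every image file is read only once
+             # per bunch before being dispatched to the crops of that bunch.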
+ if analysis_status["continue"]:
1190
+ # Check that there is enough available RAM for one video par bunch and ROM for all videos
1191
+ if video_nb_per_bunch > 0 and rom_memory_required is None:
1192
+ pat_tracker1 = PercentAndTimeTracker(bunch_nb * self.parent().po.vars['img_number'])
1193
+ pat_tracker2 = PercentAndTimeTracker(len(self.parent().po.vars['analyzed_individuals']))
1194
+ arena_percentage = 0
1195
+ is_landscape = self.parent().po.first_image.image.shape[0] < self.parent().po.first_image.image.shape[1]
1196
+ for bunch in np.arange(bunch_nb):
1197
+ # Update the labels of arenas and the video_bunch to write
1198
+ if bunch == (bunch_nb - 1) and remaining > 0:
1199
+ arena = np.arange(bunch * video_nb_per_bunch, bunch * video_nb_per_bunch + remaining)
1200
+ else:
1201
+ arena = np.arange(bunch * video_nb_per_bunch, (bunch + 1) * video_nb_per_bunch)
1202
+ if self.parent().po.videos.use_list_of_vid:
1203
+ video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in arena]
1204
+ else:
1205
+ video_bunch = np.zeros(np.append(sizes[0, :], len(arena)), dtype=np.uint8)
1206
+ prev_img = None
1207
+ images_done = bunch * self.parent().po.vars['img_number']
1208
+ for image_i, image_name in enumerate(self.parent().po.data_list):
1209
+ image_percentage, remaining_time = pat_tracker1.get_progress(image_i + images_done)
1210
+ self.message_from_thread.emit(message + f" Step 1/2: Video writing ({np.round((image_percentage + arena_percentage) / 2, 2)}%)")
1211
+ if not os.path.exists(image_name):
1212
+ raise FileNotFoundError(image_name)
1213
+ img = read_and_rotate(image_name, prev_img, self.parent().po.all['raw_images'], is_landscape, self.parent().po.first_image.crop_coord)
1214
+ prev_img = deepcopy(img)
1215
+ if self.parent().po.vars['already_greyscale'] and self.parent().po.reduce_image_dim:
1216
+ img = img[:, :, 0]
1217
+
1218
+ for arena_i, arena_name in enumerate(arena):
1219
+ try:
1220
+ sub_img = img[self.parent().po.top[arena_name]: (self.parent().po.bot[arena_name] + 1),
1221
+ self.parent().po.left[arena_name]: (self.parent().po.right[arena_name] + 1), ...]
1222
+ if self.parent().po.videos.use_list_of_vid:
1223
+ video_bunch[arena_i][image_i, ...] = sub_img
1224
+ else:
1225
+ if len(video_bunch.shape) == 5:
1226
+ video_bunch[image_i, :, :, :, arena_i] = sub_img
1227
+ else:
1228
+ video_bunch[image_i, :, :, arena_i] = sub_img
1229
+ except ValueError:
1230
+ analysis_status["message"] = f"One (or more) image has a different size (restart)"
1231
+ analysis_status["continue"] = False
1232
+ logging.info(f"In the {message} folder: one (or more) image has a different size (restart)")
1233
+ break
1234
+ if not analysis_status["continue"]:
1235
+ break
1236
+ if not analysis_status["continue"]:
1237
+ break
1238
+ if analysis_status["continue"]:
1239
+ for arena_i, arena_name in enumerate(arena):
1240
+ try:
1241
+ arena_percentage, eta = pat_tracker2.get_progress()
1242
+ self.message_from_thread.emit(message + f" Step 1/2: Video writing ({np.round((image_percentage + arena_percentage) / 2, 2)}%)")# , ETA {remaining_time}
1243
+ if self.parent().po.videos.use_list_of_vid:
1244
+ np.save(vid_names[arena_name], video_bunch[arena_i])
1245
+ else:
1246
+ if len(video_bunch.shape) == 5:
1247
+ np.save(vid_names[arena_name], video_bunch[:, :, :, :, arena_i])
1248
+ else:
1249
+ np.save(vid_names[arena_name], video_bunch[:, :, :, arena_i])
1250
+ except OSError:
1251
+ self.message_from_thread.emit(message + f"full disk memory, clear space and retry")
1252
+ logging.info(f"Bunch n°{bunch + 1} over {bunch_nb} saved.")
1253
+ logging.info("When they exist, do not overwrite unaltered video")
1254
+ self.parent().po.all['overwrite_unaltered_videos'] = False
1255
+ self.parent().po.save_variable_dict()
1256
+ self.parent().po.save_data_to_run_cellects_quickly()
1257
+ analysis_status["message"] = f"Video writing complete."
1258
+ if self.parent().po.videos is not None:
1259
+ del self.parent().po.videos
1260
+ return analysis_status
1261
+ else:
1262
+ analysis_status["continue"] = False
1263
+ if video_nb_per_bunch == 0:
1264
+ memory_diff = self.parent().po.update_available_core_nb()
1265
+ ram_message = f"{memory_diff}GB of additional RAM"
1266
+ if rom_memory_required is not None:
1267
+ rom_message = f"at least {rom_memory_required}GB of free ROM"
1268
+
1269
+ if video_nb_per_bunch == 0 and rom_memory_required is not None:
1270
+ analysis_status["message"] = f"Requires {ram_message} and {rom_message} to run"
1271
+ # self.message_from_thread.emit(f"Analyzing {message} requires {ram_message} and {rom_message} to run")
1272
+ elif video_nb_per_bunch == 0:
1273
+ analysis_status["message"] = f"Requires {ram_message} to run"
1274
+ # self.message_from_thread.emit(f"Analyzing {message} requires {ram_message} to run")
1275
+ elif rom_memory_required is not None:
1276
+ analysis_status["message"] = f"Requires {rom_message} to run"
1277
+ # self.message_from_thread.emit(f"Analyzing {message} requires {rom_message} to run")
1278
+ logging.info(f"Cellects is not writing videos: insufficient memory")
1279
+ return analysis_status
1280
+ else:
1281
+ return analysis_status
1282
+
1283
+
1284
+ else:
1285
+ logging.info(f"Cellects is not writing videos: unnecessary")
1286
+ analysis_status["message"] = f"Cellects is not writing videos: unnecessary"
1287
+ return analysis_status
1288
+
1289
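+     # Analyses every arena video, either sequentially or with one process per
+     # available CPU core, then merges the per-arena results into the global
+     # result tables.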
+     def run_motion_analysis(self, message):
+         analysis_status = {"continue": True, "message": ""}
+         logging.info(f"Starting motion analysis with the detection method n°{self.parent().po.all['video_option']}")
+         self.parent().po.instantiate_tables()
+         try:
+             memory_diff = self.parent().po.update_available_core_nb()
+             if self.parent().po.cores > 0:  # i.e. enough memory
+                 if not self.parent().po.all['do_multiprocessing'] or self.parent().po.cores == 1:
+                     self.message_from_thread.emit(f"{message} Step 2/2: Video analysis")
+                     logging.info("Starting sequential analysis")
+                     start_time = default_timer()
+                     pat_tracker = PercentAndTimeTracker(len(self.parent().po.vars['analyzed_individuals']))
+                     for i, arena in enumerate(self.parent().po.vars['analyzed_individuals']):
+                         analysis_i = MotionAnalysis([i, arena, self.parent().po.vars, True, True, False, None])
+                         r = weakref.ref(analysis_i)
+                         if not self.parent().po.vars['several_blob_per_arena']:
+                             # Save basic statistics
+                             self.parent().po.update_one_row_per_arena(i, analysis_i.one_descriptor_per_arena)
+                             # Save descriptors in long format
+                             self.parent().po.update_one_row_per_frame(i * self.parent().po.vars['img_number'],
+                                                                       arena * self.parent().po.vars['img_number'],
+                                                                       analysis_i.one_row_per_frame)
+                         # Save cytosol oscillations
+                         if not pd.isna(analysis_i.one_descriptor_per_arena["first_move"]):
+                             if self.parent().po.vars['oscilacyto_analysis']:
+                                 oscil_i = pd.DataFrame(
+                                     np.c_[np.repeat(arena, analysis_i.clusters_final_data.shape[0]),
+                                           analysis_i.clusters_final_data],
+                                     columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
+                                 if self.parent().po.one_row_per_oscillating_cluster is None:
+                                     self.parent().po.one_row_per_oscillating_cluster = oscil_i
+                                 else:
+                                     self.parent().po.one_row_per_oscillating_cluster = pd.concat((self.parent().po.one_row_per_oscillating_cluster, oscil_i))
+                         # Save efficiency visualization
+                         self.parent().po.add_analysis_visualization_to_first_and_last_images(i, analysis_i.efficiency_test_1,
+                                                                                              analysis_i.efficiency_test_2)
+                         # Emit message to the interface
+                         current_percentage, eta = pat_tracker.get_progress()
+                         self.image_from_thread.emit({"current_image": self.parent().po.last_image.bgr,
+                                                      "message": f"{message} Step 2/2: analyzed {arena} out of {len(self.parent().po.vars['analyzed_individuals'])} arenas ({current_percentage}%){eta}"})
+                         del analysis_i
+                     logging.info(f"Sequential analysis lasted {(default_timer() - start_time) / 60} minutes")
+                 else:
+                     self.message_from_thread.emit(
+                         f"{message}, Step 2/2: Analyse all videos using {self.parent().po.cores} cores...")
+                     logging.info("Starting analysis in parallel")
+                     start_time = default_timer()
+                     arena_number = len(self.parent().po.vars['analyzed_individuals'])
+                     self.advance = 0
+                     self.pat_tracker = PercentAndTimeTracker(arena_number, core_number=self.parent().po.cores)
+
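+                     # Split the arenas into one contiguous sub-range per core;
+                     # the first (arena_number % cores) cores get one extra arena.
+                     # E.g. 10 arenas on 3 cores -> extents (0, 4), (4, 7), (7, 10).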
+                     fair_core_workload = arena_number // self.parent().po.cores
+                     cores_with_1_more = arena_number % self.parent().po.cores
+                     extents_of_subranges = []
+                     bound = 0
+                     parallel_organization = [fair_core_workload + 1 for _ in range(cores_with_1_more)] + \
+                                             [fair_core_workload for _ in range(self.parent().po.cores - cores_with_1_more)]
+                     # Emit message to the interface
+                     self.image_from_thread.emit({"current_image": self.parent().po.last_image.bgr,
+                                                  "message": f"{message} Step 2/2: Analysis running on {self.parent().po.cores} CPU cores"})
+                     for extent_size in parallel_organization:
+                         extents_of_subranges.append((bound, bound := bound + extent_size))
+
+                     try:
+                         processes = []
+                         manager = Manager()
+                         subtotals = manager.Queue()
+                         for extent in extents_of_subranges:
+                             p = Process(target=motion_analysis_process,
+                                         args=(extent[0], extent[1], self.parent().po.vars, subtotals))
+                             p.start()
+                             processes.append(p)
+
+                         for p in processes:
+                             p.join()
+
+                         self.message_from_thread.emit(f"{message}, Step 2/2: Saving all results...")
+                         for _ in range(subtotals.qsize()):
+                             grouped_results = subtotals.get()
+                             for results_i in grouped_results:
+                                 if not self.parent().po.vars['several_blob_per_arena']:
+                                     # Save basic statistics
+                                     self.parent().po.update_one_row_per_arena(results_i['i'], results_i['one_row_per_arena'])
+                                     # Save descriptors in long format
+                                     self.parent().po.update_one_row_per_frame(results_i['i'] * self.parent().po.vars['img_number'],
+                                                                               results_i['arena'] * self.parent().po.vars['img_number'],
+                                                                               results_i['one_row_per_frame'])
+                                 if not pd.isna(results_i['first_move']):
+                                     # Save cytosol oscillations
+                                     if self.parent().po.vars['oscilacyto_analysis']:
+                                         if self.parent().po.one_row_per_oscillating_cluster is None:
+                                             self.parent().po.one_row_per_oscillating_cluster = results_i['one_row_per_oscillating_cluster']
+                                         else:
+                                             self.parent().po.one_row_per_oscillating_cluster = pd.concat(
+                                                 (self.parent().po.one_row_per_oscillating_cluster, results_i['one_row_per_oscillating_cluster']))
+                                 # Save efficiency visualization
+                                 self.parent().po.add_analysis_visualization_to_first_and_last_images(results_i['i'],
+                                                                                                      results_i['efficiency_test_1'],
+                                                                                                      results_i['efficiency_test_2'])
+                         self.image_from_thread.emit({"current_image": self.parent().po.last_image.bgr,
+                                                      "message": f"{message} Step 2/2: analyzed {arena_number} out of {arena_number} arenas (100%)"})
+                         logging.info(f"Parallel analysis lasted {(default_timer() - start_time) / 60} minutes")
+                     except MemoryError:
+                         analysis_status["continue"] = False
+                         analysis_status["message"] = "Not enough memory, reduce the core number for parallel analysis"
+                         self.message_from_thread.emit(f"Analyzing {message} requires to reduce the core number for parallel analysis")
+                         return analysis_status
+                 self.parent().po.save_tables()
+                 return analysis_status
+             else:
+                 analysis_status["continue"] = False
+                 analysis_status["message"] = f"Requires an additional {memory_diff}GB of RAM to run"
+                 self.message_from_thread.emit(f"Analyzing {message} requires an additional {memory_diff}GB of RAM to run")
+                 return analysis_status
+         except MemoryError:
+             analysis_status["continue"] = False
+             analysis_status["message"] = "Requires additional memory to run"
+             self.message_from_thread.emit(f"Analyzing {message} requires additional memory to run")
+             return analysis_status
+
+
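+ # Worker executed in a separate process: analyses the arenas whose indices lie
+ # in [lower_bound, upper_bound) and puts the grouped per-arena results on the
+ # shared queue so the main thread can merge them into the result tables.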
+ def motion_analysis_process(lower_bound: int, upper_bound: int, vars: dict, subtotals: Queue) -> None:
+     grouped_results = []
+     for i in range(lower_bound, upper_bound):
+         analysis_i = MotionAnalysis([i, i + 1, vars, True, True, False, None])
+         r = weakref.ref(analysis_i)
+         results_i = dict()
+         results_i['arena'] = analysis_i.one_descriptor_per_arena['arena']
+         results_i['i'] = analysis_i.one_descriptor_per_arena['arena'] - 1
+         arena = results_i['arena']
+         if not vars['several_blob_per_arena']:
+             # Save basic statistics
+             results_i['one_row_per_arena'] = analysis_i.one_descriptor_per_arena
+             # Save descriptors in long format
+             results_i['one_row_per_frame'] = analysis_i.one_row_per_frame
+         # Save cytosol oscillations
+         results_i['first_move'] = analysis_i.one_descriptor_per_arena["first_move"]
+         if not pd.isna(analysis_i.one_descriptor_per_arena["first_move"]):
+             if vars['oscilacyto_analysis']:
+                 results_i['clusters_final_data'] = analysis_i.clusters_final_data
+                 results_i['one_row_per_oscillating_cluster'] = pd.DataFrame(
+                     np.c_[np.repeat(arena, analysis_i.clusters_final_data.shape[0]), analysis_i.clusters_final_data],
+                     columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
+             if vars['fractal_analysis']:
+                 results_i['fractal_box_sizes'] = pd.DataFrame(analysis_i.fractal_boxes,
+                                                               columns=['arena', 'time', 'fractal_box_lengths', 'fractal_box_widths'])
+         # Save efficiency visualization
+         results_i['efficiency_test_1'] = analysis_i.efficiency_test_1
+         results_i['efficiency_test_2'] = analysis_i.efficiency_test_2
+         grouped_results.append(results_i)
+
+     subtotals.put(grouped_results)