cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1464 @@
+ #!/usr/bin/env python3
+ """
+ The Cellects graphical user interface interacts with the computational scripts through threads.
+ In particular, each thread calls one or several methods of the class named "program_organizer",
+ which regroups all computations available in the software.
+ These threads are started from a child of WindowType, run methods from program_organizer and send
+ messages and results back to the corresponding child of WindowType, allowing, for instance,
+ a result to be displayed in the interface.
+ """
+
+ import logging
+ import weakref
+ from multiprocessing import Queue, Process, Manager
+ import os
+ import time
+ from glob import glob
+ from timeit import default_timer
+ from copy import deepcopy
+ import cv2
+ from numba.typed import Dict as TDict
+ import numpy as np
+ import pandas as pd
+ from PySide6 import QtCore
+ from cellects.image_analysis.morphological_operations import cross_33, Ellipse
+ from cellects.image_analysis.image_segmentation import generate_color_space_combination
+ from cellects.utils.load_display_save import read_and_rotate
+ from cellects.utils.utilitarian import PercentAndTimeTracker, reduce_path_len
+ from cellects.core.one_video_per_blob import OneVideoPerBlob
+ from cellects.utils.load_display_save import write_video
+ from cellects.core.motion_analysis import MotionAnalysis
+
+
+ class LoadDataToRunCellectsQuicklyThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(LoadDataToRunCellectsQuicklyThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.look_for_data()
+         self.parent().po.load_data_to_run_cellects_quickly()
+         if self.parent().po.first_exp_ready_to_run:
+             self.message_from_thread.emit("Data found, Video tracking window and Run all directly are available")
+         else:
+             self.message_from_thread.emit("")
+
+
+ class LookForDataThreadInFirstW(QtCore.QThread):
+     def __init__(self, parent=None):
+         super(LookForDataThreadInFirstW, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.look_for_data()
+
+
+ class LoadFirstFolderIfSeveralThread(QtCore.QThread):
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(LoadFirstFolderIfSeveralThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.load_data_to_run_cellects_quickly()
+         if not self.parent().po.first_exp_ready_to_run:
+             self.parent().po.get_first_image()
+         self.message_when_thread_finished.emit(self.parent().po.first_exp_ready_to_run)
+
+
+ class GetFirstImThread(QtCore.QThread):
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         """
+         This class reads the first image of the (first of the) selected analyses.
+         According to the first_detection_frame value, it can be another image.
+         If this is the first time a first image is read, it also gathers the following variables:
+             - img_number
+             - dims (video dimensions: time, y, x)
+             - raw_images (whether images are in a raw format)
+         If the selected analysis contains videos instead of images, it opens the first video
+         and reads the first_detection_frame-th image.
+         :param parent: An object containing all necessary variables.
+         """
+         super(GetFirstImThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.get_first_image()
+         self.message_when_thread_finished.emit(True)
+
+
+ class GetLastImThread(QtCore.QThread):
+     def __init__(self, parent=None):
+         super(GetLastImThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.get_last_image()
+
+
+ class UpdateImageThread(QtCore.QThread):
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(UpdateImageThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         # I/ If this thread runs from user input, get the right coordinates
+         # and convert them to fit the displayed image size
+         user_input = len(self.parent().imageanalysiswindow.saved_coord) > 0 or len(self.parent().imageanalysiswindow.temporary_mask_coord) > 0
+         if user_input:
+             if len(self.parent().imageanalysiswindow.temporary_mask_coord) > 0:
+                 idx = self.parent().imageanalysiswindow.temporary_mask_coord
+             else:
+                 idx = self.parent().imageanalysiswindow.saved_coord
+             if len(idx) < 2:
+                 user_input = False
+             else:
+                 # Convert coordinates:
+                 self.parent().imageanalysiswindow.display_image.update_image_scaling_factors()
+                 sf = self.parent().imageanalysiswindow.display_image.scaling_factors
+                 idx = np.array(((np.round(idx[0][0] * sf[0]), np.round(idx[0][1] * sf[1])), (np.round(idx[1][0] * sf[0]), np.round(idx[1][1] * sf[1]))), dtype=np.int64)
+                 min_y = np.min(idx[:, 0])
+                 max_y = np.max(idx[:, 0])
+                 min_x = np.min(idx[:, 1])
+                 max_x = np.max(idx[:, 1])
+                 if max_y > self.parent().imageanalysiswindow.drawn_image.shape[0]:
+                     max_y = self.parent().imageanalysiswindow.drawn_image.shape[0] - 1
+                 if max_x > self.parent().imageanalysiswindow.drawn_image.shape[1]:
+                     max_x = self.parent().imageanalysiswindow.drawn_image.shape[1] - 1
+                 if min_y < 0:
+                     min_y = 0
+                 if min_x < 0:
+                     min_x = 0
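+                 # Note: saved_coord/temporary_mask_coord hold two corner points in
+                 # display coordinates; the scaling factors map them back to the
+                 # full-resolution image, and the four tests above clamp the
+                 # resulting box inside the image bounds.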
+
+         if len(self.parent().imageanalysiswindow.temporary_mask_coord) == 0:
+             # not_load
+             # II/ If this thread aims at saving the last user input and displaying all user inputs:
+             # update the drawn_image according to every saved mask:
+             # 1) the segmentation mask
+             # 2) the back_mask and bio_mask
+             # 3) the automatically detected video contours
+             # (re-)initialize the drawn image
+             self.parent().imageanalysiswindow.drawn_image = deepcopy(self.parent().po.current_image)
+             if self.parent().imageanalysiswindow.drawn_image.size < 1000000:
+                 contour_width = 3
+             else:
+                 contour_width = 6
+             # 1) The segmentation mask
+             logging.info('Add the segmentation mask to the image')
+             if self.parent().imageanalysiswindow.is_first_image_flag:
+                 im_combinations = self.parent().po.first_image.im_combinations
+                 im_mean = self.parent().po.first_image.image.mean()
+             else:
+                 im_combinations = self.parent().po.last_image.im_combinations
+                 im_mean = self.parent().po.last_image.bgr.mean()
+             # If there are image combinations, get the current corresponding binary image
+             if im_combinations is not None and len(im_combinations) != 0:
+                 binary_idx = im_combinations[self.parent().po.current_combination_id]["binary_image"]
+                 # If it concerns the last image, only keep the contour coordinates
+                 eroded_binary = cv2.erode(binary_idx, cross_33)
+                 binary_idx = binary_idx - eroded_binary
+                 binary_idx = cv2.dilate(binary_idx, kernel=cross_33, iterations=contour_width)
+                 binary_idx = np.nonzero(binary_idx)
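+                 # The three steps above are a standard contour trick: subtracting the
+                 # eroded mask from the mask keeps only its one-pixel inner border, and
+                 # dilating that border `contour_width` times thickens it for display.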
+                 # Color these coordinates in magenta on bright images, and in pink on dark images
+                 if im_mean > 126:
+                     # logging.info('Color the segmentation mask in magenta')
+                     self.parent().imageanalysiswindow.drawn_image[binary_idx[0], binary_idx[1], :] = np.array((20, 0, 150), dtype=np.uint8)
+                 else:
+                     # logging.info('Color the segmentation mask in pink')
+                     self.parent().imageanalysiswindow.drawn_image[binary_idx[0], binary_idx[1], :] = np.array((94, 0, 213), dtype=np.uint8)
+             if user_input:  # save
+                 mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                 if self.parent().imageanalysiswindow.back1_bio2 == 0:
+                     logging.info("Save the user drawn mask of the current arena")
+                     if self.parent().po.vars['arena_shape'] == 'circle':
+                         ellipse = Ellipse((max_y - min_y, max_x - min_x)).create().astype(np.uint8)
+                         mask[min_y:max_y, min_x:max_x] = ellipse
+                     else:
+                         mask[min_y:max_y, min_x:max_x] = 1
+                 else:
+                     logging.info("Save the user drawn mask of Cell or Back")
+                     if self.parent().imageanalysiswindow.back1_bio2 == 2:
+                         if self.parent().po.all['starting_blob_shape'] == 'circle':
+                             ellipse = Ellipse((max_y - min_y, max_x - min_x)).create().astype(np.uint8)
+                             mask[min_y:max_y, min_x:max_x] = ellipse
+                         else:
+                             mask[min_y:max_y, min_x:max_x] = 1
+                     else:
+                         mask[min_y:max_y, min_x:max_x] = 1
+                 mask = np.nonzero(mask)
+                 if self.parent().imageanalysiswindow.back1_bio2 == 1:
+                     self.parent().imageanalysiswindow.back_masks_number += 1
+                     self.parent().imageanalysiswindow.back_mask[mask[0], mask[1]] = self.parent().imageanalysiswindow.available_back_names[0]
+                 elif self.parent().imageanalysiswindow.back1_bio2 == 2:
+                     self.parent().imageanalysiswindow.bio_masks_number += 1
+                     self.parent().imageanalysiswindow.bio_mask[mask[0], mask[1]] = self.parent().imageanalysiswindow.available_bio_names[0]
+                 elif self.parent().imageanalysiswindow.manual_delineation_flag:
+                     self.parent().imageanalysiswindow.arena_masks_number += 1
+                     self.parent().imageanalysiswindow.arena_mask[mask[0], mask[1]] = self.parent().imageanalysiswindow.available_arena_names[0]
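+                 # Each saved region is written into its mask with a distinct integer
+                 # label (taken from available_back/bio/arena_names), so individual
+                 # regions remain identifiable later rather than collapsing to 0/1.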
+             # 2)a) Apply all these masks to the drawn image:
+             back_coord = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+             bio_coord = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+             if self.parent().imageanalysiswindow.arena_mask is not None:
+                 arena_coord = np.nonzero(self.parent().imageanalysiswindow.arena_mask)
+                 self.parent().imageanalysiswindow.drawn_image[arena_coord[0], arena_coord[1], :] = np.repeat(self.parent().po.vars['contour_color'], 3).astype(np.uint8)
+             self.parent().imageanalysiswindow.drawn_image[back_coord[0], back_coord[1], :] = np.array((224, 160, 81), dtype=np.uint8)
+             self.parent().imageanalysiswindow.drawn_image[bio_coord[0], bio_coord[1], :] = np.array((17, 160, 212), dtype=np.uint8)
+             image = self.parent().imageanalysiswindow.drawn_image
+             # 3) The automatically detected video contours
+             if self.parent().imageanalysiswindow.delineation_done:  # add a mask of the video contour
+                 # logging.info("Draw the delineation mask of each arena")
+                 for contour_i in range(len(self.parent().po.top)):
+                     mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                     min_cy = self.parent().po.top[contour_i]
+                     max_cy = self.parent().po.bot[contour_i]
+                     min_cx = self.parent().po.left[contour_i]
+                     max_cx = self.parent().po.right[contour_i]
+                     text = f"{contour_i + 1}"
+                     position = (self.parent().po.left[contour_i] + 25, self.parent().po.top[contour_i] + (self.parent().po.bot[contour_i] - self.parent().po.top[contour_i]) // 2)
+                     image = cv2.putText(image,  # numpy array on which text is written
+                                         text,  # text
+                                         position,  # position at which writing starts
+                                         cv2.FONT_HERSHEY_SIMPLEX,  # font family
+                                         1,  # font size
+                                         (138, 95, 18, 255),  # font color (alt: (209, 80, 0, 255))
+                                         2)  # font stroke
+                     if (max_cy - min_cy) < 0 or (max_cx - min_cx) < 0:
+                         self.parent().imageanalysiswindow.message.setText("Error: the shape number or the detection is wrong")
+                     if self.parent().po.vars['arena_shape'] == 'circle':
+                         ellipse = Ellipse((max_cy - min_cy, max_cx - min_cx)).create().astype(np.uint8)
+                         ellipse = cv2.morphologyEx(ellipse, cv2.MORPH_GRADIENT, cross_33)
+                         mask[min_cy:max_cy, min_cx:max_cx] = ellipse
+                     else:
+                         mask[(min_cy, max_cy), min_cx:max_cx] = 1
+                         mask[min_cy:max_cy, (min_cx, max_cx)] = 1
+                     mask = cv2.dilate(mask, kernel=cross_33, iterations=contour_width)
+                     mask = np.nonzero(mask)
+                     image[mask[0], mask[1], :] = np.array((138, 95, 18), dtype=np.uint8)  # self.parent().po.vars['contour_color']
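+                     # For rectangular arenas, the two fancy-indexing lines in the else
+                     # branch above draw the four one-pixel edges of the bounding box
+                     # (top/bottom rows, then left/right columns) before dilation
+                     # thickens them to contour_width.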
+
+         else:  # load
+             if user_input:
+                 # III/ If this thread runs from user input: update the drawn_image according
+                 # to the current user input, as quickly as possible: take the drawn image
+                 # and add the temporary, user defined, mask to it.
+                 image = deepcopy(self.parent().imageanalysiswindow.drawn_image)
+                 if self.parent().imageanalysiswindow.back1_bio2 == 0:
+                     # logging.info("Dynamic drawing of the arena outline")
+                     if self.parent().po.vars['arena_shape'] == 'circle':
+                         ellipse = Ellipse((max_y - min_y, max_x - min_x)).create()
+                         ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
+                         image[min_y:max_y, min_x:max_x, ...] *= (1 - ellipse)
+                         image[min_y:max_y, min_x:max_x, ...] += ellipse
+                     else:
+                         mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                         mask[min_y:max_y, min_x:max_x] = 1
+                         mask = np.nonzero(mask)
+                         image[mask[0], mask[1], :] = np.array((0, 0, 0), dtype=np.uint8)
+                 else:
+                     # logging.info("Dynamic drawing of Cell or Back")
+                     if self.parent().imageanalysiswindow.back1_bio2 == 2:
+                         if self.parent().po.all['starting_blob_shape'] == 'circle':
+                             ellipse = Ellipse((max_y - min_y, max_x - min_x)).create()
+                             ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
+                             image[min_y:max_y, min_x:max_x, ...] *= (1 - ellipse)
+                             ellipse[:, :, :] *= np.array((17, 160, 212), dtype=np.uint8)
+                             image[min_y:max_y, min_x:max_x, ...] += ellipse
+                         else:
+                             mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                             mask[min_y:max_y, min_x:max_x] = 1
+                             mask = np.nonzero(mask)
+                             image[mask[0], mask[1], :] = np.array((17, 160, 212), dtype=np.uint8)
+                     else:
+                         mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
+                         mask[min_y:max_y, min_x:max_x] = 1
+                         mask = np.nonzero(mask)
+                         image[mask[0], mask[1], :] = np.array((224, 160, 81), dtype=np.uint8)
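+                 # Blend trick used for the circle overlays above: multiplying by
+                 # (1 - ellipse) zeroes the pixels under the ellipse, and adding the
+                 # (optionally colored) ellipse then paints them, all vectorized.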
+
+         self.parent().imageanalysiswindow.display_image.update_image(image)
+         self.message_when_thread_finished.emit(True)
+
+
+ class FirstImageAnalysisThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(FirstImageAnalysisThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         tic = default_timer()
+         biomask = None
+         backmask = None
+         if self.parent().imageanalysiswindow.bio_masks_number != 0:
+             shape_nb, ordered_image = cv2.connectedComponents((self.parent().imageanalysiswindow.bio_mask > 0).astype(np.uint8))
+             shape_nb -= 1
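+             # cv2.connectedComponents counts the background as label 0, hence the
+             # decrement to get the number of user-drawn blobs only.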
+             biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+         else:
+             shape_nb = 0
+         if self.parent().imageanalysiswindow.back_masks_number != 0:
+             backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+         if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2 or shape_nb == self.parent().po.sample_number:
+             self.message_from_thread.emit("Image segmentation, wait 30 seconds at most")
+             if not self.parent().imageanalysiswindow.asking_first_im_parameters_flag and self.parent().po.all['scale_with_image_or_cells'] == 0 and self.parent().po.all["set_spot_size"]:
+                 self.parent().po.get_average_pixel_size()
+                 spot_size = self.parent().po.starting_blob_hsize_in_pixels
+             else:
+                 spot_size = None
+             self.parent().po.all["bio_mask"] = biomask
+             self.parent().po.all["back_mask"] = backmask
+             self.parent().po.fast_image_segmentation(is_first_image=True, biomask=biomask, backmask=backmask, spot_size=spot_size)
+             if shape_nb == self.parent().po.sample_number and self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] != self.parent().po.sample_number:
+                 self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] = shape_nb
+                 self.parent().po.first_image.shape_number = shape_nb
+                 self.parent().po.first_image.validated_shapes = (self.parent().imageanalysiswindow.bio_mask > 0).astype(np.uint8)
+                 self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['binary_image'] = self.parent().po.first_image.validated_shapes
+         else:
+             self.message_from_thread.emit("Generating analysis options, wait...")
+             if self.parent().po.vars["color_number"] > 2:
+                 kmeans_clust_nb = self.parent().po.vars["color_number"]
+                 if self.parent().po.carefully:
+                     self.message_from_thread.emit("Generating analysis options, wait less than 30 minutes")
+                 else:
+                     self.message_from_thread.emit("Generating analysis options, wait a few minutes")
+             else:
+                 kmeans_clust_nb = None
+                 if self.parent().po.carefully:
+                     self.message_from_thread.emit("Generating analysis options, wait a few minutes")
+                 else:
+                     self.message_from_thread.emit("Generating analysis options, wait around 1 minute")
+             if self.parent().imageanalysiswindow.asking_first_im_parameters_flag:
+                 self.parent().po.first_image.find_first_im_csc(sample_number=self.parent().po.sample_number,
+                                                                several_blob_per_arena=None,
+                                                                spot_shape=None, spot_size=None,
+                                                                kmeans_clust_nb=kmeans_clust_nb,
+                                                                biomask=self.parent().po.all["bio_mask"],
+                                                                backmask=self.parent().po.all["back_mask"],
+                                                                color_space_dictionaries=None,
+                                                                carefully=self.parent().po.carefully)
+             else:
+                 if self.parent().po.all['scale_with_image_or_cells'] == 0:
+                     self.parent().po.get_average_pixel_size()
+                 else:
+                     self.parent().po.starting_blob_hsize_in_pixels = None
+                 self.parent().po.first_image.find_first_im_csc(sample_number=self.parent().po.sample_number,
+                                                                several_blob_per_arena=self.parent().po.vars['several_blob_per_arena'],
+                                                                spot_shape=self.parent().po.all['starting_blob_shape'],
+                                                                spot_size=self.parent().po.starting_blob_hsize_in_pixels,
+                                                                kmeans_clust_nb=kmeans_clust_nb,
+                                                                biomask=self.parent().po.all["bio_mask"],
+                                                                backmask=self.parent().po.all["back_mask"],
+                                                                color_space_dictionaries=None,
+                                                                carefully=self.parent().po.carefully)
+
+         logging.info(f"First image analysis lasted {default_timer() - tic} seconds")
+         logging.info(f"First image analysis lasted {np.round((default_timer() - tic) / 60)} minutes")
+         self.message_when_thread_finished.emit(True)
+
+
+ class LastImageAnalysisThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     message_when_thread_finished = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(LastImageAnalysisThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.cropping(False)
+         self.parent().po.get_background_to_subtract()
+         biomask = None
+         backmask = None
+         if self.parent().imageanalysiswindow.bio_masks_number != 0:
+             biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+         if self.parent().imageanalysiswindow.back_masks_number != 0:
+             backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+         if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2:
+             self.message_from_thread.emit("Image segmentation, wait...")
+             self.parent().po.fast_image_segmentation(is_first_image=False, biomask=biomask, backmask=backmask)
+         else:
+             self.message_from_thread.emit("Generating analysis options, wait...")
+             if self.parent().po.vars['several_blob_per_arena']:
+                 concomp_nb = [self.parent().po.sample_number, self.parent().po.first_image.size // 50]
+                 max_shape_size = .75 * self.parent().po.first_image.size
+                 total_surfarea = .99 * self.parent().po.first_image.size
+             else:
+                 concomp_nb = [self.parent().po.sample_number, self.parent().po.sample_number * 200]
+                 if self.parent().po.all['are_zigzag'] == "columns":
+                     inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.y_boundaries)))
+                 elif self.parent().po.all['are_zigzag'] == "rows":
+                     inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.x_boundaries)))
+                 else:
+                     dist1 = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.y_boundaries)))
+                     dist2 = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.x_boundaries)))
+                     inter_dist = max(dist1, dist2)
+                 if self.parent().po.all['starting_blob_shape'] == "circle":
+                     max_shape_size = np.pi * np.square(inter_dist)
+                 else:
+                     max_shape_size = np.square(2 * inter_dist)
+                 total_surfarea = max_shape_size * self.parent().po.sample_number
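+                 # Heuristic size bounds: with inter_dist as the typical spacing between
+                 # specimens, a circular specimen may grow to at most ~pi * inter_dist**2
+                 # pixels (radius inter_dist) and a square one to (2 * inter_dist)**2.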
+             out_of_arenas = None
+             if self.parent().po.all['are_gravity_centers_moving'] != 1:
+                 out_of_arenas = np.ones_like(self.parent().po.videos.first_image.validated_shapes)
+                 for blob_i in np.arange(len(self.parent().po.vars['analyzed_individuals'])):
+                     out_of_arenas[self.parent().po.top[blob_i]: (self.parent().po.bot[blob_i] + 1),
+                                   self.parent().po.left[blob_i]: (self.parent().po.right[blob_i] + 1)] = 0
+             ref_image = self.parent().po.first_image.validated_shapes
+             self.parent().po.first_image.generate_subtract_background(self.parent().po.vars['convert_for_motion'])
+             kmeans_clust_nb = None
+             self.parent().po.last_image.find_last_im_csc(concomp_nb, total_surfarea, max_shape_size, out_of_arenas,
+                                                          ref_image, self.parent().po.first_image.subtract_background,
+                                                          kmeans_clust_nb, biomask, backmask, color_space_dictionaries=None,
+                                                          carefully=self.parent().po.carefully)
+         self.message_when_thread_finished.emit(True)
+
+
+ class CropScaleSubtractDelineateThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+     message_when_thread_finished = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(CropScaleSubtractDelineateThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         logging.info("Start cropping if required")
+         self.parent().po.cropping(is_first_image=True)
+         self.parent().po.cropping(is_first_image=False)
+         self.parent().po.get_average_pixel_size()
+         if os.path.isfile('Data to run Cellects quickly.pkl'):
+             os.remove('Data to run Cellects quickly.pkl')
+         logging.info("Save data to run Cellects quickly")
+         self.parent().po.data_to_save['first_image'] = True
+         self.parent().po.save_data_to_run_cellects_quickly()
+         self.parent().po.data_to_save['first_image'] = False
+         if not self.parent().po.vars['several_blob_per_arena']:
+             logging.info("Check whether the detected shape number is ok")
+             nb, shapes, stats, centroids = cv2.connectedComponentsWithStats(self.parent().po.first_image.validated_shapes)
+             y_lim = self.parent().po.first_image.y_boundaries
+             if (nb - 1) != self.parent().po.sample_number or np.any(stats[:, 4] == 1):
+                 self.message_from_thread.emit("Image analysis failed to detect the right cell(s) number: restart the analysis.")
+             elif len(np.nonzero(y_lim == -1)[0]) != len(np.nonzero(y_lim == 1)[0]):
+                 self.message_from_thread.emit("Automatic arena delineation cannot work if one cell touches the image border.")
+                 self.parent().po.first_image.y_boundaries = None
+             else:
+                 logging.info("Start automatic video delineation")
+                 analysis_status = self.parent().po.delineate_each_arena()
+                 self.message_when_thread_finished.emit(analysis_status["message"])
+         else:
+             logging.info("Start automatic video delineation")
+             analysis_status = self.parent().po.delineate_each_arena()
+             self.message_when_thread_finished.emit(analysis_status["message"])
+
+
+ class SaveManualDelineationThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(SaveManualDelineationThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.left = np.arange(self.parent().po.sample_number)
+         self.parent().po.right = np.arange(self.parent().po.sample_number)
+         self.parent().po.top = np.arange(self.parent().po.sample_number)
+         self.parent().po.bot = np.arange(self.parent().po.sample_number)
+         for arena in np.arange(1, self.parent().po.sample_number + 1):
+             y, x = np.nonzero(self.parent().imageanalysiswindow.arena_mask == arena)
+             self.parent().po.left[arena - 1] = np.min(x)
+             self.parent().po.right[arena - 1] = np.max(x)
+             self.parent().po.top[arena - 1] = np.min(y)
+             self.parent().po.bot[arena - 1] = np.max(y)
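+         # Each user-drawn arena was stored in arena_mask under its own integer label,
+         # so the loop above recovers one bounding box (left/right/top/bot) per arena.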
+
+         logging.info("Save data to run Cellects quickly")
+         self.parent().po.data_to_save['coordinates'] = True
+         self.parent().po.save_data_to_run_cellects_quickly()
+         self.parent().po.data_to_save['coordinates'] = False
+
+         logging.info("Save manual video delineation")
+         self.parent().po.vars['analyzed_individuals'] = np.arange(self.parent().po.sample_number) + 1
+         self.parent().po.videos = OneVideoPerBlob(self.parent().po.first_image, self.parent().po.starting_blob_hsize_in_pixels, self.parent().po.all['raw_images'])
+         self.parent().po.videos.left = self.parent().po.left
+         self.parent().po.videos.right = self.parent().po.right
+         self.parent().po.videos.top = self.parent().po.top
+         self.parent().po.videos.bot = self.parent().po.bot
+
+
+ class GetExifDataThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(GetExifDataThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.extract_exif()
+
+
+ class FinalizeImageAnalysisThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(FinalizeImageAnalysisThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.get_background_to_subtract()
+         self.parent().po.get_origins_and_backgrounds_lists()
+         if self.parent().po.last_image is None:
+             self.parent().po.get_last_image()
+             self.parent().po.fast_image_segmentation(False)
+         self.parent().po.find_if_lighter_background()
+         logging.info("The current (or the first) folder is ready to run")
+         self.parent().po.first_exp_ready_to_run = True
+         self.parent().po.data_to_save['coordinates'] = True
+         self.parent().po.data_to_save['exif'] = True
+         self.parent().po.save_data_to_run_cellects_quickly()
+         self.parent().po.data_to_save['coordinates'] = False
+         self.parent().po.data_to_save['exif'] = False
+
+
+ class SaveAllVarsThread(QtCore.QThread):
+
+     def __init__(self, parent=None):
+         super(SaveAllVarsThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.parent().po.save_variable_dict()
+         # self.parent().po.all['global_pathway']
+         # os.getcwd()
+         self.set_current_folder()
+         self.parent().po.save_data_to_run_cellects_quickly(new_one_if_does_not_exist=False)
+         # if os.access(f"", os.R_OK):
+         #     self.parent().po.save_data_to_run_cellects_quickly()
+         # else:
+         #     logging.error(f"No permission access to write in {os.getcwd()}")
+
+     def set_current_folder(self):
+         if self.parent().po.all['folder_number'] > 1:  # len(self.parent().po.all['folder_list']) > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][0]} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
+                                               self.parent().po.all['folder_list'][0])
+         else:
+             curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {curr_path} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+
+
+ class OneArenaThread(QtCore.QThread):
+     message_from_thread_starting = QtCore.Signal(str)
+     image_from_thread = QtCore.Signal(dict)
+     when_loading_finished = QtCore.Signal(bool)
+     when_detection_finished = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(OneArenaThread, self).__init__(parent)
+         self.setParent(parent)
+         self._isRunning = False
+
+     def run(self):
+         continue_analysis = True
+         self._isRunning = True
+         self.message_from_thread_starting.emit("Video loading, wait...")
+         self.set_current_folder()  # MUST be done at each write
+         print(self.parent().po.vars['convert_for_motion'])
+         if not self.parent().po.first_exp_ready_to_run:
+             self.parent().po.load_data_to_run_cellects_quickly()
+             if not self.parent().po.first_exp_ready_to_run:
+                 # Need to look for data when 'Data to run Cellects quickly.pkl' exists and 1 folder is selected among several
+                 continue_analysis = self.pre_processing()
+         if continue_analysis:
+             print(self.parent().po.vars['convert_for_motion'])
+             memory_diff = self.parent().po.update_available_core_nb()
+             if self.parent().po.cores == 0:
+                 self.message_from_thread_starting.emit(f"Analyzing one arena requires {memory_diff}GB of additional RAM to run")
+             else:
+                 if self.parent().po.motion is None or self.parent().po.load_quick_full == 0:
+                     self.load_one_arena()
+                 if self.parent().po.load_quick_full > 0:
+                     if self.parent().po.motion.start is not None:
+                         logging.info("One arena detection has started")
+                         self.detection()
+                         if self.parent().po.load_quick_full > 1:
+                             logging.info("One arena post-processing has started")
+                             self.post_processing()
+                         else:
+                             self.when_detection_finished.emit("Detection done, ready to see the result")
+                     else:
+                         self.message_from_thread_starting.emit("The current parameters failed to detect the cell(s) motion")
+
+     def stop(self):
+         self._isRunning = False
+
+     def set_current_folder(self):
+         # if isinstance(self.parent().po.all['sample_number_per_folder'], int):
+         #     self.parent().po.all['folder_number'] = 1
+         #     self.parent().po.look_for_data()
+         if self.parent().po.all['folder_number'] > 1:  # len(self.parent().po.all['folder_list']) > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][0]} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
+                                               self.parent().po.all['folder_list'][0])
+         else:
+             curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {curr_path} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+         # logging.info("Look for images/videos data")
+         # self.parent().po.look_for_data()
+         # if len(self.parent().po.all['folder_list']) > 1:
+         #     logging.info("Update sub-folder")
+         #     self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
+         #                                       self.parent().po.all['folder_list'][0])
+         # else:
+         #     self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+
+     def pre_processing(self):
+         logging.info("Pre-processing has started")
+         analysis_status = {"continue": True, "message": ""}
+         # Things to save and load here. Situations:
+         # 1) everything has been done and loading succeeded
+         #    (the "if not self.parent().po.first_exp_ready_to_run" case, already handled)
+         # 2) nothing has been done and loading succeeded
+         # 3) nothing has been done and loading failed
+         # 4) this folder is not the first one
+         # if self.parent().po.succeed_to_data_to_run_cellects_quickly:
+         #     self.message_from_thread_starting.emit(f"Do image analysis first, by clicking Next on the first window")
+         # if not self.parent().po.all['overwrite_cellects_data'] and os.path.isfile(f'Data to run Cellects quickly.pkl'):
+         #     success = self.parent().po.load_data_to_run_cellects_quickly()
+         #     if not success:
+         #         self.message_from_thread_starting.emit(f"Do image analysis first, by clicking Next on the first window")
+         #     else:
+
+         self.parent().po.get_first_image()
+         self.parent().po.fast_image_segmentation(is_first_image=True)
+         if len(self.parent().po.vars['analyzed_individuals']) != self.parent().po.first_image.shape_number:
+             self.message_from_thread_starting.emit("Wrong specimen number: (re)do the complete analysis.")
+             analysis_status["continue"] = False
+         else:
+             self.parent().po.cropping(is_first_image=True)
+             self.parent().po.get_average_pixel_size()
+             analysis_status = self.parent().po.delineate_each_arena()
+             if not analysis_status["continue"]:
+                 self.message_from_thread_starting.emit(analysis_status["message"])
+                 logging.error(analysis_status['message'])
+             else:
+                 self.parent().po.data_to_save['exif'] = True
+                 self.parent().po.save_data_to_run_cellects_quickly()
+                 self.parent().po.data_to_save['exif'] = False
+                 # self.parent().po.extract_exif()
+                 self.parent().po.get_background_to_subtract()
+                 if len(self.parent().po.vars['analyzed_individuals']) != len(self.parent().po.top):
+                     self.message_from_thread_starting.emit("Wrong specimen number: (re)do the complete analysis.")
+                     analysis_status["continue"] = False
+                 else:
+                     self.parent().po.get_origins_and_backgrounds_lists()
+                     self.parent().po.get_last_image()
+                     self.parent().po.fast_image_segmentation(False)
+                     # self.parent().po.type_csc_dict()
+                     self.parent().po.find_if_lighter_background()
+                     logging.info("The current (or the first) folder is ready to run")
+                     self.parent().po.first_exp_ready_to_run = True
+         return analysis_status["continue"]
+
+     def load_one_arena(self):
+         arena = self.parent().po.all['arena']
+         i = np.nonzero(self.parent().po.vars['analyzed_individuals'] == arena)[0][0]
+         save_loaded_video: bool = False
+         if not os.path.isfile(f'ind_{arena}.npy') or self.parent().po.all['overwrite_unaltered_videos']:
+             logging.info(f"Starting to load arena n°{arena} from images")
+             add_to_c = 1
+             self.parent().po.one_arenate_done = True
+             if self.parent().po.vars['lose_accuracy_to_save_memory']:
+                 self.parent().po.converted_video = np.zeros(
+                     (len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c, self.parent().po.right[i] - self.parent().po.left[i] + add_to_c),
+                     dtype=np.uint8)
+             else:
+                 self.parent().po.converted_video = np.zeros(
+                     (len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c, self.parent().po.right[i] - self.parent().po.left[i] + add_to_c),
+                     dtype=float)
+             if not self.parent().po.vars['already_greyscale']:
+                 self.parent().po.visu = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
+                                                   self.parent().po.right[i] - self.parent().po.left[i] + add_to_c, 3), dtype=np.uint8)
+                 if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                     if self.parent().po.vars['lose_accuracy_to_save_memory']:
+                         self.parent().po.converted_video2 = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
+                                                                       self.parent().po.right[i] - self.parent().po.left[i] + add_to_c), dtype=np.uint8)
+                     else:
+                         self.parent().po.converted_video2 = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
+                                                                       self.parent().po.right[i] - self.parent().po.left[i] + add_to_c), dtype=float)
+                 first_dict = TDict()
+                 second_dict = TDict()
+                 c_spaces = []
+                 for k, v in self.parent().po.vars['convert_for_motion'].items():
+                     if k != 'logical' and v.sum() > 0:
+                         if k[-1] != '2':
+                             first_dict[k] = v
+                             c_spaces.append(k)
+                         else:
+                             second_dict[k[:-1]] = v
+                             c_spaces.append(k[:-1])
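+                 # Illustrative example (hypothetical values): a convert_for_motion dict like
+                 #     {'lab': array([1, 0, 0]), 'hsv2': array([0, 0, 1]), 'logical': 'And'}
+                 # splits into first_dict = {'lab': ...} and second_dict = {'hsv': ...}:
+                 # keys ending in '2' describe the second color-space combination, and
+                 # 'logical' tells how the two resulting segmentations are combined.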
+             prev_img = None
+             background = None
+             background2 = None
+             pat_tracker = PercentAndTimeTracker(self.parent().po.vars['img_number'])
+             for image_i, image_name in enumerate(self.parent().po.data_list):
+                 current_percentage, eta = pat_tracker.get_progress()
+                 is_landscape = self.parent().po.first_image.image.shape[0] < self.parent().po.first_image.image.shape[1]
+                 img = read_and_rotate(image_name, prev_img, self.parent().po.all['raw_images'], is_landscape)
+                 # img = self.parent().po.videos.read_and_rotate(image_name, prev_img)
+                 prev_img = deepcopy(img)
+                 if self.parent().po.first_image.cropped:
+                     img = img[self.parent().po.first_image.crop_coord[0]:self.parent().po.first_image.crop_coord[1],
+                               self.parent().po.first_image.crop_coord[2]:self.parent().po.first_image.crop_coord[3], :]
+                 img = img[self.parent().po.top[arena - 1]: (self.parent().po.bot[arena - 1] + add_to_c),
+                           self.parent().po.left[arena - 1]: (self.parent().po.right[arena - 1] + add_to_c), :]
+                 self.image_from_thread.emit({"message": f"Video loading: {current_percentage}%{eta}", "current_image": img})
+                 if self.parent().po.vars['already_greyscale']:
+                     if self.parent().po.reduce_image_dim:
+                         self.parent().po.converted_video[image_i, ...] = img[:, :, 0]
+                     else:
+                         self.parent().po.converted_video[image_i, ...] = img
+                 else:
+                     self.parent().po.visu[image_i, ...] = img
+                     if self.parent().po.vars['subtract_background']:
+                         background = self.parent().po.vars['background_list'][i]
+                         if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                             background2 = self.parent().po.vars['background_list2'][i]
+                     greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces,
+                                                                                          first_dict, second_dict,
+                                                                                          background, background2,
+                                                                                          self.parent().po.vars['lose_accuracy_to_save_memory'])
+                     self.parent().po.converted_video[image_i, ...] = greyscale_image
+                     if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                         self.parent().po.converted_video2[image_i, ...] = greyscale_image2
+
+                     # csc = OneImageAnalysis(img)
+                     # if self.parent().po.vars['subtract_background']:
+                     #     if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                     #         csc.generate_color_space_combination(c_spaces, first_dict, second_dict,
+                     #                                              self.parent().po.vars['background_list'][i],
+                     #                                              self.parent().po.vars['background_list2'][i])
+                     #     else:
+                     #         csc.generate_color_space_combination(c_spaces, first_dict, second_dict,
+                     #                                              self.parent().po.vars['background_list'][i], None)
+                     # else:
+                     #     csc.generate_color_space_combination(c_spaces, first_dict, second_dict, None, None)
+                     # # self.parent().po.converted_video[image_i, ...] = csc.image
+                     # if self.parent().po.vars['lose_accuracy_to_save_memory']:
+                     #     self.parent().po.converted_video[image_i, ...] = bracket_to_uint8_image_contrast(csc.image)
+                     # else:
+                     #     self.parent().po.converted_video[image_i, ...] = csc.image
+                     # if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                     #     if self.parent().po.vars['lose_accuracy_to_save_memory']:
+                     #         self.parent().po.converted_video2[image_i, ...] = bracket_to_uint8_image_contrast(csc.image2)
+                     #     else:
+                     #         self.parent().po.converted_video2[image_i, ...] = csc.image2
+
+             # self.parent().po.load_one_arena(arena)
+             save_loaded_video = True
+             if self.parent().po.vars['already_greyscale']:
+                 self.videos_in_ram = self.parent().po.converted_video
+             else:
+                 if self.parent().po.vars['convert_for_motion']['logical'] == 'None':
+                     self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video)]
+                 else:
+                     self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video), deepcopy(self.parent().po.converted_video2)]
+             # videos = [self.parent().po.video.copy(), self.parent().po.converted_video.copy()]
+         else:
+             logging.info(f"Starting to load arena n°{arena} from .npy saved file")
+             self.videos_in_ram = None
+         l = [i, arena, self.parent().po.vars, False, False, False, self.videos_in_ram]
+         self.parent().po.motion = MotionAnalysis(l)
+         r = weakref.ref(self.parent().po.motion)
+         if self.videos_in_ram is None:
+             self.parent().po.converted_video = deepcopy(self.parent().po.motion.converted_video)
+             if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                 self.parent().po.converted_video2 = deepcopy(self.parent().po.motion.converted_video2)
+         self.parent().po.motion.get_origin_shape()
+         if self.parent().po.motion.dims[0] >= 40:
+             step = self.parent().po.motion.dims[0] // 20
+         else:
+             step = 1
+         if self.parent().po.motion.start >= (self.parent().po.motion.dims[0] - step - 1):
+             self.parent().po.motion.start = None
+         else:
+             self.parent().po.motion.get_covering_duration(step)
+         self.when_loading_finished.emit(save_loaded_video)
+         if self.parent().po.motion.visu is None:
+             visu = self.parent().po.motion.converted_video
+             visu -= np.min(visu)
+             visu = 255 * (visu / np.max(visu))
+             visu = np.round(visu).astype(np.uint8)
+             if len(visu.shape) == 3:
+                 visu = np.stack((visu, visu, visu), axis=3)
+             self.parent().po.motion.visu = visu
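+             # The block above builds a displayable video by min-max normalizing the
+             # converted video to the 0-255 range (x -> 255 * (x - min) / (max - min))
+             # and replicating the single channel three times to get a color stack.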
+
+     def detection(self):
+         self.message_from_thread_starting.emit("Quick video segmentation")
+         self.parent().po.motion.converted_video = deepcopy(self.parent().po.converted_video)
+         if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+             self.parent().po.motion.converted_video2 = deepcopy(self.parent().po.converted_video2)
+         # self.parent().po.motion.detection(compute_all_possibilities=True)
+         self.parent().po.motion.detection(compute_all_possibilities=self.parent().po.all['compute_all_options'])
+         if self.parent().po.all['compute_all_options']:
+             self.parent().po.computed_video_options = np.ones(5, bool)
+         else:
+             self.parent().po.computed_video_options = np.zeros(5, bool)
+             self.parent().po.computed_video_options[self.parent().po.all['video_option']] = True
+         # if self.parent().po.vars['color_number'] > 2:
+
+     def post_processing(self):
+         self.parent().po.motion.smoothed_video = None
+         # if self.parent().po.vars['already_greyscale']:
+         #     if self.parent().po.vars['convert_for_motion']['logical'] == 'None':
+         #         self.videos_in_ram = self.parent().po.converted_video
+         #     else:
+         #         self.videos_in_ram = self.parent().po.converted_video, self.parent().po.converted_video2
+         # else:
+         #     if self.parent().po.vars['convert_for_motion']['logical'] == 'None':
+         #         videos_in_ram = self.parent().po.visu, self.parent().po.converted_video
+         #     else:
+         #         videos_in_ram = self.parent().po.visu, self.parent().po.converted_video, \
+         #                         self.parent().po.converted_video2
+
+         if self.parent().po.vars['color_number'] > 2:
+             analyses_to_compute = [0]
+         else:
+             if self.parent().po.all['compute_all_options']:
+                 analyses_to_compute = np.arange(5)
+             else:
+                 logging.info(f"option: {self.parent().po.all['video_option']}")
+                 analyses_to_compute = [self.parent().po.all['video_option']]
+         time_parameters = [self.parent().po.motion.start, self.parent().po.motion.step,
+                            self.parent().po.motion.lost_frames, self.parent().po.motion.substantial_growth]
+         args = [self.parent().po.all['arena'] - 1, self.parent().po.all['arena'], self.parent().po.vars,
+                 False, False, False, self.videos_in_ram]
+         if self.parent().po.vars['do_fading']:
+             self.parent().po.newly_explored_area = np.zeros((self.parent().po.motion.dims[0], 5), np.uint64)
+         for seg_i in analyses_to_compute:
+             analysis_i = MotionAnalysis(args)
+             r = weakref.ref(analysis_i)
+             analysis_i.segmentation = np.zeros(analysis_i.converted_video.shape[:3], dtype=np.uint8)
+             if self.parent().po.all['compute_all_options']:
+                 if seg_i == 0:
+                     analysis_i.segmentation = self.parent().po.motion.segmentation
+                 else:
+                     if seg_i == 1:
+                         mask = self.parent().po.motion.luminosity_segmentation
+                     elif seg_i == 2:
+                         mask = self.parent().po.motion.gradient_segmentation
+                     elif seg_i == 3:
+                         mask = self.parent().po.motion.logical_and
+                     elif seg_i == 4:
+                         mask = self.parent().po.motion.logical_or
+                     analysis_i.segmentation[mask[0], mask[1], mask[2]] = 1
+             else:
+                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
+                     analysis_i.segmentation = self.parent().po.motion.segmentation
+
+             analysis_i.start = time_parameters[0]
+             analysis_i.step = time_parameters[1]
+             analysis_i.lost_frames = time_parameters[2]
+             analysis_i.substantial_growth = time_parameters[3]
+             analysis_i.origin_idx = self.parent().po.motion.origin_idx
+             analysis_i.initialize_post_processing()
+             analysis_i.t = analysis_i.start
+             # print_progress = ForLoopCounter(self.start)
+
+             while self._isRunning and analysis_i.t < analysis_i.binary.shape[0]:
+                 # analysis_i.update_shape(True)
+                 analysis_i.update_shape(False)
+                 contours = np.nonzero(
+                     cv2.morphologyEx(analysis_i.binary[analysis_i.t - 1, :, :], cv2.MORPH_GRADIENT, cross_33))
+                 current_image = deepcopy(self.parent().po.motion.visu[analysis_i.t - 1, :, :, :])
+                 current_image[contours[0], contours[1], :] = self.parent().po.vars['contour_color']
+                 self.image_from_thread.emit(
+                     {"message": f"Tracking option n°{seg_i + 1}. Image number: {analysis_i.t - 1}",
+                      "current_image": current_image})
+             if analysis_i.start is None:
+                 analysis_i.binary = np.repeat(np.expand_dims(analysis_i.origin, 0),
+                                               analysis_i.converted_video.shape[0], axis=0)
+                 if self.parent().po.vars['color_number'] > 2:
+                     self.message_from_thread_starting.emit(
+                         "Failed to detect motion. Redo image analysis (with only 2 colors?)")
+                 else:
+                     self.message_from_thread_starting.emit(f"Tracking option n°{seg_i + 1} failed to detect motion")
+
+             if self.parent().po.all['compute_all_options']:
+                 if seg_i == 0:
+                     self.parent().po.motion.segmentation = analysis_i.binary
+                 elif seg_i == 1:
+                     self.parent().po.motion.luminosity_segmentation = np.nonzero(analysis_i.binary)
+                 elif seg_i == 2:
+                     self.parent().po.motion.gradient_segmentation = np.nonzero(analysis_i.binary)
+                 elif seg_i == 3:
+                     self.parent().po.motion.logical_and = np.nonzero(analysis_i.binary)
+                 elif seg_i == 4:
+                     self.parent().po.motion.logical_or = np.nonzero(analysis_i.binary)
+             else:
+                 self.parent().po.motion.segmentation = analysis_i.binary
+
+         # self.message_from_thread_starting.emit("If there are problems, change some parameters and try again")
+         self.when_detection_finished.emit("Post processing done, ready to see the result")
+
+
+ class VideoReaderThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(dict)
+
+     def __init__(self, parent=None):
+         super(VideoReaderThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         video_analysis = deepcopy(self.parent().po.motion.visu)
+         self.message_from_thread.emit(
+             {"current_image": video_analysis[0, ...], "message": "Video preparation, wait..."})
+         if self.parent().po.load_quick_full > 0:
+             if self.parent().po.all['compute_all_options']:
+                 if self.parent().po.all['video_option'] == 0:
+                     video_mask = self.parent().po.motion.segmentation
+                 else:
+                     if self.parent().po.all['video_option'] == 1:
+                         mask = self.parent().po.motion.luminosity_segmentation
+                     elif self.parent().po.all['video_option'] == 2:
+                         mask = self.parent().po.motion.gradient_segmentation
+                     elif self.parent().po.all['video_option'] == 3:
+                         mask = self.parent().po.motion.logical_and
+                     elif self.parent().po.all['video_option'] == 4:
+                         mask = self.parent().po.motion.logical_or
+                     video_mask = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
+                     video_mask[mask[0], mask[1], mask[2]] = 1
+             else:
+                 video_mask = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
+                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
+                     video_mask = self.parent().po.motion.segmentation
+
+             if self.parent().po.load_quick_full == 1:
+                 video_mask = np.cumsum(video_mask.astype(np.uint32), axis=0)
+                 video_mask[video_mask > 0] = 1
+                 video_mask = video_mask.astype(np.uint8)
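+                 # Cumulative-coverage trick: the running sum along the time axis marks
+                 # every pixel from the first frame where it was detected onward, so the
+                 # displayed mask only ever grows instead of flickering frame to frame.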
+             logging.info(f"sum: {video_mask.sum()}")
+             # timings = genfromtxt("timings.csv")
+             for t in np.arange(self.parent().po.motion.dims[0]):
+                 mask = cv2.morphologyEx(video_mask[t, ...], cv2.MORPH_GRADIENT, cross_33)
+                 mask = np.stack((mask, mask, mask), axis=2)
+                 # current_image[current_image > 0] = self.parent().po.vars['contour_color']
+                 current_image = deepcopy(video_analysis[t, ...])
+                 current_image[mask > 0] = self.parent().po.vars['contour_color']
+                 self.message_from_thread.emit(
+                     {"current_image": current_image, "message": f"Reading in progress... Image number: {t}"})  # , "time": timings[t]
+                 time.sleep(1 / 50)
+         self.message_from_thread.emit({"current_image": current_image, "message": ""})  # , "time": timings[t]
+
+
+ class ChangeOneRepResultThread(QtCore.QThread):
+     message_from_thread = QtCore.Signal(str)
+
+     def __init__(self, parent=None):
+         super(ChangeOneRepResultThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         self.message_from_thread.emit(
+             f"Arena n°{self.parent().po.all['arena']}: modifying its results...")
+         # self.parent().po.motion2 = deepcopy(self.parent().po.motion)
+         if self.parent().po.motion.start is None:
+             self.parent().po.motion.binary = np.repeat(np.expand_dims(self.parent().po.motion.origin, 0),
+                                                        self.parent().po.motion.converted_video.shape[0], axis=0).astype(np.uint8)
+         else:
+             if self.parent().po.all['compute_all_options']:
+                 if self.parent().po.all['video_option'] == 0:
+                     self.parent().po.motion.binary = self.parent().po.motion.segmentation
+                 else:
+                     if self.parent().po.all['video_option'] == 1:
+                         mask = self.parent().po.motion.luminosity_segmentation
+                     elif self.parent().po.all['video_option'] == 2:
+                         mask = self.parent().po.motion.gradient_segmentation
+                     elif self.parent().po.all['video_option'] == 3:
+                         mask = self.parent().po.motion.logical_and
+                     elif self.parent().po.all['video_option'] == 4:
+                         mask = self.parent().po.motion.logical_or
+                     self.parent().po.motion.binary = np.zeros(self.parent().po.motion.dims, dtype=np.uint8)
+                     self.parent().po.motion.binary[mask[0], mask[1], mask[2]] = 1
+             else:
+                 self.parent().po.motion.binary = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
+                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
+                     self.parent().po.motion.binary = self.parent().po.motion.segmentation
+
+         if self.parent().po.vars['do_fading']:
+             self.parent().po.motion.newly_explored_area = self.parent().po.newly_explored_area[:, self.parent().po.all['video_option']]
+         self.parent().po.motion.max_distance = 9 * self.parent().po.vars['detection_range_factor']
+         self.parent().po.motion.get_descriptors_from_binary(release_memory=False)
+         self.parent().po.motion.detect_growth_transitions()
+         self.parent().po.motion.networks_detection(False)
+         self.parent().po.motion.study_cytoscillations(False)
+         self.parent().po.motion.fractal_descriptions()
+         self.parent().po.motion.get_descriptors_summary()
+         self.parent().po.motion.change_results_of_one_arena()
+         self.parent().po.motion = None
+         self.message_from_thread.emit("")
+
+
+ class WriteVideoThread(QtCore.QThread):
+     # message_from_thread_in_thread = QtCore.Signal(bool)
+
+     def __init__(self, parent=None):
+         super(WriteVideoThread, self).__init__(parent)
+         self.setParent(parent)
+
+     def run(self):
+         # self.message_from_thread_in_thread.emit({True})
+         arena = self.parent().po.all['arena']
+         if not self.parent().po.vars['already_greyscale']:
+             write_video(self.parent().po.visu, f'ind_{arena}.npy')
+         else:
+             write_video(self.parent().po.converted_video, f'ind_{arena}.npy')
+
+
1050
+ class RunAllThread(QtCore.QThread):
1051
+ message_from_thread = QtCore.Signal(str)
1052
+ image_from_thread = QtCore.Signal(dict)
1053
+
1054
+ def __init__(self, parent=None):
1055
+ super(RunAllThread, self).__init__(parent)
1056
+ self.setParent(parent)
1057
+
+     def run(self):
+         analysis_status = {"continue": True, "message": ""}
+         message = self.set_current_folder(0)
+
+         if self.parent().po.first_exp_ready_to_run:
+             self.message_from_thread.emit(message + ": Write videos...")
+             if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
+                 analysis_status["continue"] = False
+                 analysis_status["message"] = "Wrong specimen number: redo the first image analysis."
+                 self.message_from_thread.emit("Wrong specimen number: restart Cellects and do another analysis.")
+             else:
+                 analysis_status = self.run_video_writing(message)
+                 if analysis_status["continue"]:
+                     self.message_from_thread.emit(message + ": Analyse all videos...")
+                     analysis_status = self.run_motion_analysis(message)
+                     if analysis_status["continue"]:
+                         if self.parent().po.all['folder_number'] > 1:
+                             self.parent().po.all['folder_list'] = self.parent().po.all['folder_list'][1:]
+                             self.parent().po.all['sample_number_per_folder'] = self.parent().po.all['sample_number_per_folder'][1:]
+                         else:
+                             self.parent().po.look_for_data()
+
+         if analysis_status["continue"] and (not self.parent().po.first_exp_ready_to_run or self.parent().po.all['folder_number'] > 1):
+             folder_number = np.max((len(self.parent().po.all['folder_list']), 1))
+
+             for exp_i in np.arange(folder_number):
+                 if len(self.parent().po.all['folder_list']) > 0:
+                     logging.info(self.parent().po.all['folder_list'][exp_i])
+                 self.parent().po.first_im = None
+                 self.parent().po.first_image = None
+                 self.parent().po.last_im = None
+                 self.parent().po.last_image = None
+                 self.parent().po.videos = None
+                 self.parent().po.top = None
+
+                 message = self.set_current_folder(exp_i)
+                 self.message_from_thread.emit(f'{message}, pre-processing...')
+                 self.parent().po.load_data_to_run_cellects_quickly()
+                 if not self.parent().po.first_exp_ready_to_run:
+                     analysis_status = self.pre_processing()
+                 if analysis_status["continue"]:
+                     self.message_from_thread.emit(message + ": Write videos from images before analysis...")
+                     if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
+                         self.message_from_thread.emit("Wrong specimen number: first image analysis is mandatory.")
+                         analysis_status["continue"] = False
+                         analysis_status["message"] = "Wrong specimen number: first image analysis is mandatory."
+                     else:
+                         analysis_status = self.run_video_writing(message)
+                         if analysis_status["continue"]:
+                             self.message_from_thread.emit(message + ": Starting analysis...")
+                             analysis_status = self.run_motion_analysis(message)
+
+                 if not analysis_status["continue"]:
+                     break
+         logging.info(self.parent().po.vars['convert_for_motion'])
+         if analysis_status["continue"]:
+             if self.parent().po.all['folder_number'] > 1:
+                 self.message_from_thread.emit(f"Exp {self.parent().po.all['folder_list'][0]} to {self.parent().po.all['folder_list'][-1]} analyzed.")
+             else:
+                 curr_path = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+                 self.message_from_thread.emit(f'Exp {curr_path}, analyzed.')
+         else:
+             logging.error(message + " " + analysis_status["message"])
+             self.message_from_thread.emit(message + " " + analysis_status["message"])
+
+     def set_current_folder(self, exp_i):
+         if self.parent().po.all['folder_number'] > 1:
+             logging.info(f"Use {self.parent().po.all['folder_list'][exp_i]} folder")
+             message = f"{str(self.parent().po.all['global_pathway'])[:6]} ... {self.parent().po.all['folder_list'][exp_i]}"
+             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][exp_i],
+                                               self.parent().po.all['folder_list'][exp_i])
+         else:
+             message = reduce_path_len(self.parent().po.all['global_pathway'], 6, 10)
+             logging.info(f"Use {message} folder")
+             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
+         return message
+
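+     # pre_processing() mirrors the manual first-image analysis: it segments the
+     # first image, crops and delineates one sub-video per arena, then checks
+     # that the number of detected cells matches the expected specimen number
+     # before segmenting the last image.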
+     def pre_processing(self):
+         analysis_status = {"continue": True, "message": ""}
+         logging.info("Pre-processing has started")
+         if len(self.parent().po.data_list) > 0:
+             self.parent().po.get_first_image()
+             self.parent().po.fast_image_segmentation(True)
+             self.parent().po.cropping(is_first_image=True)
+             self.parent().po.get_average_pixel_size()
+             try:
+                 analysis_status = self.parent().po.delineate_each_arena()
+             except ValueError:
+                 analysis_status["message"] = "Failed to detect the right cell(s) number: the first image analysis is mandatory."
+                 analysis_status["continue"] = False
+
+             if analysis_status["continue"]:
+                 self.parent().po.data_to_save['exif'] = True
+                 self.parent().po.save_data_to_run_cellects_quickly()
+                 self.parent().po.data_to_save['exif'] = False
+                 self.parent().po.get_background_to_subtract()
+                 # Check delineation failure first: top may still be None here,
+                 # and len(None) would raise a TypeError.
+                 if self.parent().po.top is None and self.parent().imageanalysiswindow.manual_delineation_flag:
+                     analysis_status["message"] = "Auto video delineation failed, use the manual delineation tool"
+                     analysis_status["continue"] = False
+                 elif len(self.parent().po.vars['analyzed_individuals']) != len(self.parent().po.top):
+                     analysis_status["message"] = "Failed to detect the right cell(s) number: the first image analysis is mandatory."
+                     analysis_status["continue"] = False
+                 else:
+                     self.parent().po.get_origins_and_backgrounds_lists()
+                     self.parent().po.get_last_image()
+                     self.parent().po.fast_image_segmentation(is_first_image=False)
+                     self.parent().po.find_if_lighter_background()
+             return analysis_status
+         else:
+             analysis_status["message"] = "Wrong folder or parameters"
+             analysis_status["continue"] = False
+             return analysis_status
+
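+     # run_video_writing() crops one sub-video per arena out of the raw image
+     # sequence and saves each as an ind_<arena>.npy file. Because all videos may
+     # not fit in RAM at once, arenas are processed in bunches: every bunch
+     # re-reads the whole image list and fills only its own arenas.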
+     def run_video_writing(self, message):
+         analysis_status = {"continue": True, "message": ""}
+         look_for_existing_videos = glob('ind_*.npy')
+         there_already_are_videos = len(look_for_existing_videos) == len(self.parent().po.vars['analyzed_individuals'])
+         logging.info(f"{len(look_for_existing_videos)} .npy video files found for {len(self.parent().po.vars['analyzed_individuals'])} arenas to analyze")
+         do_write_videos = not there_already_are_videos or (
+                 there_already_are_videos and self.parent().po.all['overwrite_unaltered_videos'])
+         if do_write_videos:
+             logging.info("Starting video writing")
+             in_colors = not self.parent().po.vars['already_greyscale']
+             self.parent().po.videos = OneVideoPerBlob(self.parent().po.first_image,
+                                                       self.parent().po.starting_blob_hsize_in_pixels,
+                                                       self.parent().po.all['raw_images'])
+             self.parent().po.videos.left = self.parent().po.left
+             self.parent().po.videos.right = self.parent().po.right
+             self.parent().po.videos.top = self.parent().po.top
+             self.parent().po.videos.bot = self.parent().po.bot
+             self.parent().po.videos.first_image.shape_number = self.parent().po.sample_number
+             bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining = self.parent().po.videos.prepare_video_writing(
+                 self.parent().po.data_list, self.parent().po.vars['min_ram_free'], in_colors)
+             if analysis_status["continue"]:
+                 # Check that there is enough available RAM for one video per bunch and ROM for all videos
+                 if video_nb_per_bunch > 0 and rom_memory_required is None:
+                     pat_tracker1 = PercentAndTimeTracker(bunch_nb * self.parent().po.vars['img_number'])
+                     pat_tracker2 = PercentAndTimeTracker(len(self.parent().po.vars['analyzed_individuals']))
+                     arena_percentage = 0
+                     for bunch in np.arange(bunch_nb):
+                         # Update the labels of arenas and the video_bunch to write
+                         if bunch == (bunch_nb - 1) and remaining > 0:
+                             arena = np.arange(bunch * video_nb_per_bunch, bunch * video_nb_per_bunch + remaining)
+                         else:
+                             arena = np.arange(bunch * video_nb_per_bunch, (bunch + 1) * video_nb_per_bunch)
+                         if self.parent().po.videos.use_list_of_vid:
+                             video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in arena]
+                         else:
+                             video_bunch = np.zeros(np.append(sizes[0, :], len(arena)), dtype=np.uint8)
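+                         # video_bunch is either one array of shape
+                         # (img_number, height, width[, 3], arena_count) when all arenas
+                         # share a size, or (use_list_of_vid) a list holding one
+                         # (img_number, height, width[, 3]) array per arena.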
+                         prev_img = None
+                         images_done = bunch * self.parent().po.vars['img_number']
+                         for image_i, image_name in enumerate(self.parent().po.data_list):
+                             image_percentage, remaining_time = pat_tracker1.get_progress(image_i + images_done)
+                             self.message_from_thread.emit(message + f" Step 1/2: Video writing ({np.round((image_percentage + arena_percentage) / 2, 2)}%)")
+                             if not os.path.exists(image_name):
+                                 raise FileNotFoundError(image_name)
+                             img = self.parent().po.videos.read_and_rotate(image_name, prev_img)
+                             prev_img = deepcopy(img)
+                             if self.parent().po.vars['already_greyscale'] and self.parent().po.reduce_image_dim:
+                                 img = img[:, :, 0]
+
+                             for arena_i, arena_name in enumerate(arena):
+                                 try:
+                                     sub_img = img[self.parent().po.top[arena_name]: (self.parent().po.bot[arena_name] + 1),
+                                                   self.parent().po.left[arena_name]: (self.parent().po.right[arena_name] + 1), ...]
+                                     if self.parent().po.videos.use_list_of_vid:
+                                         video_bunch[arena_i][image_i, ...] = sub_img
+                                     else:
+                                         if len(video_bunch.shape) == 5:
+                                             video_bunch[image_i, :, :, :, arena_i] = sub_img
+                                         else:
+                                             video_bunch[image_i, :, :, arena_i] = sub_img
+                                 except ValueError:
+                                     analysis_status["message"] = "One (or more) image has a different size (restart)"
+                                     analysis_status["continue"] = False
+                                     logging.info(f"In the {message} folder: one (or more) image has a different size (restart)")
+                                     break
+                             if not analysis_status["continue"]:
+                                 break
+                         if not analysis_status["continue"]:
+                             break
+                         if analysis_status["continue"]:
+                             for arena_i, arena_name in enumerate(arena):
+                                 try:
+                                     arena_percentage, eta = pat_tracker2.get_progress()
+                                     self.message_from_thread.emit(message + f" Step 1/2: Video writing ({np.round((image_percentage + arena_percentage) / 2, 2)}%)")
+                                     if self.parent().po.videos.use_list_of_vid:
+                                         np.save(vid_names[arena_name], video_bunch[arena_i])
+                                     else:
+                                         if len(video_bunch.shape) == 5:
+                                             np.save(vid_names[arena_name], video_bunch[:, :, :, :, arena_i])
+                                         else:
+                                             np.save(vid_names[arena_name], video_bunch[:, :, :, arena_i])
+                                 except OSError:
+                                     self.message_from_thread.emit(message + ": disk full, clear some space and retry")
+                             logging.info(f"Bunch n°{bunch + 1} over {bunch_nb} saved.")
+                     logging.info("When they exist, do not overwrite unaltered videos")
+                     self.parent().po.all['overwrite_unaltered_videos'] = False
+                     self.parent().po.save_variable_dict()
+                     self.parent().po.save_data_to_run_cellects_quickly()
+                     analysis_status["message"] = "Video writing complete."
+                     if self.parent().po.videos is not None:
+                         del self.parent().po.videos
+                     return analysis_status
+                 else:
+                     analysis_status["continue"] = False
+                     if video_nb_per_bunch == 0:
+                         memory_diff = self.parent().po.update_available_core_nb()
+                         ram_message = f"{memory_diff}GB of additional RAM"
+                     if rom_memory_required is not None:
+                         rom_message = f"at least {rom_memory_required}GB of free ROM"
+
+                     if video_nb_per_bunch == 0 and rom_memory_required is not None:
+                         analysis_status["message"] = f"Requires {ram_message} and {rom_message} to run"
+                     elif video_nb_per_bunch == 0:
+                         analysis_status["message"] = f"Requires {ram_message} to run"
+                     elif rom_memory_required is not None:
+                         analysis_status["message"] = f"Requires {rom_message} to run"
+                     logging.info("Cellects is not writing videos: insufficient memory")
+                     return analysis_status
+             else:
+                 return analysis_status
+         else:
+             logging.info("Cellects is not writing videos: unnecessary")
+             analysis_status["message"] = "Cellects is not writing videos: unnecessary"
+             return analysis_status
+
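+     # run_motion_analysis() runs the per-arena MotionAnalysis either
+     # sequentially (one core, results saved as they come) or in parallel
+     # (arenas split into contiguous extents, one worker process per core,
+     # results gathered through a managed queue).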
+     def run_motion_analysis(self, message):
+         analysis_status = {"continue": True, "message": ""}
+         logging.info(f"Starting motion analysis with the detection method n°{self.parent().po.all['video_option']}")
+         self.parent().po.instantiate_tables()
+         try:
+             memory_diff = self.parent().po.update_available_core_nb()
+             if self.parent().po.cores > 0:  # i.e. enough memory
+                 if not self.parent().po.all['do_multiprocessing'] or self.parent().po.cores == 1:
+                     self.message_from_thread.emit(f"{message} Step 2/2: Video analysis")
+                     logging.info("Starting sequential analysis")
+                     tiii = default_timer()
+                     pat_tracker = PercentAndTimeTracker(len(self.parent().po.vars['analyzed_individuals']))
+                     for i, arena in enumerate(self.parent().po.vars['analyzed_individuals']):
+                         args = [i, arena, self.parent().po.vars, True, True, False, None]
+                         analysis_i = MotionAnalysis(args)
+                         # Keep only a weak reference so the arena's data can be freed.
+                         r = weakref.ref(analysis_i)
+                         if not self.parent().po.vars['several_blob_per_arena']:
+                             # Save basic statistics
+                             self.parent().po.update_one_row_per_arena(i, analysis_i.one_descriptor_per_arena)
+                             # Save descriptors in long format
+                             self.parent().po.update_one_row_per_frame(i * self.parent().po.vars['img_number'], arena * self.parent().po.vars['img_number'], analysis_i.one_row_per_frame)
+                         # Save cytosol oscillations
+                         if not pd.isna(analysis_i.one_descriptor_per_arena["first_move"]):
+                             if self.parent().po.vars['oscilacyto_analysis']:
+                                 oscil_i = pd.DataFrame(
+                                     np.c_[np.repeat(arena, analysis_i.clusters_final_data.shape[0]), analysis_i.clusters_final_data],
+                                     columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
+                                 if self.parent().po.one_row_per_oscillating_cluster is None:
+                                     self.parent().po.one_row_per_oscillating_cluster = oscil_i
+                                 else:
+                                     self.parent().po.one_row_per_oscillating_cluster = pd.concat((self.parent().po.one_row_per_oscillating_cluster, oscil_i))
+
+                         # Save efficiency visualization
+                         self.parent().po.add_analysis_visualization_to_first_and_last_images(i, analysis_i.efficiency_test_1,
+                                                                                              analysis_i.efficiency_test_2)
+                         # Emit message to the interface
+                         current_percentage, eta = pat_tracker.get_progress()
+                         self.image_from_thread.emit({"current_image": self.parent().po.last_image.bgr,
+                                                      "message": f"{message} Step 2/2: analyzed {arena} out of {len(self.parent().po.vars['analyzed_individuals'])} arenas ({current_percentage}%){eta}"})
+                         del analysis_i
+                     logging.info(f"Sequential analysis lasted {(default_timer() - tiii) / 60} minutes")
+                 else:
+                     self.message_from_thread.emit(
+                         f"{message}, Step 2/2: Analyse all videos using {self.parent().po.cores} cores...")
+                     logging.info("Starting analysis in parallel")
+                     tiii = default_timer()
+                     arena_number = len(self.parent().po.vars['analyzed_individuals'])
+                     self.advance = 0
+                     self.pat_tracker = PercentAndTimeTracker(len(self.parent().po.vars['analyzed_individuals']),
+                                                              core_number=self.parent().po.cores)
+
+                     # Split the arenas into one contiguous extent per core: the first
+                     # (arena_number % cores) cores each take one extra arena.
+                     fair_core_workload = arena_number // self.parent().po.cores
+                     cores_with_1_more = arena_number % self.parent().po.cores
+                     EXTENTS_OF_SUBRANGES = []
+                     bound = 0
+                     parallel_organization = [fair_core_workload + 1 for _ in range(cores_with_1_more)] + [fair_core_workload for _ in range(self.parent().po.cores - cores_with_1_more)]
+                     # Emit message to the interface
+                     self.image_from_thread.emit({"current_image": self.parent().po.last_image.bgr,
+                                                  "message": f"{message} Step 2/2: Analysis running on {self.parent().po.cores} CPU cores"})
+                     for extent_size in parallel_organization:
+                         EXTENTS_OF_SUBRANGES.append((bound, bound := bound + extent_size))
+
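+                     # A worked example of the split above (illustrative numbers,
+                     # not from a real run): 10 arenas on 4 cores give
+                     # parallel_organization == [3, 3, 2, 2] and
+                     # EXTENTS_OF_SUBRANGES == [(0, 3), (3, 6), (6, 8), (8, 10)],
+                     # i.e. half-open [lower, upper) bounds handed to each worker.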
+                     try:
+                         PROCESSES = []
+                         subtotals = Manager().Queue()
+                         for extent in EXTENTS_OF_SUBRANGES:
+                             p = Process(target=motion_analysis_process, args=(extent[0], extent[1], self.parent().po.vars, subtotals))
+                             p.start()
+                             PROCESSES.append(p)
+
+                         for p in PROCESSES:
+                             p.join()
+
+                         self.message_from_thread.emit(f"{message}, Step 2/2: Saving all results...")
+                         for _ in range(subtotals.qsize()):
+                             grouped_results = subtotals.get()
+                             for results_i in grouped_results:
+                                 if not self.parent().po.vars['several_blob_per_arena']:
+                                     # Save basic statistics
+                                     self.parent().po.update_one_row_per_arena(results_i['i'], results_i['one_row_per_arena'])
+                                     # Save descriptors in long format
+                                     self.parent().po.update_one_row_per_frame(results_i['i'] * self.parent().po.vars['img_number'],
+                                                                               results_i['arena'] * self.parent().po.vars['img_number'],
+                                                                               results_i['one_row_per_frame'])
+                                 if not pd.isna(results_i['first_move']):
+                                     # Save cytosol oscillations
+                                     if self.parent().po.vars['oscilacyto_analysis']:
+                                         if self.parent().po.one_row_per_oscillating_cluster is None:
+                                             self.parent().po.one_row_per_oscillating_cluster = results_i['one_row_per_oscillating_cluster']
+                                         else:
+                                             self.parent().po.one_row_per_oscillating_cluster = pd.concat((self.parent().po.one_row_per_oscillating_cluster, results_i['one_row_per_oscillating_cluster']))
+
+                                 # Save efficiency visualization
+                                 self.parent().po.add_analysis_visualization_to_first_and_last_images(results_i['i'], results_i['efficiency_test_1'],
+                                                                                                      results_i['efficiency_test_2'])
+                         self.image_from_thread.emit(
+                             {"current_image": self.parent().po.last_image.bgr,
+                              "message": f"{message} Step 2/2: analyzed {len(self.parent().po.vars['analyzed_individuals'])} out of {len(self.parent().po.vars['analyzed_individuals'])} arenas (100%)"})
+
+                         logging.info(f"Parallel analysis lasted {(default_timer() - tiii) / 60} minutes")
+                     except MemoryError:
+                         analysis_status["continue"] = False
+                         analysis_status["message"] = "Not enough memory, reduce the core number for parallel analysis"
+                         self.message_from_thread.emit(f"Analyzing {message} requires to reduce the core number for parallel analysis")
+                         return analysis_status
+                 self.parent().po.save_tables()
+                 return analysis_status
+             else:
+                 analysis_status["continue"] = False
+                 analysis_status["message"] = f"Requires an additional {memory_diff}GB of RAM to run"
+                 self.message_from_thread.emit(f"Analyzing {message} requires an additional {memory_diff}GB of RAM to run")
+                 return analysis_status
+         except MemoryError:
+             analysis_status["continue"] = False
+             analysis_status["message"] = "Requires additional memory to run"
+             self.message_from_thread.emit(f"Analyzing {message} requires additional memory to run")
+             return analysis_status
+
+
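+ # Worker executed in a separate process: it analyzes the arenas whose indices
+ # lie in the half-open range [lower_bound, upper_bound) and puts one list of
+ # per-arena result dictionaries on the shared queue.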
+ def motion_analysis_process(lower_bound: int, upper_bound: int, vars: dict, subtotals: Queue) -> None:
+     grouped_results = []
+     for i in range(lower_bound, upper_bound):
+         analysis_i = MotionAnalysis([i, i + 1, vars, True, True, False, None])
+         # Keep only a weak reference so the arena's data can be freed.
+         r = weakref.ref(analysis_i)
+         results_i = {}
+         results_i['arena'] = analysis_i.one_descriptor_per_arena['arena']
+         results_i['i'] = analysis_i.one_descriptor_per_arena['arena'] - 1
+         arena = results_i['arena']
+         if not vars['several_blob_per_arena']:
+             # Save basic statistics
+             results_i['one_row_per_arena'] = analysis_i.one_descriptor_per_arena
+             # Save descriptors in long format
+             results_i['one_row_per_frame'] = analysis_i.one_row_per_frame
+
+         results_i['first_move'] = analysis_i.one_descriptor_per_arena["first_move"]
+         if not pd.isna(analysis_i.one_descriptor_per_arena["first_move"]):
+             if vars['oscilacyto_analysis']:
+                 # Save cytosol oscillations
+                 results_i['clusters_final_data'] = analysis_i.clusters_final_data
+                 results_i['one_row_per_oscillating_cluster'] = pd.DataFrame(
+                     np.c_[np.repeat(arena, analysis_i.clusters_final_data.shape[0]), analysis_i.clusters_final_data],
+                     columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
+             if vars['fractal_analysis']:
+                 results_i['fractal_box_sizes'] = pd.DataFrame(analysis_i.fractal_boxes,
+                                                               columns=['arena', 'time', 'fractal_box_lengths', 'fractal_box_widths'])
+
+         # Save efficiency visualization
+         results_i['efficiency_test_1'] = analysis_i.efficiency_test_1
+         results_i['efficiency_test_2'] = analysis_i.efficiency_test_2
+         grouped_results.append(results_i)
+
+     subtotals.put(grouped_results)