cellects 0.1.2-py3-none-any.whl → 0.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cellects/__main__.py +65 -25
- cellects/config/all_vars_dict.py +18 -17
- cellects/core/cellects_threads.py +1034 -396
- cellects/core/motion_analysis.py +1664 -2010
- cellects/core/one_image_analysis.py +1082 -1061
- cellects/core/program_organizer.py +1687 -1316
- cellects/core/script_based_run.py +80 -76
- cellects/gui/advanced_parameters.py +390 -330
- cellects/gui/cellects.py +102 -91
- cellects/gui/custom_widgets.py +16 -33
- cellects/gui/first_window.py +226 -104
- cellects/gui/if_several_folders_window.py +117 -68
- cellects/gui/image_analysis_window.py +866 -454
- cellects/gui/required_output.py +104 -57
- cellects/gui/ui_strings.py +840 -0
- cellects/gui/video_analysis_window.py +333 -155
- cellects/image_analysis/cell_leaving_detection.py +64 -4
- cellects/image_analysis/image_segmentation.py +451 -22
- cellects/image_analysis/morphological_operations.py +2166 -1635
- cellects/image_analysis/network_functions.py +616 -253
- cellects/image_analysis/one_image_analysis_threads.py +94 -153
- cellects/image_analysis/oscillations_functions.py +131 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
- cellects/image_analysis/shape_descriptors.py +517 -466
- cellects/utils/formulas.py +169 -6
- cellects/utils/load_display_save.py +362 -109
- cellects/utils/utilitarian.py +86 -9
- cellects-0.2.6.dist-info/LICENSE +675 -0
- cellects-0.2.6.dist-info/METADATA +829 -0
- cellects-0.2.6.dist-info/RECORD +44 -0
- cellects/core/one_video_per_blob.py +0 -540
- cellects/image_analysis/cluster_flux_study.py +0 -102
- cellects-0.1.2.dist-info/LICENSE.odt +0 -0
- cellects-0.1.2.dist-info/METADATA +0 -132
- cellects-0.1.2.dist-info/RECORD +0 -44
- {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
- {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
- {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
@@ -1,10 +1,25 @@
 #!/usr/bin/env python3
 """
-Cellects
-
-
-
-
+Cellects GUI module implementing threaded image/video analysis workflows.
+
+This module provides a Qt-based interface for analyzing biological motion and growth through color space combinations,
+segmentation strategies, arena delineation, and video processing. Uses QThreaded workers to maintain UI responsiveness
+during computationally intensive tasks like segmentation, motion tracking, network detection, oscillation and fractal
+analysis.
+
+Main Components
+LoadDataToRunCellectsQuicklyThread : Loads necessary data asynchronously for quick Cellects execution.
+FirstImageAnalysisThread : Analyzes first image with automatic color space selection and segmentation.
+LastImageAnalysisThread : Processes last frame analysis for optimized color space combinations.
+CropScaleSubtractDelineateThread : Handles cropping, scaling, and arena boundary detection.
+OneArenaThread : Performs complete motion analysis on a single arena with post-processing.
+RunAllThread : Executes full batch analysis across multiple arenas/experiments.
+
+Notes
+Uses QThread for background operations to maintain UI responsiveness. Key workflows include automated color space
+optimization, adaptive segmentation algorithms, multithreaded video processing, and arena delineation via geometric
+analysis or manual drawing. Implements special post-processing for Physarum polycephalum network detection and oscillatory
+activity tracking.
 """

 import logging
@@ -18,26 +33,55 @@ from copy import deepcopy
 import cv2
 from numba.typed import Dict as TDict
 import numpy as np
+from numpy.typing import NDArray
 import pandas as pd
 from PySide6 import QtCore
-from cellects.image_analysis.morphological_operations import cross_33,
-from cellects.image_analysis.image_segmentation import
-from cellects.utils.
-from cellects.utils.
-
-from cellects.
-from cellects.utils.load_display_save import write_video
+from cellects.image_analysis.morphological_operations import cross_33, create_ellipse, create_mask, draw_img_with_mask, get_contours
+from cellects.image_analysis.image_segmentation import convert_subtract_and_filter_video
+from cellects.utils.formulas import scale_coordinates, bracket_to_uint8_image_contrast, get_contour_width_from_im_shape
+from cellects.utils.load_display_save import (read_one_arena, read_and_rotate, read_rotate_crop_and_reduce_image,
+                                              create_empty_videos, write_video)
+from cellects.utils.utilitarian import PercentAndTimeTracker, reduce_path_len, split_dict
 from cellects.core.motion_analysis import MotionAnalysis


 class LoadDataToRunCellectsQuicklyThread(QtCore.QThread):
+    """
+    Load data to run Cellects quickly in a separate thread.
+
+    This class is responsible for loading necessary data asynchronously
+    in order to speed up the process of running Cellects.
+
+    Signals
+    -------
+    message_when_thread_finished : Signal(str)
+        Emitted when the thread finishes execution, indicating whether data loading was successful.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(str)

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for quickly loading data to run Cellects.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(LoadDataToRunCellectsQuicklyThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Execute the data loading and preparation process for running cellects without setting all parameters in the GUI.
+
+        This method triggers the parent object's methods to look for data and load it,
+        then checks if the first experiment is ready. If so, it emits a message.
+        """
         self.parent().po.look_for_data()
         self.parent().po.load_data_to_run_cellects_quickly()
         if self.parent().po.first_exp_ready_to_run:
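
Note: every worker class added in this release follows the same PySide6 pattern — the main widget is passed as parent, run() reaches shared state through self.parent().po, and progress is reported through class-level QtCore.Signal attributes, which Qt queues back to the GUI thread. A minimal self-contained sketch of that pattern (the class and names below are illustrative, not Cellects API):

    import sys
    from PySide6 import QtCore, QtWidgets

    class DemoThread(QtCore.QThread):
        # Signals must be declared as class attributes; Qt binds them per instance.
        message_from_thread = QtCore.Signal(str)

        def run(self):
            # Runs in the worker thread; shared state is reached via self.parent().
            self.message_from_thread.emit(f"done: {self.parent().windowTitle()}")

    if __name__ == "__main__":
        app = QtWidgets.QApplication(sys.argv)
        widget = QtWidgets.QWidget(windowTitle="demo")
        thread = DemoThread(widget)
        thread.message_from_thread.connect(print)  # delivered on the GUI thread
        thread.finished.connect(app.quit)
        thread.start()
        sys.exit(app.exec())
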
@@ -47,21 +91,63 @@ class LoadDataToRunCellectsQuicklyThread(QtCore.QThread):


 class LookForDataThreadInFirstW(QtCore.QThread):
+    """
+    Find and process data in a separate thread.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     def __init__(self, parent=None):
+
+        """
+        Initialize the worker thread for finding data to run Cellects.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(LookForDataThreadInFirstW, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Run the data lookup process.
+        """
         self.parent().po.look_for_data()


 class LoadFirstFolderIfSeveralThread(QtCore.QThread):
+    """
+    Thread for loading data from the first folder if there are several folders.
+
+    Signals
+    -------
+    message_when_thread_finished : Signal(bool)
+        Emitted when the thread finishes execution, indicating whether data loading was successful.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_when_thread_finished = QtCore.Signal(bool)
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for loading data and parameters to run Cellects when analyzing several folders.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(LoadFirstFolderIfSeveralThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Run the data lookup process.
+        """
         self.parent().po.load_data_to_run_cellects_quickly()
         if not self.parent().po.first_exp_ready_to_run:
             self.parent().po.get_first_image()
@@ -69,47 +155,122 @@ class LoadFirstFolderIfSeveralThread(QtCore.QThread):


 class GetFirstImThread(QtCore.QThread):
-
+    """
+    Thread for getting the first image.
+
+    Signals
+    -------
+    message_when_thread_finished : Signal(bool)
+        Emitted when the thread finishes execution, indicating whether data loading was successful.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
+    message_when_thread_finished = QtCore.Signal(np.ndarray)
     def __init__(self, parent=None):
         """
-
-
-
-
-
-
-        If the selected analysis contains videos instead of images, it opens the first video
-        and read the first_detection_frame th image.
-        :param parent: An object containing all necessary variables.
+        Initialize the worker thread for loading the first image of one folder.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
         """
         super(GetFirstImThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Run the first image reading task in the parent process and emit a signal when it finishes.
+        """
         self.parent().po.get_first_image()
-        self.message_when_thread_finished.emit(
+        self.message_when_thread_finished.emit(self.parent().po.first_im)


 class GetLastImThread(QtCore.QThread):
+    """
+    Thread for getting the last image.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for loading the last image of one folder.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(GetLastImThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Run the last image reading task in the parent process.
+        """
         self.parent().po.get_last_image()


 class UpdateImageThread(QtCore.QThread):
+    """
+    Thread for updating GUI image.
+
+    Signals
+    -------
+    message_when_thread_finished : Signal(bool)
+        Emitted when the thread finishes execution, indicating whether image displaying was successful.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_when_thread_finished = QtCore.Signal(bool)

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for updating the image displayed in GUI
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(UpdateImageThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Execute the image display process, including user input handling and mask application.
+
+        This method performs several steps to analyze an image based on user input
+        and saved mask coordinates. It updates the drawn image with segmentation masks,
+        back masks, bio masks, and video contours.
+
+        Other Parameters
+        ----------------
+        user_input : bool, optional
+            Flag indicating whether user input is available.
+        idx : list or numpy.ndarray, optional
+            Coordinates of the user-defined region of interest.
+        temp_mask_coord : list, optional
+            Temporary mask coordinates.
+        saved_coord : list, optional
+            Saved mask coordinates.
+
+        Notes
+        -----
+        - This function updates several attributes of `self.parent().imageanalysiswindow`.
+        - Performance considerations include handling large images efficiently.
+        - Important behavioral caveats: Ensure coordinates are within image bounds.
+        """
         # I/ If this thread runs from user input, get the right coordinates
         # and convert them to fit the displayed image size
         user_input = len(self.parent().imageanalysiswindow.saved_coord) > 0 or len(self.parent().imageanalysiswindow.temporary_mask_coord) > 0
+        dims = self.parent().imageanalysiswindow.drawn_image.shape
         if user_input:
             if len(self.parent().imageanalysiswindow.temporary_mask_coord) > 0:
                 idx = self.parent().imageanalysiswindow.temporary_mask_coord
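
Note: GetFirstImThread now declares message_when_thread_finished = QtCore.Signal(np.ndarray) and emits the loaded image itself instead of the previously truncated emit call. PySide6 signals can carry arbitrary Python objects, so a NumPy frame passes through unchanged — a small sketch (names are illustrative):

    import numpy as np
    from PySide6 import QtCore

    class ImageProducer(QtCore.QObject):
        # The payload type is np.ndarray, so slots receive the array object itself.
        image_ready = QtCore.Signal(np.ndarray)

    def on_image(im: np.ndarray) -> None:
        print("received frame with shape", im.shape)

    producer = ImageProducer()
    producer.image_ready.connect(on_image)
    producer.image_ready.emit(np.zeros((480, 640, 3), dtype=np.uint8))
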
@@ -121,19 +282,8 @@ class UpdateImageThread(QtCore.QThread):
             # Convert coordinates:
             self.parent().imageanalysiswindow.display_image.update_image_scaling_factors()
             sf = self.parent().imageanalysiswindow.display_image.scaling_factors
-            idx
-
-            max_y = np.max(idx[:, 0])
-            min_x = np.min(idx[:, 1])
-            max_x = np.max(idx[:, 1])
-            if max_y > self.parent().imageanalysiswindow.drawn_image.shape[0]:
-                max_y = self.parent().imageanalysiswindow.drawn_image.shape[0] - 1
-            if max_x > self.parent().imageanalysiswindow.drawn_image.shape[1]:
-                max_x = self.parent().imageanalysiswindow.drawn_image.shape[1] - 1
-            if min_y < 0:
-                min_y = 0
-            if min_x < 0:
-                min_x = 0
+            idx, min_y, max_y, min_x, max_x = scale_coordinates(coord=idx, scale=sf, dims=dims)
+            minmax = min_y, max_y, min_x, max_x

         if len(self.parent().imageanalysiswindow.temporary_mask_coord) == 0:
             # not_load
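
Note: the twelve lines of manual scaling and clamping collapse into one call to scale_coordinates from cellects.utils.formulas. Its implementation is not shown in this diff; a plausible reading of the removed code, matching the call signature used above (a sketch, not the shipped helper):

    import numpy as np

    def scale_coordinates(coord, scale, dims):
        # Scale (y, x) mask coordinates by the display scaling factors, then
        # clamp the bounding box to the drawn image dimensions.
        scaled = np.round(np.asarray(coord) * np.asarray(scale)[:2]).astype(int)
        min_y = max(int(scaled[:, 0].min()), 0)
        max_y = min(int(scaled[:, 0].max()), dims[0] - 1)
        min_x = max(int(scaled[:, 1].min()), 0)
        max_x = min(int(scaled[:, 1].max()), dims[1] - 1)
        return scaled, min_y, max_y, min_x, max_x
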
@@ -144,54 +294,38 @@ class UpdateImageThread(QtCore.QThread):
             # 3) The automatically detected video contours
             # (re-)Initialize drawn image
             self.parent().imageanalysiswindow.drawn_image = deepcopy(self.parent().po.current_image)
-
-
-            else:
-                contour_width = 6
-            # 1) The segmentation mask
-            logging.info('Add the segmentation mask to the image')
+            contour_width = get_contour_width_from_im_shape(dims)
+            # 1) Add the segmentation mask to the image
             if self.parent().imageanalysiswindow.is_first_image_flag:
                 im_combinations = self.parent().po.first_image.im_combinations
                 im_mean = self.parent().po.first_image.image.mean()
             else:
                 im_combinations = self.parent().po.last_image.im_combinations
-                im_mean = self.parent().po.last_image.
+                im_mean = self.parent().po.last_image.image.mean()
             # If there are image combinations, get the current corresponding binary image
             if im_combinations is not None and len(im_combinations) != 0:
                 binary_idx = im_combinations[self.parent().po.current_combination_id]["binary_image"]
                 # If it concerns the last image, only keep the contour coordinates
-
-                cv2.eroded_binary = cv2.erode(binary_idx, cross_33)
-                binary_idx = binary_idx - cv2.eroded_binary
-                binary_idx = cv2.dilate(binary_idx, kernel=cross_33, iterations=contour_width)
+                binary_idx = cv2.dilate(get_contours(binary_idx), kernel=cross_33, iterations=contour_width)
                 binary_idx = np.nonzero(binary_idx)
                 # Color these coordinates in magenta on bright images, and in pink on dark images
                 if im_mean > 126:
-                    #
+                    # Color the segmentation mask in magenta
                     self.parent().imageanalysiswindow.drawn_image[binary_idx[0], binary_idx[1], :] = np.array((20, 0, 150), dtype=np.uint8)
                 else:
-                    #
+                    # Color the segmentation mask in pink
                     self.parent().imageanalysiswindow.drawn_image[binary_idx[0], binary_idx[1], :] = np.array((94, 0, 213), dtype=np.uint8)
             if user_input:# save
-                mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
                 if self.parent().imageanalysiswindow.back1_bio2 == 0:
-
-
-
-
-
-
-
-
-
-                if self.parent().imageanalysiswindow.back1_bio2 == 2:
-                    if self.parent().po.all['starting_blob_shape'] == 'circle':
-                        ellipse = Ellipse((max_y - min_y, max_x - min_x)).create().astype(np.uint8)
-                        mask[min_y:max_y, min_x:max_x, ...] = ellipse
-                    else:
-                        mask[min_y:max_y, min_x:max_x] = 1
-                else:
-                    mask[min_y:max_y, min_x:max_x] = 1
+                    mask_shape = self.parent().po.vars['arena_shape']
+                elif self.parent().imageanalysiswindow.back1_bio2 == 1:
+                    mask_shape = "rectangle"
+                elif self.parent().imageanalysiswindow.back1_bio2 == 2:
+                    mask_shape = self.parent().po.all['starting_blob_shape']
+                    if mask_shape is None:
+                        mask_shape = 'circle'
+                # Save the user drawn mask
+                mask = create_mask(dims, minmax, mask_shape)
                 mask = np.nonzero(mask)

             if self.parent().imageanalysiswindow.back1_bio2 == 1:
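
Note: the erode-and-subtract triplet removed here is the classic morphological way to get a one-pixel contour from a binary mask, now factored into get_contours. Reconstructed from the removed lines (the cross kernel is assumed to match the imported cross_33):

    import cv2
    import numpy as np

    cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    def get_contours(binary: np.ndarray) -> np.ndarray:
        # Contour = the pixels that a 3x3 erosion removes from the mask.
        return binary - cv2.erode(binary, cross_33)

    mask = np.zeros((64, 64), dtype=np.uint8)
    cv2.circle(mask, (32, 32), 20, 1, -1)                          # filled disk
    ring = cv2.dilate(get_contours(mask), cross_33, iterations=3)  # thickened outline
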
@@ -217,12 +351,11 @@ class UpdateImageThread(QtCore.QThread):

                 self.parent().imageanalysiswindow.drawn_image[bio_coord[0], bio_coord[1], :] = np.array((17, 160, 212), dtype=np.uint8)

-            image = self.parent().imageanalysiswindow.drawn_image
+            image = self.parent().imageanalysiswindow.drawn_image.copy()
             # 3) The automatically detected video contours
             if self.parent().imageanalysiswindow.delineation_done: # add a mask of the video contour
-                #
+                # Draw the delineation mask of each arena
                 for contour_i in range(len(self.parent().po.top)):
-                    mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
                     min_cy = self.parent().po.top[contour_i]
                     max_cy = self.parent().po.bot[contour_i]
                     min_cx = self.parent().po.left[contour_i]
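
Note: the switch from image = ...drawn_image to .copy() matters because the subsequent per-arena drawing would otherwise write through the alias into the cached drawn_image. In NumPy terms:

    import numpy as np

    base = np.zeros((4, 4, 3), dtype=np.uint8)  # the cached drawn_image
    overlay = base                              # old behavior: an alias
    overlay[0, 0] = (255, 0, 0)
    assert base[0, 0, 0] == 255                 # the cache was mutated

    base = np.zeros((4, 4, 3), dtype=np.uint8)
    overlay = base.copy()                       # new behavior: independent buffer
    overlay[0, 0] = (255, 0, 0)
    assert base[0, 0, 0] == 0                   # the cache stays clean
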
@@ -239,70 +372,88 @@ class UpdateImageThread(QtCore.QThread):
                                 2) # font stroke
                     if (max_cy - min_cy) < 0 or (max_cx - min_cx) < 0:
                         self.parent().imageanalysiswindow.message.setText("Error: the shape number or the detection is wrong")
-
-
-                        ellipse = cv2.morphologyEx(ellipse, cv2.MORPH_GRADIENT, cross_33)
-                        mask[min_cy:max_cy, min_cx:max_cx, ...] = ellipse
-                    else:
-                        mask[(min_cy, max_cy), min_cx:max_cx] = 1
-                        mask[min_cy:max_cy, (min_cx, max_cx)] = 1
-                        mask = cv2.dilate(mask, kernel=cross_33, iterations=contour_width)
-
-                    mask = np.nonzero(mask)
-                    image[mask[0], mask[1], :] = np.array((138, 95, 18), dtype=np.uint8)# self.parent().po.vars['contour_color']
-
+                    image = draw_img_with_mask(image, dims, (min_cy, max_cy - 1, min_cx, max_cx - 1),
+                                               self.parent().po.vars['arena_shape'], (138, 95, 18), True, contour_width)
         else: #load
             if user_input:
                 # III/ If this thread runs from user input: update the drawn_image according to the current user input
                 # Just add the mask to drawn_image as quick as possible
                 # Add user defined masks
                 # Take the drawn image and add the temporary mask to it
-                image =
-                if self.parent().imageanalysiswindow.back1_bio2 ==
-
-
-
-
-
-
-
-                    mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
-                    mask[min_y:max_y, min_x:max_x] = 1
-                    mask = np.nonzero(mask)
-                    image[mask[0], mask[1], :] = np.array((0, 0, 0), dtype=np.uint8)
+                image = self.parent().imageanalysiswindow.drawn_image.copy()
+                if self.parent().imageanalysiswindow.back1_bio2 == 2:
+                    color = (17, 160, 212)
+                    mask_shape = self.parent().po.all['starting_blob_shape']
+                    if mask_shape is None:
+                        mask_shape = 'circle'
+                elif self.parent().imageanalysiswindow.back1_bio2 == 1:
+                    color = (224, 160, 81)
+                    mask_shape = "rectangle"
                 else:
-
-
-
-                    ellipse = Ellipse((max_y - min_y, max_x - min_x)).create()
-                    ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
-                    image[min_y:max_y, min_x:max_x, ...] *= (1 - ellipse)
-                    ellipse[:, :, :] *= np.array((17, 160, 212), dtype=np.uint8)
-                    image[min_y:max_y, min_x:max_x, ...] += ellipse
-                else:
-                    mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
-                    mask[min_y:max_y, min_x:max_x] = 1
-                    mask = np.nonzero(mask)
-                    image[mask[0], mask[1], :] = np.array((17, 160, 212), dtype=np.uint8)
-                else:
-                    mask = np.zeros(self.parent().imageanalysiswindow.drawn_image.shape[:2], dtype=np.uint8)
-                    mask[min_y:max_y, min_x:max_x] = 1
-                    mask = np.nonzero(mask)
-                    image[mask[0], mask[1], :] = np.array((224, 160, 81), dtype=np.uint8)
-
+                    color = (0, 0, 0)
+                    mask_shape = self.parent().po.all['arena_shape']
+                image = draw_img_with_mask(image, dims, minmax, mask_shape, color)
         self.parent().imageanalysiswindow.display_image.update_image(image)
         self.message_when_thread_finished.emit(True)


 class FirstImageAnalysisThread(QtCore.QThread):
+    """
+    Thread for analyzing the first image of a given folder.
+
+    Signals
+    -------
+    message_from_thread : Signal(str)
+        Signal emitted when progress messages are available.
+    message_when_thread_finished : Signal(bool)
+        Signal emitted upon completion of the thread's task.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(str)
     message_when_thread_finished = QtCore.Signal(bool)

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for analyzing the first image of a given folder
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(FirstImageAnalysisThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Perform image analysis and segmentation based on the current state of the application.
+
+        This function handles both bio-mask and background mask processing, emits status messages,
+        computes average pixel size if necessary, and performs image segmentation or generates
+        analysis options.
+
+        Parameters
+        ----------
+        self : object
+            The instance of the class containing this method. Should have attributes:
+            - parent: Reference to the parent object
+            - message_from_thread.emit: Method to emit messages from the thread
+            - message_when_thread_finished.emit: Method to signal thread completion
+
+        Returns
+        -------
+        None
+            This method does not return a value but emits messages and modifies the state of
+            self.parent objects.
+        Notes
+        -----
+        This method performs several complex operations involving image segmentation and
+        analysis generation. It handles both bio-masks and background masks, computes average
+        pixel sizes, and updates various state attributes on the parent object.
+        """
         tic = default_timer()
         biomask = None
         backmask = None
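
Note: all the hand-rolled rectangle/ellipse painting branches are now funneled through create_mask and draw_img_with_mask from morphological_operations. Their code is not part of this diff; judging from the call sites, they plausibly behave like this sketch (assumed behavior, not the shipped implementation):

    import cv2
    import numpy as np

    def create_mask(dims, minmax, shape):
        # Filled ellipse or rectangle over a (min_y, max_y, min_x, max_x) box.
        min_y, max_y, min_x, max_x = minmax
        mask = np.zeros(dims[:2], dtype=np.uint8)
        if shape == 'circle':
            center = ((min_x + max_x) // 2, (min_y + max_y) // 2)
            axes = (max(1, (max_x - min_x) // 2), max(1, (max_y - min_y) // 2))
            cv2.ellipse(mask, center, axes, 0, 0, 360, 1, -1)
        else:
            mask[min_y:max_y, min_x:max_x] = 1
        return mask

    def draw_img_with_mask(image, dims, minmax, shape, color):
        # Paint the masked pixels of the image in the given BGR color.
        coords = np.nonzero(create_mask(dims, minmax, shape))
        image[coords[0], coords[1], :] = np.array(color, dtype=np.uint8)
        return image
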
@@ -315,15 +466,14 @@ class FirstImageAnalysisThread(QtCore.QThread):
         if self.parent().imageanalysiswindow.back_masks_number != 0:
             backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
         if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2 or shape_nb == self.parent().po.sample_number:
-            self.message_from_thread.emit("Image segmentation, wait
+            self.message_from_thread.emit("Image segmentation, wait")
             if not self.parent().imageanalysiswindow.asking_first_im_parameters_flag and self.parent().po.all['scale_with_image_or_cells'] == 0 and self.parent().po.all["set_spot_size"]:
                 self.parent().po.get_average_pixel_size()
-                spot_size = self.parent().po.starting_blob_hsize_in_pixels
             else:
-
+                self.parent().po.starting_blob_hsize_in_pixels = None
             self.parent().po.all["bio_mask"] = biomask
             self.parent().po.all["back_mask"] = backmask
-            self.parent().po.
+            self.parent().po.fast_first_image_segmentation()
             if shape_nb == self.parent().po.sample_number and self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] != self.parent().po.sample_number:
                 self.parent().po.first_image.im_combinations[self.parent().po.current_combination_id]['shape_number'] = shape_nb
                 self.parent().po.first_image.shape_number = shape_nb
@@ -333,13 +483,13 @@ class FirstImageAnalysisThread(QtCore.QThread):
             self.message_from_thread.emit("Generating analysis options, wait...")
             if self.parent().po.vars["color_number"] > 2:
                 kmeans_clust_nb = self.parent().po.vars["color_number"]
-                if self.parent().po.
+                if self.parent().po.basic:
                     self.message_from_thread.emit("Generating analysis options, wait less than 30 minutes")
                 else:
                     self.message_from_thread.emit("Generating analysis options, a few minutes")
             else:
                 kmeans_clust_nb = None
-                if self.parent().po.
+                if self.parent().po.basic:
                     self.message_from_thread.emit("Generating analysis options, wait a few minutes")
                 else:
                     self.message_from_thread.emit("Generating analysis options, around 1 minute")
@@ -351,7 +501,7 @@ class FirstImageAnalysisThread(QtCore.QThread):
                 biomask=self.parent().po.all["bio_mask"],
                 backmask=self.parent().po.all["back_mask"],
                 color_space_dictionaries=None,
-
+                basic=self.parent().po.basic)
         else:
             if self.parent().po.all['scale_with_image_or_cells'] == 0:
                 self.parent().po.get_average_pixel_size()
@@ -365,22 +515,73 @@ class FirstImageAnalysisThread(QtCore.QThread):
                 biomask=self.parent().po.all["bio_mask"],
                 backmask=self.parent().po.all["back_mask"],
                 color_space_dictionaries=None,
-
+                basic=self.parent().po.basic)

-        logging.info(f" image analysis lasted {default_timer() - tic} secondes")
-        logging.info(f" image analysis lasted {np.round((default_timer() - tic) / 60)} minutes")
+        logging.info(f" image analysis lasted {np.floor((default_timer() - tic) / 60).astype(int)} minutes {np.round((default_timer() - tic) % 60).astype(int)} secondes")
         self.message_when_thread_finished.emit(True)


 class LastImageAnalysisThread(QtCore.QThread):
+    """
+    Thread for analyzing the last image of a given folder.
+
+    Signals
+    -------
+    message_from_thread : Signal(str)
+        Signal emitted when progress messages are available.
+    message_when_thread_finished : Signal(bool)
+        Signal emitted upon completion of the thread's task.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(str)
     message_when_thread_finished = QtCore.Signal(bool)

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for analyzing the last image of a given folder
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(LastImageAnalysisThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Summary:
+        Run the image processing and analysis pipeline based on current settings.
+
+        Extended Description:
+        This function initiates the workflow for image processing and analysis,
+        including segmenting images, generating analysis options, and handling
+        various masks and settings based on the current state of the parent object.
+
+        Returns:
+        --------
+        None
+            This method does not return a value. It emits signals to indicate the
+            progress and completion of the processing tasks.
+
+        Notes:
+        ------
+        This function uses various attributes from the parent class to determine
+        how to process and analyze images. The specific behavior is heavily
+        dependent on the state of these attributes.
+
+        Attributes:
+        -----------
+        parent() : object
+            The owner of this instance, containing necessary settings and methods.
+        message_from_thread.emit(s : str) : signal
+            Signal to indicate progress messages from the thread.
+        message_when_thread_finished.emit(success : bool) : signal
+            Signal to indicate the completion of the thread.
+        """
         self.parent().po.cropping(False)
         self.parent().po.get_background_to_subtract()
         biomask = None
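
Note: the two duplicated timing logs (one in raw seconds, one in rounded minutes) are merged into a single minutes-and-seconds message. Plain divmod expresses the same split without NumPy:

    from timeit import default_timer

    tic = default_timer()
    # ... analysis work ...
    minutes, seconds = divmod(default_timer() - tic, 60)
    print(f"image analysis lasted {int(minutes)} minutes {round(seconds)} seconds")
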
@@ -389,55 +590,101 @@ class LastImageAnalysisThread(QtCore.QThread):
             biomask = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
         if self.parent().imageanalysiswindow.back_masks_number != 0:
             backmask = np.nonzero(self.parent().imageanalysiswindow.back_mask)
-        if self.parent().po.visualize or len(self.parent().po.first_im.shape) == 2:
+        if self.parent().po.visualize or (len(self.parent().po.first_im.shape) == 2 and not self.parent().po.network_shaped):
             self.message_from_thread.emit("Image segmentation, wait...")
-            self.parent().po.
+            self.parent().po.fast_last_image_segmentation(biomask=biomask, backmask=backmask)
         else:
             self.message_from_thread.emit("Generating analysis options, wait...")
-
-
-
-
+            arenas_mask = None
+            if self.parent().po.all['are_gravity_centers_moving'] != 1:
+                cr = [self.parent().po.top, self.parent().po.bot, self.parent().po.left, self.parent().po.right]
+                arenas_mask = np.zeros_like(self.parent().po.first_image.validated_shapes)
+                for _i in np.arange(len(self.parent().po.vars['analyzed_individuals'])):
+                    if self.parent().po.vars['arena_shape'] == 'circle':
+                        ellipse = create_ellipse(cr[1][_i] - cr[0][_i], cr[3][_i] - cr[2][_i])
+                        arenas_mask[cr[0][_i]: cr[1][_i], cr[2][_i]:cr[3][_i]] = ellipse
+                    else:
+                        arenas_mask[cr[0][_i]: cr[1][_i], cr[2][_i]:cr[3][_i]] = 1
+            if self.parent().po.network_shaped:
+                self.parent().po.last_image.network_detection(arenas_mask, csc_dict=self.parent().po.vars["convert_for_motion"], biomask=biomask, backmask=backmask)
             else:
-
-
-
-
-                    inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.x_boundaries)))
-                else:
-                    dist1 = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.y_boundaries)))
-                    dist2 = np.mean(np.diff(np.nonzero(self.parent().po.videos.first_image.x_boundaries)))
-                    inter_dist = np.max(dist1, dist2)
-                if self.parent().po.all['starting_blob_shape'] == "circle":
-                    max_shape_size = np.pi * np.square(inter_dist)
+                if self.parent().po.vars['several_blob_per_arena']:
+                    concomp_nb = [self.parent().po.sample_number, self.parent().po.first_image.size // 50]
+                    max_shape_size = .75 * self.parent().po.first_image.size
+                    total_surfarea = .99 * self.parent().po.first_image.size
                 else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    concomp_nb = [self.parent().po.sample_number, self.parent().po.sample_number * 200]
+                    if self.parent().po.all['are_zigzag'] == "columns":
+                        inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.first_image.y_boundaries)))
+                    elif self.parent().po.all['are_zigzag'] == "rows":
+                        inter_dist = np.mean(np.diff(np.nonzero(self.parent().po.first_image.x_boundaries)))
+                    else:
+                        dist1 = np.mean(np.diff(np.nonzero(self.parent().po.first_image.y_boundaries)))
+                        dist2 = np.mean(np.diff(np.nonzero(self.parent().po.first_image.x_boundaries)))
+                        inter_dist = np.max(dist1, dist2)
+                    if self.parent().po.all['starting_blob_shape'] == "rectangle":
+                        max_shape_size = np.square(2 * inter_dist)
+                    else:
+                        max_shape_size = np.pi * np.square(inter_dist)
+                    total_surfarea = max_shape_size * self.parent().po.sample_number
+                ref_image = self.parent().po.first_image.validated_shapes
+                self.parent().po.first_image.generate_subtract_background(self.parent().po.vars['convert_for_motion'], self.parent().po.vars['drift_already_corrected'])
+                kmeans_clust_nb = None
+                self.parent().po.last_image.find_last_im_csc(concomp_nb, total_surfarea, max_shape_size, arenas_mask,
+                                                             ref_image, self.parent().po.first_image.subtract_background,
+                                                             kmeans_clust_nb, biomask, backmask, color_space_dictionaries=None,
+                                                             basic=self.parent().po.basic)
         self.message_when_thread_finished.emit(True)


 class CropScaleSubtractDelineateThread(QtCore.QThread):
+    """
+    Thread for detecting crop and arena coordinates.
+
+    Signals
+    -------
+    message_from_thread : Signal(str)
+        Signal emitted when progress messages are available.
+    message_when_thread_finished : Signal(bool)
+        Signal emitted upon completion of the thread's task.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(str)
     message_when_thread_finished = QtCore.Signal(str)

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for detecting crop and arena coordinates in the first image
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
+
         super(CropScaleSubtractDelineateThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Start cropping if required, perform initial processing,
+        and handle subsequent operations based on configuration.
+
+        Extended Description
+        --------------------
+        This method initiates the cropping process if necessary,
+        performs initial processing steps, and manages subsequent operations
+        depending on whether multiple blobs are detected per arena.
+
+        Notes
+        -----
+        This method uses several logging operations to track its progress.
+        It interacts with various components of the parent object
+        to perform necessary image processing tasks.
+        """
         logging.info("Start cropping if required")
         self.parent().po.cropping(is_first_image=True)
         self.parent().po.cropping(is_first_image=False)
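
Note: the new arenas_mask branch stamps one filled ellipse or rectangle per arena bounding box into a full-frame mask before network detection or the color space search. The stamping loop, isolated with synthetic coordinates (create_ellipse is assumed to return a filled 0/1 ellipse inscribed in the given height x width box, matching its use above):

    import numpy as np

    def create_ellipse(height, width):
        # Assumed behavior: filled 0/1 ellipse inscribed in a height x width box.
        y, x = np.ogrid[:height, :width]
        cy, cx = (height - 1) / 2, (width - 1) / 2
        inside = ((y - cy) / (height / 2)) ** 2 + ((x - cx) / (width / 2)) ** 2 <= 1
        return inside.astype(np.uint8)

    arenas_mask = np.zeros((200, 300), dtype=np.uint8)
    top, bot = [10, 10], [90, 90]        # per-arena bounding boxes
    left, right = [20, 160], [120, 260]
    for i in range(2):
        arenas_mask[top[i]:bot[i], left[i]:right[i]] = create_ellipse(bot[i] - top[i], right[i] - left[i])
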
@@ -454,7 +701,7 @@ class CropScaleSubtractDelineateThread(QtCore.QThread):
         y_lim = self.parent().po.first_image.y_boundaries
         if ((nb - 1) != self.parent().po.sample_number or np.any(stats[:, 4] == 1)):
             self.message_from_thread.emit("Image analysis failed to detect the right cell(s) number: restart the analysis.")
-        elif
+        elif (y_lim == - 1).sum() != (y_lim == 1).sum():
             self.message_from_thread.emit("Automatic arena delineation cannot work if one cell touches the image border.")
             self.parent().po.first_image.y_boundaries = None
         else:
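
Note: the restored elif condition rejects automatic delineation when y_boundaries contains unpaired edge markers, which happens when a cell touches the image border. Assuming -1 marks where a detected row of shapes begins and +1 where it ends, the pairing test reads:

    import numpy as np

    y_lim = np.array([0, -1, 0, 0, 1, 0, -1, 0, 1])   # two balanced start/end pairs
    assert (y_lim == -1).sum() == (y_lim == 1).sum()  # delineation can proceed

    y_lim = np.array([0, -1, 0, 1, 0, -1])            # a start without a matching end
    assert (y_lim == -1).sum() != (y_lim == 1).sum()  # triggers the error message
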
@@ -468,12 +715,29 @@ class CropScaleSubtractDelineateThread(QtCore.QThread):


 class SaveManualDelineationThread(QtCore.QThread):
+    """
+    Thread for saving user's defined arena delineation through the GUI.

+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for saving the arena coordinates when the user draw them manually
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(SaveManualDelineationThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Do save the coordinates.
+        """
         self.parent().po.left = np.arange(self.parent().po.sample_number)
         self.parent().po.right = np.arange(self.parent().po.sample_number)
         self.parent().po.top = np.arange(self.parent().po.sample_number)
@@ -492,37 +756,114 @@ class SaveManualDelineationThread(QtCore.QThread):

         logging.info("Save manual video delineation")
         self.parent().po.vars['analyzed_individuals'] = np.arange(self.parent().po.sample_number) + 1
-        self.parent().po.videos = OneVideoPerBlob(self.parent().po.first_image, self.parent().po.starting_blob_hsize_in_pixels, self.parent().po.all['raw_images'])
-        self.parent().po.videos.left = self.parent().po.left
-        self.parent().po.videos.right = self.parent().po.right
-        self.parent().po.videos.top = self.parent().po.top
-        self.parent().po.videos.bot = self.parent().po.bot


 class GetExifDataThread(QtCore.QThread):
+    """
+    Thread for loading exif data from images.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for looking for the exif data.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(GetExifDataThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Do extract exif data..
+        """
         self.parent().po.extract_exif()


-class
+class CompleteImageAnalysisThread(QtCore.QThread):
+    """
+    Thread for completing the last image analysis.
+
+    Signals
+    -------
+    message_when_thread_finished : Signal(bool)
+        Signal emitted upon completion of the thread's task.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
+    message_when_thread_finished = QtCore.Signal(bool)
+
+    def __init__(self, parent=None):
+        """
+        Initialize the worker thread for completing the last image analysis
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
+        super(CompleteImageAnalysisThread, self).__init__(parent)
+        self.setParent(parent)
+
+    def run(self):
+        self.parent().po.get_background_to_subtract()
+        self.parent().po.get_origins_and_backgrounds_lists()
+        self.parent().po.data_to_save['coordinates'] = True
+        self.parent().po.data_to_save['exif'] = True
+        self.parent().po.save_data_to_run_cellects_quickly()
+        self.parent().po.all['bio_mask'] = None
+        self.parent().po.all['back_mask'] = None
+        if self.parent().imageanalysiswindow.bio_masks_number != 0:
+            self.parent().po.all['bio_mask'] = np.nonzero(self.parent().imageanalysiswindow.bio_mask)
+        if self.parent().imageanalysiswindow.back_masks_number != 0:
+            self.parent().po.all['back_mask'] = np.nonzero(self.parent().imageanalysiswindow.back_mask)
+        self.parent().po.complete_image_analysis()
+        self.message_when_thread_finished.emit(True)
+
+
+class PrepareVideoAnalysisThread(QtCore.QThread):
+    """
+    Thread for preparing video analysis.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """

     def __init__(self, parent=None):
-
+        """
+        Initialize the worker thread for ending up the last image analysis and preparing video analysis.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
+        super(PrepareVideoAnalysisThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
+        """
+        Run the image processing pipeline for the last image of the current folder.
+
+        This method handles background subtraction,
+        image segmentation, and data saving.
+        """
         self.parent().po.get_background_to_subtract()

         self.parent().po.get_origins_and_backgrounds_lists()

         if self.parent().po.last_image is None:
             self.parent().po.get_last_image()
-        self.parent().po.
+        self.parent().po.fast_last_image_segmentation()
         self.parent().po.find_if_lighter_background()
         logging.info("The current (or the first) folder is ready to run")
         self.parent().po.first_exp_ready_to_run = True
@@ -534,25 +875,46 @@ class FinalizeImageAnalysisThread(QtCore.QThread):


 class SaveAllVarsThread(QtCore.QThread):
+    """
+    Thread for saving the GUI parameters and updating current folder.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for saving the GUI parameters and updating current folder
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(SaveAllVarsThread, self).__init__(parent)
         self.setParent(parent)

     def run(self):
-
-
-        #self.parent().po.all['global_pathway']
-        #os.getcwd()
+        """
+        Execute a sequence of operations to save data and update the current folder.

-
+        This method performs several steps:
+        1. Save variable dictionary.
+        2. Set the current folder.
+        3. Save data to run Cellects quickly without creating a new one if it doesn't exist.
+        """
+        self.parent().po.save_variable_dict()
+        self._set_current_folder()
         self.parent().po.save_data_to_run_cellects_quickly(new_one_if_does_not_exist=False)
-        #if os.access(f"", os.R_OK):
-        #    self.parent().po.save_data_to_run_cellects_quickly()
-        #else:
-        #    logging.error(f"No permission access to write in {os.getcwd()}")

-    def
+    def _set_current_folder(self):
+        """
+        Set the current folder based on conditions.
+
+        Sets the current folder to the first one in the list if there are multiple
+        folders, otherwise sets it to a reduced global pathway.
+        """
         if self.parent().po.all['folder_number'] > 1: # len(self.parent().po.all['folder_list']) > 1: # len(self.parent().po.all['folder_list']) > 0:
             logging.info(f"Use {self.parent().po.all['folder_list'][0]} folder")
             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
@@ -564,30 +926,80 @@ class SaveAllVarsThread(QtCore.QThread):


 class OneArenaThread(QtCore.QThread):
+    """
+    Thread for completing the analysis of one particular arena in the current folder.
+
+    Signals
+    -------
+    message_from_thread_starting : Signal(str)
+        Signal emitted when the thread successfully starts.
+    image_from_thread : Signal(dict)
+        Signal emitted during the video reading or analysis to display images of the current status to the GUI.
+    when_loading_finished : Signal(bool)
+        Signal emitted when the video is completely loaded.
+    when_detection_finished : Signal(str)
+        Signal emitted when the video analysis is finished.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread_starting = QtCore.Signal(str)
     image_from_thread = QtCore.Signal(dict)
     when_loading_finished = QtCore.Signal(bool)
     when_detection_finished = QtCore.Signal(str)

     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for saving the analyzing one arena entirely
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of CellectsMainWidget class. Default is None.
+        """
         super(OneArenaThread, self).__init__(parent)
         self.setParent(parent)
         self._isRunning = False

     def run(self):
+        """
+
+        Run analysis on one arena.
+
+        This method prepares and initiates the analysis process for a video by setting up required folders,
+        loading necessary data, and performing pre-processing steps. It manages the state of running analysis and
+        handles memory allocation for efficient processing.
+
+        Notes
+        -----
+        - This method uses threading to handle long-running operations without blocking the main UI.
+        - The memory allocation is dynamically adjusted based on available system resources.
+
+        Attributes
+        ----------
+        self.parent().po.vars['convert_for_motion'] : dict
+            Dictionary containing variables related to motion conversion.
+        self.parent().po.first_exp_ready_to_run : bool
+            Boolean indicating if the first experiment is ready to run.
+        self.parent().po.cores : int
+            Number of cores available for processing.
+        self.parent().po.motion : object
+            Object containing motion-related data and methods.
+        self.parent().po.load_quick_full : int
+            Number of arenas to load quickly for full detection.
+        """
         continue_analysis = True
         self._isRunning = True
         self.message_from_thread_starting.emit("Video loading, wait...")

         self.set_current_folder()
-        print(self.parent().po.vars['convert_for_motion'])
         if not self.parent().po.first_exp_ready_to_run:
             self.parent().po.load_data_to_run_cellects_quickly()
         if not self.parent().po.first_exp_ready_to_run:
             #Need a look for data when Data to run Cellects quickly.pkl and 1 folder selected amon several
             continue_analysis = self.pre_processing()
         if continue_analysis:
-            print(self.parent().po.vars['convert_for_motion'])
             memory_diff = self.parent().po.update_available_core_nb()
             if self.parent().po.cores == 0:
                 self.message_from_thread_starting.emit(f"Analyzing one arena requires {memory_diff}GB of additional RAM to run")
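
Note: update_available_core_nb (not shown in this diff) apparently compares the RAM one arena needs against what the machine has free, setting cores to zero and returning the shortfall that the message above reports. A rough equivalent using psutil — an illustration of the gating logic, not the Cellects implementation:

    import psutil

    def workers_that_fit(gb_per_worker: float, max_workers: int):
        # How many workers fit in available RAM, and the shortfall if none do.
        available_gb = psutil.virtual_memory().available / 1024 ** 3
        workers = min(max_workers, int(available_gb // gb_per_worker))
        shortfall = max(0.0, gb_per_worker - available_gb)
        return workers, round(shortfall, 2)

    cores, memory_diff = workers_that_fit(gb_per_worker=4.0, max_workers=8)
    if cores == 0:
        print(f"Analyzing one arena requires {memory_diff}GB of additional RAM to run")
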
@@ -607,9 +1019,22 @@ class OneArenaThread(QtCore.QThread):
                 self.message_from_thread_starting.emit(f"The current parameters failed to detect the cell(s) motion")

     def stop(self):
+        """
+        Stops the running process.
+
+        This method is used to safely halt the current process.
+        """
         self._isRunning = False

     def set_current_folder(self):
+        """
+
+        Sets the current folder based on conditions.
+
+        This method determines which folder to use and updates the current
+        folder ID accordingly. If there are multiple folders, it uses the first folder
+        from the list; otherwise, it uses a reduced global pathway as the current.
+        """
         if self.parent().po.all['folder_number'] > 1:
             logging.info(f"Use {self.parent().po.all['folder_list'][0]} folder")
             self.parent().po.update_folder_id(self.parent().po.all['sample_number_per_folder'][0],
@@ -620,11 +1045,24 @@ class OneArenaThread(QtCore.QThread):
             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])

     def pre_processing(self):
+        """
+        Pre-processes the data for running Cellects on one arena.
+
+        This function logs various stages of preprocessing, validates specimen numbers,
+        performs necessary segmentations and data saving operations. It handles the
+        initialization, image analysis, and background extraction processes to prepare
+        the folder for further analysis.
+
+        Returns
+        -------
+        bool
+            Returns True if pre-processing completed successfully; False otherwise.
+        """
         logging.info("Pre-processing has started")
         analysis_status = {"continue": True, "message": ""}

         self.parent().po.get_first_image()
-        self.parent().po.
+        self.parent().po.fast_first_image_segmentation()
         if len(self.parent().po.vars['analyzed_individuals']) != self.parent().po.first_image.shape_number:
             self.message_from_thread_starting.emit(f"Wrong specimen number: (re)do the complete analysis.")
             analysis_status["continue"] = False
@@ -646,126 +1084,75 @@ class OneArenaThread(QtCore.QThread):
         else:
             self.parent().po.get_origins_and_backgrounds_lists()
             self.parent().po.get_last_image()
-            self.parent().po.
+            self.parent().po.fast_last_image_segmentation()
             self.parent().po.find_if_lighter_background()
             logging.info("The current (or the first) folder is ready to run")
             self.parent().po.first_exp_ready_to_run = True
         return analysis_status["continue"]
 
     def load_one_arena(self):
+        """
+        Load a single arena from images or video to perform motion analysis.
+        """
         arena = self.parent().po.all['arena']
         i = np.nonzero(self.parent().po.vars['analyzed_individuals'] == arena)[0][0]
+        true_frame_width = self.parent().po.vars['origin_list'][i].shape[1]
+        if self.parent().po.all['overwrite_unaltered_videos'] and os.path.isfile(f'ind_{arena}.npy'):
+            os.remove(f'ind_{arena}.npy')
+        background = None
+        background2 = None
+        if self.parent().po.vars['subtract_background']:
+            background = self.parent().po.vars['background_list'][i]
+            if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
+                background2 = self.parent().po.vars['background_list2'][i]
+        vid_name = None
+        if self.parent().po.vars['video_list'] is not None:
+            vid_name = self.parent().po.vars['video_list'][i]
+        visu, converted_video, converted_video2 = read_one_arena(self.parent().po.all['arena'],
+            self.parent().po.vars['already_greyscale'], self.parent().po.vars['convert_for_motion'],
+            None, true_frame_width, vid_name, background, background2)
+
         save_loaded_video: bool = False
-        if
+        if visu is None or (self.parent().po.vars['already_greyscale'] and converted_video is None):
+            cr = [self.parent().po.top[i], self.parent().po.bot[i],
+                  self.parent().po.left[i], self.parent().po.right[i]]
+            vids = create_empty_videos(self.parent().po.data_list, cr,
+                self.parent().po.vars['lose_accuracy_to_save_memory'], self.parent().po.vars['already_greyscale'],
+                self.parent().po.vars['convert_for_motion'])
+            self.parent().po.visu, self.parent().po.converted_video, self.parent().po.converted_video2 = vids
             logging.info(f"Starting to load arena n°{arena} from images")
-
-            self.parent().po.one_arenate_done = True
-            i = np.nonzero(self.parent().po.vars['analyzed_individuals'] == arena)[0][0]
-            if self.parent().po.vars['lose_accuracy_to_save_memory']:
-                self.parent().po.converted_video = np.zeros(
-                    (len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c, self.parent().po.right[i] - self.parent().po.left[i] + add_to_c),
-                    dtype=np.uint8)
-            else:
-                self.parent().po.converted_video = np.zeros(
-                    (len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c, self.parent().po.right[i] - self.parent().po.left[i] + add_to_c),
-                    dtype=float)
-            if not self.parent().po.vars['already_greyscale']:
-                self.parent().po.visu = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
-                    self.parent().po.right[i] - self.parent().po.left[i] + add_to_c, 3), dtype=np.uint8)
-            if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
-                if self.parent().po.vars['lose_accuracy_to_save_memory']:
-                    self.parent().po.converted_video2 = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
-                        self.parent().po.right[i] - self.parent().po.left[i] + add_to_c), dtype=np.uint8)
-                else:
-                    self.parent().po.converted_video2 = np.zeros((len(self.parent().po.data_list), self.parent().po.bot[i] - self.parent().po.top[i] + add_to_c,
-                        self.parent().po.right[i] - self.parent().po.left[i] + add_to_c), dtype=float)
-            first_dict = TDict()
-            second_dict = TDict()
-            c_spaces = []
-            for k, v in self.parent().po.vars['convert_for_motion'].items():
-                if k != 'logical' and v.sum() > 0:
-                    if k[-1] != '2':
-                        first_dict[k] = v
-                        c_spaces.append(k)
-                    else:
-                        second_dict[k[:-1]] = v
-                        c_spaces.append(k[:-1])
+
             prev_img = None
-            background = None
-            background2 = None
             pat_tracker = PercentAndTimeTracker(self.parent().po.vars['img_number'])
+            is_landscape = self.parent().po.first_image.image.shape[0] < self.parent().po.first_image.image.shape[1]
             for image_i, image_name in enumerate(self.parent().po.data_list):
                 current_percentage, eta = pat_tracker.get_progress()
-
-                img =
-
-
-
-
-                    self.parent().po.first_image.crop_coord[2]:self.parent().po.first_image.crop_coord[3], :]
-                img = img[self.parent().po.top[arena - 1]: (self.parent().po.bot[arena - 1] + add_to_c),
-                      self.parent().po.left[arena - 1]: (self.parent().po.right[arena - 1] + add_to_c), :]
-
-                self.image_from_thread.emit({"message": f"Video loading: {current_percentage}%{eta}", "current_image": img})
+                reduce_image_dim = self.parent().po.vars['already_greyscale'] and self.parent().po.reduce_image_dim
+                img, prev_img = read_rotate_crop_and_reduce_image(image_name, prev_img,
+                    self.parent().po.first_image.crop_coord, cr, self.parent().po.all['raw_images'], is_landscape,
+                    reduce_image_dim)
+                self.image_from_thread.emit(
+                    {"message": f"Video loading: {current_percentage}%{eta}", "current_image": img})
                 if self.parent().po.vars['already_greyscale']:
-
-                    self.parent().po.converted_video[image_i, ...] = img[:, :, 0]
-                    else:
-                        self.parent().po.converted_video[image_i, ...] = img
+                    self.parent().po.converted_video[image_i, ...] = img
                 else:
                     self.parent().po.visu[image_i, ...] = img
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                if greyscale_image2 is not None and self.parent().po.vars['filter_spec']['filter2_type'] != "":
-                    greyscale_image2 = apply_filter(greyscale_image2,
-                        self.parent().po.vars['filter_spec']['filter2_type'],
-                        self.parent().po.vars['filter_spec']['filter2_param'],
-                        self.parent().po.vars['lose_accuracy_to_save_memory'])
-                self.parent().po.converted_video[image_i, ...] = greyscale_image
-                if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
-                    self.parent().po.converted_video2[image_i, ...] = greyscale_image2
-
-
-
-            # csc = OneImageAnalysis(img)
-            # if self.parent().po.vars['subtract_background']:
-            #     if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
-            #         csc.generate_color_space_combination(c_spaces, first_dict, second_dict,
-            #             self.parent().po.vars['background_list'][i],
-            #             self.parent().po.vars['background_list2'][i])
-            #     else:
-            #         csc.generate_color_space_combination(c_spaces, first_dict, second_dict,
-            #             self.parent().po.vars['background_list'][i], None)
-            # else:
-            #     csc.generate_color_space_combination(c_spaces, first_dict, second_dict, None, None)
-            # # self.parent().po.converted_video[image_i, ...] = csc.image
-            # if self.parent().po.vars['lose_accuracy_to_save_memory']:
-            #     self.parent().po.converted_video[image_i, ...] = bracket_to_uint8_image_contrast(csc.image)
-            # else:
-            #     self.parent().po.converted_video[image_i, ...] = csc.image
-            # if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
-            #     if self.parent().po.vars['lose_accuracy_to_save_memory']:
-            #         self.parent().po.converted_video2[image_i, ...] = bracket_to_uint8_image_contrast(csc.image2)
-            #     else:
-            #         self.parent().po.converted_video2[image_i, ...] = csc.image2
-
-
-
-            # self.parent().po.load_one_arena(arena)
+            if not self.parent().po.vars['already_greyscale']:
+                msg = "Video conversion"
+                if background is not None:
+                    msg += ", background subtraction"
+                if self.parent().po.vars['filter_spec'] is not None:
+                    msg += ", filtering"
+                msg += ", wait..."
+                self.image_from_thread.emit({"message": msg, "current_image": img})
+                converted_videos = convert_subtract_and_filter_video(self.parent().po.visu,
+                    self.parent().po.vars['convert_for_motion'],
+                    background, background2,
+                    self.parent().po.vars['lose_accuracy_to_save_memory'],
+                    self.parent().po.vars['filter_spec'])
+                self.parent().po.converted_video, self.parent().po.converted_video2 = converted_videos
+
             save_loaded_video = True
             if self.parent().po.vars['already_greyscale']:
                 self.videos_in_ram = self.parent().po.converted_video
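In 0.2.6 the inline frame-building code above is replaced by helper calls (`read_one_arena`, `create_empty_videos`, `read_rotate_crop_and_reduce_image`, `convert_subtract_and_filter_video`). A stripped-down sketch of what such a per-arena loading loop does, using plain OpenCV and NumPy (the function and variable names here are illustrative, not the package's API):

```python
import cv2
import numpy as np


def load_arena(image_paths, top, bot, left, right, greyscale=False):
    """Read every frame, crop it to one arena, and stack into a video array."""
    n = len(image_paths)
    shape = (n, bot - top, right - left) if greyscale else (n, bot - top, right - left, 3)
    video = np.zeros(shape, dtype=np.uint8)
    for t, path in enumerate(image_paths):
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE if greyscale else cv2.IMREAD_COLOR)
        video[t] = img[top:bot, left:right]  # crop to the arena bounding box
    return video
```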
@@ -773,9 +1160,8 @@ class OneArenaThread(QtCore.QThread):
                 if self.parent().po.vars['convert_for_motion']['logical'] == 'None':
                     self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video)]
                 else:
-                    self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video),
-
-                    # videos = [self.parent().po.video.copy(), self.parent().po.converted_video.copy()]
+                    self.videos_in_ram = [self.parent().po.visu, deepcopy(self.parent().po.converted_video),
+                                          deepcopy(self.parent().po.converted_video2)]
         else:
             logging.info(f"Starting to load arena n°{arena} from .npy saved file")
             self.videos_in_ram = None
@@ -800,42 +1186,97 @@ class OneArenaThread(QtCore.QThread):
         self.when_loading_finished.emit(save_loaded_video)
 
         if self.parent().po.motion.visu is None:
-            visu = self.parent().po.motion.converted_video
-            visu -= np.min(visu)
-            visu = 255 * (visu / np.max(visu))
-            visu = np.round(visu).astype(np.uint8)
+            visu = bracket_to_uint8_image_contrast(self.parent().po.motion.converted_video)
             if len(visu.shape) == 3:
                 visu = np.stack((visu, visu, visu), axis=3)
             self.parent().po.motion.visu = visu
 
     def detection(self):
+        """
+        Perform quick video segmentation and update motion-detection parameters.
+
+        This method launches a quick video segmentation and updates the motion-detection
+        parameters accordingly. It duplicates the converted video(s) depending on the
+        logical combination in use, and computes the requested video options.
+        """
         self.message_from_thread_starting.emit(f"Quick video segmentation")
         self.parent().po.motion.converted_video = deepcopy(self.parent().po.converted_video)
         if self.parent().po.vars['convert_for_motion']['logical'] != 'None':
             self.parent().po.motion.converted_video2 = deepcopy(self.parent().po.converted_video2)
-        # self.parent().po.motion.detection(compute_all_possibilities=True)
         self.parent().po.motion.detection(compute_all_possibilities=self.parent().po.all['compute_all_options'])
         if self.parent().po.all['compute_all_options']:
             self.parent().po.computed_video_options = np.ones(5, bool)
         else:
             self.parent().po.computed_video_options = np.zeros(5, bool)
             self.parent().po.computed_video_options[self.parent().po.all['video_option']] = True
-        # if self.parent().po.vars['color_number'] > 2:
 
     def post_processing(self):
-
-
-
-
-
-
-
-
-
-
-
-
+        """
+        Handle post-processing operations for motion analysis and video processing.
+
+        Extended Description
+        --------------------
+        This method is responsible for managing various post-processing steps,
+        including video segmentation, contour detection, and updating motion analysis
+        parameters. It processes different video options based on the configuration
+        settings and handles motion detection failures by emitting appropriate signals.
+
+        Notes
+        -----
+        This method performs a series of operations that are computationally intensive.
+        It leverages NumPy and OpenCV for image processing tasks. The method assumes
+        that the parent object has been properly initialized with all required attributes
+        and configurations.
+
+        Attributes
+        ----------
+        self.parent().po.motion.smoothed_video : NoneType
+            A placeholder for the smoothed video data.
+        self.parent().po.vars['already_greyscale'] : bool
+            Indicates if the video is already in greyscale format.
+        self.parent().po.vars['convert_for_motion']['logical'] : str
+            Indicates the logical conversion method for motion analysis.
+        self.parent().po.converted_video : ndarray
+            The converted video data for motion analysis.
+        self.parent().po.converted_video2 : ndarray
+            A second converted video used for motion analysis.
+        self.parent().po.visu : ndarray
+            The visual representation of the video data.
+        self.videos_in_ram : list or tuple
+            The videos currently in RAM, either a single video or several.
+        self.parent().po.vars['color_number'] : int
+            The number of colors in the video.
+        self.parent().po.all['compute_all_options'] : bool
+            Indicates if all options should be computed.
+        self.parent().po.all['video_option'] : int
+            The current video option to be processed.
+        self.parent().po.newly_explored_area : ndarray
+            The area newly explored during motion detection.
+        self.parent().po.motion.start : int
+            The start frame for motion analysis.
+        self.parent().po.motion.step : int
+            The step interval in frames for motion analysis.
+        self.parent().po.motion.lost_frames : int
+            The number of lost frames during motion analysis.
+        self.parent().po.motion.substantial_growth : int
+            The substantial growth threshold for motion detection.
+        self.parent().po.all['arena'] : int
+            The arena identifier used in motion analysis.
+        self.parent().po.vars['do_fading'] : bool
+            Indicates if fading effects should be applied.
+        self.parent().po.motion.dims : tuple
+            The dimensions of the motion data.
+        analyses_to_compute : list or ndarray
+            List of analysis options to compute based on configuration settings.
+        args : list
+            Arguments used for initializing the MotionAnalysis object.
+        analysis_i : MotionAnalysis
+            An instance of MotionAnalysis for each segmentation to be processed.
+        mask : tuple or NoneType
+            The mask used for the different segmentation options.
 
+        """
+        self.parent().po.motion.smoothed_video = None
         if self.parent().po.vars['color_number'] > 2:
             analyses_to_compute = [0]
         else:
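The four-line min–max normalization removed above is collapsed into `bracket_to_uint8_image_contrast`. A sketch of what such a helper computes (the name matches the call in the diff; the body is a plausible reconstruction of the removed inline code, not the package source):

```python
import numpy as np


def to_uint8_contrast(array: np.ndarray) -> np.ndarray:
    # Stretch values to the full 0-255 range, like the removed inline code:
    # subtract the minimum, scale by the maximum, then round to uint8.
    out = array.astype(float)
    out -= out.min()
    peak = out.max()
    if peak > 0:  # avoid dividing by zero on a constant image
        out = 255 * (out / peak)
    return np.round(out).astype(np.uint8)
```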
@@ -850,14 +1291,14 @@ class OneArenaThread(QtCore.QThread):
         args = [self.parent().po.all['arena'] - 1, self.parent().po.all['arena'], self.parent().po.vars,
                 False, False, False, self.videos_in_ram]
         if self.parent().po.vars['do_fading']:
-            self.parent().po.newly_explored_area = np.zeros((self.parent().po.motion.dims[0], 5), np.
+            self.parent().po.newly_explored_area = np.zeros((self.parent().po.motion.dims[0], 5), np.int64)
         for seg_i in analyses_to_compute:
             analysis_i = MotionAnalysis(args)
             r = weakref.ref(analysis_i)
-            analysis_i.
+            analysis_i.segmented = np.zeros(analysis_i.converted_video.shape[:3], dtype=np.uint8)
             if self.parent().po.all['compute_all_options']:
                 if seg_i == 0:
-                    analysis_i.
+                    analysis_i.segmented = self.parent().po.motion.segmented
                 else:
                     if seg_i == 1:
                         mask = self.parent().po.motion.luminosity_segmentation
@@ -867,10 +1308,10 @@ class OneArenaThread(QtCore.QThread):
                         mask = self.parent().po.motion.logical_and
                     elif seg_i == 4:
                         mask = self.parent().po.motion.logical_or
-                    analysis_i.
+                    analysis_i.segmented[mask[0], mask[1], mask[2]] = 1
             else:
                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
-                    analysis_i.
+                    analysis_i.segmented = self.parent().po.motion.segmented
 
             analysis_i.start = time_parameters[0]
             analysis_i.step = time_parameters[1]
@@ -879,10 +1320,8 @@ class OneArenaThread(QtCore.QThread):
             analysis_i.origin_idx = self.parent().po.motion.origin_idx
             analysis_i.initialize_post_processing()
             analysis_i.t = analysis_i.start
-            # print_progress = ForLoopCounter(self.start)
 
             while self._isRunning and analysis_i.t < analysis_i.binary.shape[0]:
-                # analysis_i.update_shape(True)
                 analysis_i.update_shape(False)
                 contours = np.nonzero(
                     cv2.morphologyEx(analysis_i.binary[analysis_i.t - 1, :, :], cv2.MORPH_GRADIENT, cross_33))
@@ -902,7 +1341,7 @@ class OneArenaThread(QtCore.QThread):
 
             if self.parent().po.all['compute_all_options']:
                 if seg_i == 0:
-                    self.parent().po.motion.
+                    self.parent().po.motion.segmented = analysis_i.binary
                 elif seg_i == 1:
                     self.parent().po.motion.luminosity_segmentation = np.nonzero(analysis_i.binary)
                 elif seg_i == 2:
@@ -912,21 +1351,54 @@ class OneArenaThread(QtCore.QThread):
                 elif seg_i == 4:
                     self.parent().po.motion.logical_or = np.nonzero(analysis_i.binary)
             else:
-                self.parent().po.motion.
-
-                # self.message_from_thread_starting.emit("If there are problems, change some parameters and try again")
+                self.parent().po.motion.segmented = analysis_i.binary
         self.when_detection_finished.emit("Post processing done, ready to see the result")
 
 
 
 class VideoReaderThread(QtCore.QThread):
+    """
+    Thread for reading a video in the GUI.
+
+    Signals
+    -------
+    message_from_thread : Signal(dict)
+        Signal emitted during the video reading to display images in the GUI.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(dict)
 
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for reading a video in the GUI.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of the CellectsMainWidget class. Default is None.
+        """
         super(VideoReaderThread, self).__init__(parent)
         self.setParent(parent)
 
     def run(self):
+        """
+        Summary
+        -------
+        Run the video analysis process, applying segmentation and contouring to each frame.
+
+        Extended Description
+        --------------------
+        This method performs video analysis by segmenting frames based on the selected options and overlaying contours.
+        It also updates the UI with progress messages.
+
+        Notes
+        -----
+        This method emits signals to update the UI with progress messages and current images.
+        It uses OpenCV for morphological operations on video frames.
+        """
         video_analysis = deepcopy(self.parent().po.motion.visu)
         self.message_from_thread.emit(
             {"current_image": video_analysis[0, ...], "message": f"Video preparation, wait..."})
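These worker threads report progress by emitting a single dict payload ({"message": ..., "current_image": ...}) rather than one signal per field, so the GUI slot can update the label and the preview together. A minimal sketch of that pattern (widget and slot names are illustrative, and the import again assumes a PySide-style binding):

```python
import numpy as np
from PySide6 import QtCore  # assumption: a Qt binding exposing QtCore.Signal


class ReaderThread(QtCore.QThread):
    # One signal carries both the status text and the frame to display.
    message_from_thread = QtCore.Signal(dict)

    def run(self):
        frame = np.zeros((240, 320, 3), dtype=np.uint8)  # stand-in frame
        self.message_from_thread.emit(
            {"message": "Video preparation, wait...", "current_image": frame})


# GUI side: connect once, unpack the payload in the slot.
def on_message(payload: dict) -> None:
    print(payload["message"], payload["current_image"].shape)
```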
@@ -934,7 +1406,7 @@ class VideoReaderThread(QtCore.QThread):
 
         if self.parent().po.all['compute_all_options']:
             if self.parent().po.all['video_option'] == 0:
-                video_mask = self.parent().po.motion.
+                video_mask = self.parent().po.motion.segmented
             else:
                 if self.parent().po.all['video_option'] == 1:
                     mask = self.parent().po.motion.luminosity_segmentation
@@ -949,18 +1421,16 @@ class VideoReaderThread(QtCore.QThread):
         else:
             video_mask = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
             if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
-                video_mask = self.parent().po.motion.
+                video_mask = self.parent().po.motion.segmented
 
         if self.parent().po.load_quick_full == 1:
             video_mask = np.cumsum(video_mask.astype(np.uint32), axis=0)
             video_mask[video_mask > 0] = 1
             video_mask = video_mask.astype(np.uint8)
         logging.info(f"sum: {video_mask.sum()}")
-        # timings = genfromtxt("timings.csv")
         for t in np.arange(self.parent().po.motion.dims[0]):
             mask = cv2.morphologyEx(video_mask[t, ...], cv2.MORPH_GRADIENT, cross_33)
             mask = np.stack((mask, mask, mask), axis=2)
-            # current_image[current_image > 0] = self.parent().po.vars['contour_color']
             current_image = deepcopy(video_analysis[t, ...])
             current_image[mask > 0] = self.parent().po.vars['contour_color']
             self.message_from_thread.emit(
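Two tricks in this loop are worth noting: `np.cumsum` over the time axis turns a per-frame mask into an ever-growing "explored so far" mask, and `cv2.MORPH_GRADIENT` extracts a thin contour to paint onto the display frame. A self-contained sketch of both (array sizes and the contour color are arbitrary; `cross_33` in the package is presumably a predefined 3x3 cross, built here explicitly):

```python
import cv2
import numpy as np

rng = np.random.default_rng(0)
video_mask = (rng.random((10, 64, 64)) > 0.99).astype(np.uint8)  # sparse per-frame mask

# Cumulative union over time: a pixel stays "on" once it has ever been detected.
cumulative = np.cumsum(video_mask.astype(np.uint32), axis=0)
cumulative[cumulative > 0] = 1
cumulative = cumulative.astype(np.uint8)

# Morphological gradient (dilation minus erosion) yields the mask outline.
cross_33 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
contour = cv2.morphologyEx(cumulative[-1], cv2.MORPH_GRADIENT, cross_33)

frame = np.zeros((64, 64, 3), dtype=np.uint8)
frame[contour > 0] = (0, 0, 255)  # paint the contour in red (BGR)
```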
@@ -970,23 +1440,52 @@ class VideoReaderThread(QtCore.QThread):
 
 
 class ChangeOneRepResultThread(QtCore.QThread):
+    """
+    Thread for modifying the results of one arena.
+
+    Signals
+    -------
+    message_from_thread : Signal(str)
+        Signal emitted when the result is changed.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(str)
 
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for changing the saved results of a particular arena in the current folder.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of the CellectsMainWidget class. Default is None.
+        """
        super(ChangeOneRepResultThread, self).__init__(parent)
        self.setParent(parent)
 
     def run(self):
+        """
+        Modify the motion and results of an arena.
+
+        Extended Description
+        --------------------
+        This method performs various operations on the motion data of an arena,
+        including binary mask creation, descriptor computation, and transition
+        detection. It also handles optional computations like fading effects and
+        segmentation based on different video options.
+        """
         self.message_from_thread.emit(
             f"Arena n°{self.parent().po.all['arena']}: modifying its results...")
-        # self.parent().po.motion2 = deepcopy(self.parent().po.motion)
         if self.parent().po.motion.start is None:
             self.parent().po.motion.binary = np.repeat(np.expand_dims(self.parent().po.motion.origin, 0),
                 self.parent().po.motion.converted_video.shape[0], axis=0).astype(np.uint8)
         else:
             if self.parent().po.all['compute_all_options']:
                 if self.parent().po.all['video_option'] == 0:
-                    self.parent().po.motion.binary = self.parent().po.motion.
+                    self.parent().po.motion.binary = self.parent().po.motion.segmented
                 else:
                     if self.parent().po.all['video_option'] == 1:
                         mask = self.parent().po.motion.luminosity_segmentation
@@ -1001,31 +1500,79 @@ class ChangeOneRepResultThread(QtCore.QThread):
             else:
                 self.parent().po.motion.binary = np.zeros(self.parent().po.motion.dims[:3], dtype=np.uint8)
                 if self.parent().po.computed_video_options[self.parent().po.all['video_option']]:
-                    self.parent().po.motion.binary = self.parent().po.motion.
+                    self.parent().po.motion.binary = self.parent().po.motion.segmented
 
         if self.parent().po.vars['do_fading']:
             self.parent().po.motion.newly_explored_area = self.parent().po.newly_explored_area[:, self.parent().po.all['video_option']]
         self.parent().po.motion.max_distance = 9 * self.parent().po.vars['detection_range_factor']
         self.parent().po.motion.get_descriptors_from_binary(release_memory=False)
         self.parent().po.motion.detect_growth_transitions()
-        self.parent().po.motion.
+        self.parent().po.motion.networks_analysis(False)
         self.parent().po.motion.study_cytoscillations(False)
         self.parent().po.motion.fractal_descriptions()
-        self.parent().po.motion.get_descriptors_summary()
         self.parent().po.motion.change_results_of_one_arena()
         self.parent().po.motion = None
         # self.parent().po.motion = None
-        self.message_from_thread.emit("")
+        self.message_from_thread.emit(f"Arena n°{self.parent().po.all['arena']}: analysis finished.")
 
 
 class WriteVideoThread(QtCore.QThread):
-
+    """
+    Thread for writing one video per arena in the current folder.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for writing the video corresponding to the current arena.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of the CellectsMainWidget class. Default is None.
+        """
         super(WriteVideoThread, self).__init__(parent)
         self.setParent(parent)
 
     def run(self):
-
+        """
+        Write the visualization or converted video of a specific arena and save it as an .npy file.
+
+        Parameters
+        ----------
+        self : object
+            The instance of the class containing this method.
+
+        Other Parameters
+        ----------------
+        arena : str
+            Name of the arena.
+
+        already_greyscale : bool
+            Flag indicating if the video is already in greyscale format.
+            This parameter must be set as a variable named 'already_greyscale' in the instance
+            variables of the parent object.
+
+        Returns
+        -------
+        None
+
+        Raises
+        ------
+        FileNotFoundError
+            When the path to write the video is not specified.
+
+        Examples
+        --------
+        >>> self.parent().po.vars['already_greyscale'] = False
+        >>> self.run()
+        >>> # Expects to write a visualization video as 'ind_arena.npy'
+        >>> self.parent().po.vars['already_greyscale'] = True
+        >>> self.run()
+        >>> # Expects to write a converted video as 'ind_arena.npy'
+        """
         arena = self.parent().po.all['arena']
         if not self.parent().po.vars['already_greyscale']:
             write_video(self.parent().po.visu, f'ind_{arena}.npy')
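Cellects stores per-arena videos as raw NumPy arrays on disk (`ind_<arena>.npy`) rather than encoded video files, which keeps frames lossless and trivially memory-mappable. A sketch of that storage round-trip (file name and shape are illustrative):

```python
import numpy as np

video = np.zeros((100, 240, 320, 3), dtype=np.uint8)  # (frames, h, w, channels)
np.save('ind_1.npy', video)                            # one file per arena

# Reload lazily: mmap_mode avoids pulling the whole video into RAM at once.
reloaded = np.load('ind_1.npy', mmap_mode='r')
assert reloaded.shape == video.shape
```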
@@ -1034,19 +1581,61 @@ class WriteVideoThread(QtCore.QThread):
 
 
 class RunAllThread(QtCore.QThread):
+    """
+    Thread for running the analysis on all arenas of the current folder.
+
+    Signals
+    -------
+    message_from_thread : Signal(str)
+        Signal emitted to send status information to the user through the GUI.
+    image_from_thread : Signal(dict)
+        Signal emitted to send images showing the current status of the analysis to the GUI.
+
+    Notes
+    -----
+    This class uses `QThread` to manage the process asynchronously.
+    """
     message_from_thread = QtCore.Signal(str)
     image_from_thread = QtCore.Signal(dict)
 
     def __init__(self, parent=None):
+        """
+        Initialize the worker thread for running a complete analysis on one folder,
+        or on a folder containing several folders.
+
+        Parameters
+        ----------
+        parent : QObject, optional
+            The parent object of this thread instance. In use, an instance of the CellectsMainWidget class. Default is None.
+        """
         super(RunAllThread, self).__init__(parent)
         self.setParent(parent)
 
     def run(self):
+        """
+        Run the analysis process for video writing and motion analysis.
+
+        This method manages the overall flow of the analysis, including setting up
+        folders, loading data, writing videos from images, and performing motion
+        analysis. It handles various conditions, like checking whether the specimen
+        number matches expectations or whether several experiments are ready to run.
+
+        Returns
+        -------
+        dict
+            A dictionary with 'continue' (bool, whether the analysis should
+            continue) and 'message' (str, the current status).
+
+        Notes
+        -----
+        This method uses several internal methods like `set_current_folder`,
+        `run_video_writing`, and `run_motion_analysis` to perform the analysis steps.
+        It also checks various conditions based on parent object attributes.
+        """
         analysis_status = {"continue": True, "message": ""}
         message = self.set_current_folder(0)
 
         if self.parent().po.first_exp_ready_to_run:
-
             self.message_from_thread.emit(message + ": Write videos...")
             if not self.parent().po.vars['several_blob_per_arena'] and self.parent().po.sample_number != len(self.parent().po.bot):
                 analysis_status["continue"] = False
@@ -1074,7 +1663,6 @@ class RunAllThread(QtCore.QThread):
                 self.parent().po.first_image = None
                 self.parent().po.last_im = None
                 self.parent().po.last_image = None
-                self.parent().po.videos = None
                 self.parent().po.top = None
 
                 message = self.set_current_folder(exp_i)
@@ -1095,14 +1683,7 @@ class RunAllThread(QtCore.QThread):
                 analysis_status = self.run_motion_analysis(message)
 
                 if not analysis_status["continue"]:
-                    # self.message_from_thread.emit(analysis_status["message"])
                     break
-                # if not continue_analysis:
-                #     self.message_from_thread.emit(f"Error: wrong folder or parameters")
-                #     break
-                # if not enough_memory:
-                #     self.message_from_thread.emit(f"Error: not enough memory")
-                #     break
             print(self.parent().po.vars['convert_for_motion'])
             if analysis_status["continue"]:
                 if self.parent().po.all['folder_number'] > 1:
@@ -1127,12 +1708,37 @@ class RunAllThread(QtCore.QThread):
             self.parent().po.update_folder_id(self.parent().po.all['first_folder_sample_number'])
         return message
 
-    def pre_processing(self):
+    def pre_processing(self) -> dict:
+        """
+        Pre-process the video data for further analysis.
+
+        Extended Description
+        --------------------
+        This method performs several preprocessing steps on the video data, including image segmentation,
+        cropping, background subtraction, and origin detection. It also handles errors related to image analysis
+        and manual delineation.
+
+        Returns
+        -------
+        dict
+            A dictionary containing `continue` (bool) and `message` (str). If the analysis can continue, `continue`
+            is True; otherwise, it is False and a descriptive message is provided.
+
+        Raises
+        ------
+        ValueError
+            When the correct number of cells cannot be detected in the first image.
+
+        Notes
+        -----
+        * The method logs important preprocessing steps using `logging.info`.
+        * Assumes that the parent object (`self.parent().po`) has the methods and attributes required for preprocessing.
+        """
         analysis_status = {"continue": True, "message": ""}
         logging.info("Pre-processing has started")
         if len(self.parent().po.data_list) > 0:
             self.parent().po.get_first_image()
-            self.parent().po.
+            self.parent().po.fast_first_image_segmentation()
             self.parent().po.cropping(is_first_image=True)
             self.parent().po.get_average_pixel_size()
             try:
@@ -1157,7 +1763,7 @@ class RunAllThread(QtCore.QThread):
             else:
                 self.parent().po.get_origins_and_backgrounds_lists()
                 self.parent().po.get_last_image()
-                self.parent().po.
+                self.parent().po.fast_last_image_segmentation()
                 self.parent().po.find_if_lighter_background()
                 return analysis_status
         else:
@@ -1165,26 +1771,45 @@ class RunAllThread(QtCore.QThread):
             analysis_status["continue"] = False
             return analysis_status
 
-    def run_video_writing(self, message):
+    def run_video_writing(self, message: str) -> dict:
+        """
+        Initiate the process of writing videos from image data.
+
+        Parameters
+        ----------
+        message : str
+            A string to emit as a status update during video writing.
+
+        Returns
+        -------
+        dict
+            A dictionary containing the analysis status with keys:
+            - "continue": bool indicating whether to continue video writing
+            - "message": str providing a status or error message
+
+        Raises
+        ------
+        FileNotFoundError
+            If an image file specified in `data_list` does not exist.
+        OSError
+            If there is an issue writing to disk, such as when the disk is full.
+
+        Notes
+        -----
+        This function manages video writing in batches, checking available memory
+        and handling errors related to file sizes or missing images.
+        """
         analysis_status = {"continue": True, "message": ""}
         look_for_existing_videos = glob('ind_' + '*' + '.npy')
         there_already_are_videos = len(look_for_existing_videos) == len(self.parent().po.vars['analyzed_individuals'])
         logging.info(f"{len(look_for_existing_videos)} .npy video files found for {len(self.parent().po.vars['analyzed_individuals'])} arenas to analyze")
-        do_write_videos = not there_already_are_videos or (
-            there_already_are_videos and self.parent().po.all['overwrite_unaltered_videos'])
+        do_write_videos = not self.parent().po.all['im_or_vid'] and (not there_already_are_videos or (there_already_are_videos and self.parent().po.all['overwrite_unaltered_videos']))
         if do_write_videos:
             logging.info(f"Starting video writing")
             # self.videos.write_videos_as_np_arrays(self.data_list, self.vars['convert_for_motion'], in_colors=self.vars['save_in_colors'])
             in_colors = not self.parent().po.vars['already_greyscale']
-            self.parent().po.
-
-                self.parent().po.all['raw_images'])
-            self.parent().po.videos.left = self.parent().po.left
-            self.parent().po.videos.right = self.parent().po.right
-            self.parent().po.videos.top = self.parent().po.top
-            self.parent().po.videos.bot = self.parent().po.bot
-            self.parent().po.videos.first_image.shape_number = self.parent().po.sample_number
-            bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining = self.parent().po.videos.prepare_video_writing(
+            self.parent().po.first_image.shape_number = self.parent().po.sample_number
+            bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining, use_list_of_vid, is_landscape = self.parent().po.prepare_video_writing(
                 self.parent().po.data_list, self.parent().po.vars['min_ram_free'], in_colors)
             if analysis_status["continue"]:
                 # Check that there is enough available RAM for one video per bunch and ROM for all videos
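`prepare_video_writing` splits the arenas into "bunches" so that only a bounded amount of RAM is occupied by in-progress videos at any time. A simplified sketch of that sizing arithmetic (all numbers and names here are illustrative, not the package's implementation):

```python
def plan_bunches(n_videos: int, bytes_per_video: int, ram_budget: int):
    """Return (bunch_count, videos_per_bunch, remainder) under a RAM budget."""
    videos_per_bunch = max(1, ram_budget // bytes_per_video)
    videos_per_bunch = min(videos_per_bunch, n_videos)
    bunch_count = n_videos // videos_per_bunch
    remainder = n_videos - bunch_count * videos_per_bunch
    return bunch_count + (1 if remainder else 0), videos_per_bunch, remainder


# 30 arenas of ~0.5 GB each with a 4 GB budget -> 8 videos per bunch, 4 bunches.
print(plan_bunches(30, bytes_per_video=500_000_000, ram_budget=4_000_000_000))
```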
@@ -1192,14 +1817,13 @@ class RunAllThread(QtCore.QThread):
                 pat_tracker1 = PercentAndTimeTracker(bunch_nb * self.parent().po.vars['img_number'])
                 pat_tracker2 = PercentAndTimeTracker(len(self.parent().po.vars['analyzed_individuals']))
                 arena_percentage = 0
-                is_landscape = self.parent().po.first_image.image.shape[0] < self.parent().po.first_image.image.shape[1]
                 for bunch in np.arange(bunch_nb):
                     # Update the labels of arenas and the video_bunch to write
                     if bunch == (bunch_nb - 1) and remaining > 0:
                         arena = np.arange(bunch * video_nb_per_bunch, bunch * video_nb_per_bunch + remaining)
                     else:
                         arena = np.arange(bunch * video_nb_per_bunch, (bunch + 1) * video_nb_per_bunch)
-                    if
+                    if use_list_of_vid:
                         video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in arena]
                     else:
                         video_bunch = np.zeros(np.append(sizes[0, :], len(arena)), dtype=np.uint8)
@@ -1217,9 +1841,9 @@ class RunAllThread(QtCore.QThread):
 
                         for arena_i, arena_name in enumerate(arena):
                             try:
-                                sub_img = img[self.parent().po.top[arena_name]:
-                                    self.parent().po.left[arena_name]:
-                                if
+                                sub_img = img[self.parent().po.top[arena_name]: self.parent().po.bot[arena_name],
+                                    self.parent().po.left[arena_name]: self.parent().po.right[arena_name], ...]
+                                if use_list_of_vid:
                                     video_bunch[arena_i][image_i, ...] = sub_img
                                 else:
                                     if len(video_bunch.shape) == 5:
@@ -1227,9 +1851,9 @@ class RunAllThread(QtCore.QThread):
                                     else:
                                         video_bunch[image_i, :, :, arena_i] = sub_img
                             except ValueError:
-                                analysis_status["message"] = f"
+                                analysis_status["message"] = f"Some images have incorrect size, reset all settings in advanced parameters"
                                 analysis_status["continue"] = False
-                                logging.info(f"
+                                logging.info(f"Reset all settings in advanced parameters")
                                 break
                         if not analysis_status["continue"]:
                             break
@@ -1240,7 +1864,7 @@ class RunAllThread(QtCore.QThread):
                         try:
                             arena_percentage, eta = pat_tracker2.get_progress()
                             self.message_from_thread.emit(message + f" Step 1/2: Video writing ({np.round((image_percentage + arena_percentage) / 2, 2)}%)")# , ETA {remaining_time}
-                            if
+                            if use_list_of_vid:
                                 np.save(vid_names[arena_name], video_bunch[arena_i])
                             else:
                                 if len(video_bunch.shape) == 5:
@@ -1254,9 +1878,8 @@ class RunAllThread(QtCore.QThread):
             self.parent().po.all['overwrite_unaltered_videos'] = False
             self.parent().po.save_variable_dict()
             self.parent().po.save_data_to_run_cellects_quickly()
-            analysis_status["
-
-            del self.parent().po.videos
+            if analysis_status["continue"]:
+                analysis_status["message"] = f"Video writing complete."
             return analysis_status
         else:
             analysis_status["continue"] = False
@@ -1286,7 +1909,35 @@ class RunAllThread(QtCore.QThread):
             analysis_status["message"] = f"Cellects is not writing videos: unnecessary"
             return analysis_status
 
-    def run_motion_analysis(self, message):
+    def run_motion_analysis(self, message: str) -> dict:
+        """
+        Run motion analysis on analyzed individuals with optional multiprocessing.
+
+        This method processes video frames to analyze motion attributes of individuals.
+        It can operate in either sequential or parallel mode based on available system
+        resources and configuration settings. Analysis results are saved in multiple
+        output formats.
+
+        Parameters
+        ----------
+        message : str
+            A status message to be displayed during the analysis process.
+
+        Returns
+        -------
+        dict
+            A dictionary containing the status of the motion analysis.
+
+        Raises
+        ------
+        MemoryError
+            If there is insufficient memory to perform the analysis in parallel.
+
+        Notes
+        -----
+        Sequential mode is used when multiprocessing is disabled or only one core
+        is available. Parallel mode utilizes multiple CPU cores for faster processing.
+        """
         analysis_status = {"continue": True, "message": ""}
         logging.info(f"Starting motion analysis with the detection method n°{self.parent().po.all['video_option']}")
         self.parent().po.instantiate_tables()
@@ -1301,7 +1952,6 @@ class RunAllThread(QtCore.QThread):
             for i, arena in enumerate(self.parent().po.vars['analyzed_individuals']):
 
                 l = [i, arena, self.parent().po.vars, True, True, False, None]
-                # l = [0, 1, self.parent().po.vars, True, False, False, None]
                 analysis_i = MotionAnalysis(l)
                 r = weakref.ref(analysis_i)
                 if not self.parent().po.vars['several_blob_per_arena']:
@@ -1311,19 +1961,7 @@ class RunAllThread(QtCore.QThread):
 
                 # Save descriptors in long_format
                 self.parent().po.update_one_row_per_frame(i * self.parent().po.vars['img_number'], arena * self.parent().po.vars['img_number'], analysis_i.one_row_per_frame)
-
-                # Save cytosol_oscillations
-                if not pd.isna(analysis_i.one_descriptor_per_arena["first_move"]):
-                    if self.parent().po.vars['oscilacyto_analysis']:
-                        oscil_i = pd.DataFrame(
-                            np.c_[np.repeat(arena,
-                                analysis_i.clusters_final_data.shape[0]), analysis_i.clusters_final_data],
-                            columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
-                        if self.parent().po.one_row_per_oscillating_cluster is None:
-                            self.parent().po.one_row_per_oscillating_cluster = oscil_i
-                        else:
-                            self.parent().po.one_row_per_oscillating_cluster = pd.concat((self.parent().po.one_row_per_oscillating_cluster, oscil_i))
-
+
                 # Save efficiency visualization
                 self.parent().po.add_analysis_visualization_to_first_and_last_images(i, analysis_i.efficiency_test_1,
                     analysis_i.efficiency_test_2)
@@ -1339,7 +1977,6 @@ class RunAllThread(QtCore.QThread):
 
             logging.info("Starting analysis in parallel")
 
-            # new
             tiii = default_timer()
             arena_number = len(self.parent().po.vars['analyzed_individuals'])
             self.advance = 0
@@ -1378,17 +2015,9 @@ class RunAllThread(QtCore.QThread):
                     self.parent().po.update_one_row_per_arena(results_i['i'], results_i['one_row_per_arena'])
                     # Save descriptors in long_format
                     self.parent().po.update_one_row_per_frame(results_i['i'] * self.parent().po.vars['img_number'],
-                        results_i['
+                        (results_i['i'] + 1) * self.parent().po.vars['img_number'],
                         results_i['one_row_per_frame'])
-
-                    # Save cytosol_oscillations
-                    if self.parent().po.vars['oscilacyto_analysis']:
-                        if self.parent().po.one_row_per_oscillating_cluster is None:
-                            self.parent().po.one_row_per_oscillating_cluster = results_i['one_row_per_oscillating_cluster']
-                        else:
-                            self.parent().po.one_row_per_oscillating_cluster = pd.concat((self.parent().po.one_row_per_oscillating_cluster, results_i['one_row_per_oscillating_cluster']))
-
-                    # Save efficiency visualization
+
                     self.parent().po.add_analysis_visualization_to_first_and_last_images(results_i['i'], results_i['efficiency_test_1'],
                         results_i['efficiency_test_2'])
                     self.image_from_thread.emit(
@@ -1416,6 +2045,26 @@
 
 
 def motion_analysis_process(lower_bound: int, upper_bound: int, vars: dict, subtotals: Queue) -> None:
+    """
+    Motion analysis process for parallel computing.
+
+    Process a group of motion analysis results and store them in a queue.
+
+    Parameters
+    ----------
+    lower_bound : int
+        The lower bound index for the range of analysis.
+    upper_bound : int
+        The upper bound index (exclusive) for the range of analysis.
+    vars : dict
+        Dictionary containing variables and configurations for the motion analysis process.
+    subtotals : Queue
+        A queue to store intermediate results.
+
+    Notes
+    -----
+    This function analyzes a range of arenas based on the provided configuration and stores the grouped results in a queue.
+    """
     grouped_results = []
     for i in range(lower_bound, upper_bound):
         analysis_i = MotionAnalysis([i, i + 1, vars, True, True, False, None])
@@ -1430,19 +2079,8 @@ def motion_analysis_process(lower_bound: int, upper_bound: int, vars: dict, subtotals: Queue) -> None:
         results_i['one_row_per_arena'] = analysis_i.one_descriptor_per_arena
         # Save descriptors in long_format
         results_i['one_row_per_frame'] = analysis_i.one_row_per_frame
-        # Save cytosol_oscillations
 
         results_i['first_move'] = analysis_i.one_descriptor_per_arena["first_move"]
-        if not pd.isna(analysis_i.one_descriptor_per_arena["first_move"]):
-            if vars['oscilacyto_analysis']:
-                results_i['clusters_final_data'] = analysis_i.clusters_final_data
-                results_i['one_row_per_oscillating_cluster'] = pd.DataFrame(
-                    np.c_[np.repeat(arena, analysis_i.clusters_final_data.shape[0]), analysis_i.clusters_final_data],
-                    columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size', 'edge_distance', 'coord_y', 'coord_x'])
-            if vars['fractal_analysis']:
-                results_i['fractal_box_sizes'] = pd.DataFrame(analysis_i.fractal_boxes,
-                    columns=['arena', 'time', 'fractal_box_lengths', 'fractal_box_widths'])
-
         # Save efficiency visualization
         results_i['efficiency_test_1'] = analysis_i.efficiency_test_1
         results_i['efficiency_test_2'] = analysis_i.efficiency_test_2
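`motion_analysis_process` is the unit of work handed to worker processes: each one analyzes a contiguous range of arenas and pushes its grouped results onto a shared queue. A minimal sketch of that fan-out/fan-in pattern with `multiprocessing` (the `analyze` function is a stand-in for `MotionAnalysis`):

```python
from multiprocessing import Process, Queue


def analyze(i: int) -> dict:
    return {"i": i, "area": i * 10}  # stand-in for one MotionAnalysis result


def worker(lower: int, upper: int, subtotals: Queue) -> None:
    # Mirror of motion_analysis_process: group a range, push once.
    subtotals.put([analyze(i) for i in range(lower, upper)])


if __name__ == "__main__":
    queue: Queue = Queue()
    bounds = [(0, 3), (3, 6)]  # two workers, three arenas each
    procs = [Process(target=worker, args=(lo, hi, queue)) for lo, hi in bounds]
    for p in procs:
        p.start()
    results = [r for _ in procs for r in queue.get()]  # fan-in before join
    for p in procs:
        p.join()
    print(sorted(r["i"] for r in results))
```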