cellects-0.1.2-py3-none-any.whl
- cellects/__init__.py +0 -0
- cellects/__main__.py +49 -0
- cellects/config/__init__.py +0 -0
- cellects/config/all_vars_dict.py +155 -0
- cellects/core/__init__.py +0 -0
- cellects/core/cellects_paths.py +31 -0
- cellects/core/cellects_threads.py +1451 -0
- cellects/core/motion_analysis.py +2010 -0
- cellects/core/one_image_analysis.py +1061 -0
- cellects/core/one_video_per_blob.py +540 -0
- cellects/core/program_organizer.py +1316 -0
- cellects/core/script_based_run.py +154 -0
- cellects/gui/__init__.py +0 -0
- cellects/gui/advanced_parameters.py +1258 -0
- cellects/gui/cellects.py +189 -0
- cellects/gui/custom_widgets.py +790 -0
- cellects/gui/first_window.py +449 -0
- cellects/gui/if_several_folders_window.py +239 -0
- cellects/gui/image_analysis_window.py +2066 -0
- cellects/gui/required_output.py +232 -0
- cellects/gui/video_analysis_window.py +656 -0
- cellects/icons/__init__.py +0 -0
- cellects/icons/cellects_icon.icns +0 -0
- cellects/icons/cellects_icon.ico +0 -0
- cellects/image_analysis/__init__.py +0 -0
- cellects/image_analysis/cell_leaving_detection.py +54 -0
- cellects/image_analysis/cluster_flux_study.py +102 -0
- cellects/image_analysis/image_segmentation.py +706 -0
- cellects/image_analysis/morphological_operations.py +1635 -0
- cellects/image_analysis/network_functions.py +1757 -0
- cellects/image_analysis/one_image_analysis_threads.py +289 -0
- cellects/image_analysis/progressively_add_distant_shapes.py +508 -0
- cellects/image_analysis/shape_descriptors.py +1016 -0
- cellects/utils/__init__.py +0 -0
- cellects/utils/decorators.py +14 -0
- cellects/utils/formulas.py +637 -0
- cellects/utils/load_display_save.py +1054 -0
- cellects/utils/utilitarian.py +490 -0
- cellects-0.1.2.dist-info/LICENSE.odt +0 -0
- cellects-0.1.2.dist-info/METADATA +132 -0
- cellects-0.1.2.dist-info/RECORD +44 -0
- cellects-0.1.2.dist-info/WHEEL +5 -0
- cellects-0.1.2.dist-info/entry_points.txt +2 -0
- cellects-0.1.2.dist-info/top_level.txt +1 -0
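The diff below adds cellects/core/program_organizer.py (the hunk header declares 1,316 new lines; the listing is truncated partway through). As a reading aid, here is a minimal sketch of the script-based workflow this class drives; every call is taken from the analyze_without_gui method shown below, and the data path is a placeholder:

    from cellects.core.program_organizer import ProgramOrganizer

    po = ProgramOrganizer()
    po.load_variable_dict()                       # saved parameters, or defaults
    po.all['global_pathway'] = "/path/to/images"  # placeholder experiment folder
    po.look_for_data()
    po.load_data_to_run_cellects_quickly()
    if not po.first_exp_ready_to_run:
        po.get_first_image()
        po.fast_image_segmentation(True)
        po.cropping(is_first_image=True)
        po.get_average_pixel_size()
        po.delineate_each_arena()
        po.get_background_to_subtract()
        po.get_origins_and_backgrounds_lists()
        po.get_last_image()
        po.fast_image_segmentation(is_first_image=False)
        po.find_if_lighter_background()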
@@ -0,0 +1,1316 @@
#!/usr/bin/env python3
"""This file contains the class constituting the link between the graphical interface and the computations.
First, Cellects analyzes one image in order to get a color space combination maximizing the contrast between the specimens
and the background.
Second, Cellects automatically delineates each arena.
Third, Cellects writes one video for each arena.
Fourth, Cellects segments each video and applies post-processing algorithms to improve the segmentation.
Fifth, Cellects extracts variables and stores them in .csv files.
"""

import logging
import os
import pickle
import sys
from copy import deepcopy
import cv2
from numba.typed import Dict as TDict
import pandas as pd
import numpy as np
from psutil import virtual_memory
from pathlib import Path
import natsort
from cellects.image_analysis.image_segmentation import generate_color_space_combination
from cellects.utils.load_display_save import extract_time  # named exif
from cellects.image_analysis.one_image_analysis_threads import ProcessFirstImage
from cellects.core.one_image_analysis import OneImageAnalysis
from cellects.utils.load_display_save import PickleRick, read_and_rotate, readim, is_raw_image, read_h5_array, get_h5_keys
from cellects.utils.utilitarian import insensitive_glob, vectorized_len
from cellects.image_analysis.morphological_operations import Ellipse, cross_33
from cellects.core.cellects_paths import CELLECTS_DIR, ALL_VARS_PKL_FILE
from cellects.core.motion_analysis import MotionAnalysis
from cellects.core.one_video_per_blob import OneVideoPerBlob
from cellects.config.all_vars_dict import DefaultDicts
from cellects.image_analysis.shape_descriptors import from_shape_descriptors_class

class ProgramOrganizer:
    def __init__(self):
        """
        This class stores all variables required for analysis, as well as
        the methods to process them.
        Global variables (i.e. those that do not concern the MotionAnalysis)
        are stored directly in self.
        Variables used in the MotionAnalysis class are stored in a dict
        called self.vars.
        """
        if os.path.isfile('PickleRick.pkl'):
            os.remove('PickleRick.pkl')
        if os.path.isfile('PickleRick0.pkl'):
            os.remove('PickleRick0.pkl')
        if os.path.isfile(Path(CELLECTS_DIR.parent / 'PickleRick.pkl')):
            os.remove(Path(CELLECTS_DIR.parent / 'PickleRick.pkl'))
        if os.path.isfile(Path(CELLECTS_DIR.parent / 'PickleRick0.pkl')):
            os.remove(Path(CELLECTS_DIR.parent / 'PickleRick0.pkl'))
        # self.delineation_number = 0
        self.one_arena_done: bool = False
        self.reduce_image_dim: bool = False
        self.first_exp_ready_to_run: bool = False
        self.data_to_save = {'first_image': False, 'coordinates': False, 'exif': False, 'vars': False}
        self.videos = None
        self.motion = None
        self.analysis_instance = None
        self.computed_video_options = np.zeros(5, bool)
        self.vars = {}
        self.all = {}
        self.all['folder_list'] = []
        self.all['first_detection_frame'] = 1
        self.first_im = None
        self.last_im = None
        self.vars['background_list'] = []
        self.starting_blob_hsize_in_pixels = None
        self.vars['first_move_threshold'] = None
        self.vars['convert_for_origin'] = None
        self.vars['convert_for_motion'] = None
        self.current_combination_id = 0
        self.data_list = []
        self.one_row_per_arena = None
        self.one_row_per_frame = None
        self.one_row_per_oscillating_cluster = None
        # self.fractal_box_sizes = None

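    # Persist the parameter dictionaries: self.vars is nested into self.all under 'vars',
    # the masks are dropped unless they must be kept for all folders, and the result is pickled.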
    def save_variable_dict(self):
        logging.info("Save the parameters dictionaries in the Cellects folder")
        self.all['vars'] = self.vars
        all_vars = deepcopy(self.all)
        if not self.all['keep_cell_and_back_for_all_folders']:
            all_vars['bio_mask'] = None
            all_vars['back_mask'] = None
        pickle_rick = PickleRick(0)
        pickle_rick.write_file(all_vars, ALL_VARS_PKL_FILE)

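    # Restore self.all and self.vars from ALL_VARS_PKL_FILE, falling back to the
    # DefaultDicts defaults when the file is missing or cannot be unpickled.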
    def load_variable_dict(self):
        # loading_succeed: bool = False
        # if os.path.isfile('Data to run Cellects quickly.pkl'):
        #     try:
        #         with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
        #             data_to_run_cellects_quickly = pickle.load(fileopen)
        #         if 'vars' in data_to_run_cellects_quickly:
        #             self.vars = data_to_run_cellects_quickly['vars']
        #             loading_succeed = True
        #             logging.info("Success to load vars from the data folder")
        #     except EOFError:
        #         logging.error("Pickle error: will try to load vars from the Cellects folder")

        if os.path.isfile(ALL_VARS_PKL_FILE):
            logging.info("Load the parameters from all_vars.pkl in the config of the Cellects folder")
            try:  # NEW
                with open(ALL_VARS_PKL_FILE, 'rb') as fileopen:  # NEW
                    self.all = pickle.load(fileopen)  # NEW
                self.vars = self.all['vars']
                self.update_data()
                logging.info("Success to load the parameters dictionaries from the Cellects folder")
                logging.info(os.getcwd())
            except Exception as exc:  # NEW
                logging.error(f"Initialize default parameters because error: {exc}")  # NEW
                default_dicts = DefaultDicts()  # NEW
                self.all = default_dicts.all  # NEW
                self.vars = default_dicts.vars  # NEW
        else:
            logging.info("Initialize default parameters")
            default_dicts = DefaultDicts()
            self.all = default_dicts.all
            self.vars = default_dicts.vars
        if self.all['cores'] == 1:
            self.all['cores'] = os.cpu_count() - 1

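    # Script-style counterpart of the GUI workflow: segment the first image, delineate
    # the arenas, write one .npy video per arena, then run a MotionAnalysis on one arena.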
    def analyze_without_gui(self):
        # If needed, load the "all" dict before calling this function
        # self = po
        # if len(self.all['folder_list']) == 0:
        #     folder_list = "/"
        # else:
        #     folder_list = self.all['folder_list']
        # for exp_i, folder_name in enumerate(folder_list):
        #     # exp_i = 0 ; folder_name = folder_list

        self = ProgramOrganizer()
        self.load_variable_dict()
        # dd = DefaultDicts()
        # self.all = dd.all
        # self.vars = dd.vars
        self.all['global_pathway'] = "/Users/Directory/Data/dossier1"
        self.all['first_folder_sample_number'] = 6
        # self.all['global_pathway'] = "D:\Directory\Data\Audrey\dosier1"
        # self.all['first_folder_sample_number'] = 6
        # self.all['radical'] = "IMG"
        # self.all['extension'] = ".jpg"
        # self.all['im_or_vid'] = 0
        self.look_for_data()
        self.load_data_to_run_cellects_quickly()
        if not self.first_exp_ready_to_run:
            self.get_first_image()
            self.fast_image_segmentation(True)
            # self.first_image.find_first_im_csc(sample_number=self.sample_number,
            #                                    several_blob_per_arena=None,
            #                                    spot_shape=None, spot_size=None,
            #                                    kmeans_clust_nb=2,
            #                                    biomask=None, backmask=None,
            #                                    color_space_dictionaries=None,
            #                                    carefully=True)
            self.cropping(is_first_image=True)
            self.get_average_pixel_size()
            self.delineate_each_arena()
            self.get_background_to_subtract()
            self.get_origins_and_backgrounds_lists()
            self.get_last_image()
            self.fast_image_segmentation(is_first_image=False)
            self.find_if_lighter_background()
            self.extract_exif()
        self.update_output_list()
        look_for_existing_videos = insensitive_glob('ind_' + '*' + '.npy')
        there_already_are_videos = len(look_for_existing_videos) == len(self.vars['analyzed_individuals'])
        logging.info(
            f"{len(look_for_existing_videos)} .npy video files found for {len(self.vars['analyzed_individuals'])} arenas to analyze")
        do_write_videos = not there_already_are_videos or (
                there_already_are_videos and self.all['overwrite_unaltered_videos'])
        if do_write_videos:
            self.videos = OneVideoPerBlob(self.first_image, self.starting_blob_hsize_in_pixels, self.all['raw_images'])
            self.videos.left = self.left
            self.videos.right = self.right
            self.videos.top = self.top
            self.videos.bot = self.bot
            self.videos.first_image.shape_number = self.sample_number
            self.videos.write_videos_as_np_arrays(
                self.data_list, self.vars['min_ram_free'], not self.vars['already_greyscale'], self.reduce_image_dim)
        self.instantiate_tables()

        i = 1
        show_seg = True

        if os.path.isfile(f"coord_specimen{i + 1}_t720_y1475_x1477.npy"):
            binary_coord = np.load(f"coord_specimen{i + 1}_t720_y1475_x1477.npy")
            l = [i, i + 1, self.vars, False, False, show_seg, None]
            sav = self
            self = MotionAnalysis(l)
            self.binary = np.zeros((720, 1475, 1477), dtype=np.uint8)
            self.binary[binary_coord[0, :], binary_coord[1, :], binary_coord[2, :]] = 1
        else:
            l = [i, i + 1, self.vars, True, False, show_seg, None]
            sav = self
            self = MotionAnalysis(l)
        self.get_descriptors_from_binary()
        self.detect_growth_transitions()
        # self.networks_detection(show_seg)
        self.study_cytoscillations(show_seg)

        # for i, arena in enumerate(self.vars['analyzed_individuals']):
        #     l = [i, i + 1, self.vars, True, False, False, None]
        #     analysis_i = MotionAnalysis(l)
        #     self.add_analysis_visualization_to_first_and_last_images(i, analysis_i.efficiency_test_1,
        #                                                              analysis_i.efficiency_test_2)
        # self.save_tables()
        #
        # self = MotionAnalysis(l)
        # l = [5, 6, self.vars, True, False, False, None]
        # sav = self
        # self.get_descriptors_from_binary()
        # self.detect_growth_transitions()

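    # Move to the user-chosen directory and list the files matching radical*extension;
    # when none are found at the top level, collect the sub-folders that contain such files.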
    def look_for_data(self):
        # global_pathway = 'I:\Directory\Tracking_data\generalization_and_potentiation\drop_nak1'
        os.chdir(Path(self.all['global_pathway']))
        logging.info(f"Dir: {self.all['global_pathway']}")
        self.data_list = insensitive_glob(
            self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
        self.data_list = insensitive_glob(self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
        self.all['folder_list'] = []
        self.all['folder_number'] = 1
        if len(self.data_list) > 0:
            lengths = vectorized_len(self.data_list)
            if np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
                logging.error(f"File names present strong variations and cannot be correctly sorted.")
            self.data_list = natsort.natsorted(self.data_list)
            self.sample_number = self.all['first_folder_sample_number']
        else:
            content = os.listdir()
            for obj in content:
                if not os.path.isfile(obj):
                    data_list = insensitive_glob(obj + "/" + self.all['radical'] + '*' + self.all['extension'])
                    if len(data_list) > 0:
                        self.all['folder_list'].append(obj)
                        self.all['folder_number'] += 1
            self.all['folder_list'] = np.sort(self.all['folder_list'])

        if isinstance(self.all['sample_number_per_folder'], int) or len(self.all['sample_number_per_folder']) == 1:
            self.all['sample_number_per_folder'] = np.repeat(self.all['sample_number_per_folder'],
                                                             self.all['folder_number'])

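    # Re-list and naturally sort the files of one (sub-)folder, then refresh
    # img_number/sample_number and the analyzed_individuals vector accordingly.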
    def update_folder_id(self, sample_number, folder_name=""):
        os.chdir(Path(self.all['global_pathway']) / folder_name)
        self.data_list = insensitive_glob(
            self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
        # Sorting is necessary when some modifications (like rotation) modified the last modification date
        lengths = vectorized_len(self.data_list)
        if np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
            logging.error(f"File names present strong variations and cannot be correctly sorted.")
        self.data_list = natsort.natsorted(self.data_list)
        if self.all['im_or_vid'] == 1:
            self.sample_number = len(self.data_list)
        else:
            self.vars['img_number'] = len(self.data_list)
            self.sample_number = sample_number
        if len(self.vars['analyzed_individuals']) != sample_number:
            self.vars['analyzed_individuals'] = np.arange(sample_number) + 1

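    # When 'Data to run Cellects quickly.pkl' holds the validated shapes, the arena
    # coordinates and the parameter dictionaries, restore them so that the first
    # experiment can run without redoing the image analysis.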
    def load_data_to_run_cellects_quickly(self):
        current_global_pathway = self.all['global_pathway']
        folder_number = self.all['folder_number']
        if folder_number > 1:
            folder_list = deepcopy(self.all['folder_list'])
            sample_number_per_folder = deepcopy(self.all['sample_number_per_folder'])

        if os.path.isfile('Data to run Cellects quickly.pkl'):
            pickle_rick = PickleRick()
            data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
            if data_to_run_cellects_quickly is None:
                data_to_run_cellects_quickly = {}

            # try:
            #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
            #         data_to_run_cellects_quickly = pickle.load(fileopen)
            # except pickle.UnpicklingError:
            #     logging.error("Pickle error")
            #     data_to_run_cellects_quickly = {}
            if ('validated_shapes' in data_to_run_cellects_quickly) and ('coordinates' in data_to_run_cellects_quickly) and ('all' in data_to_run_cellects_quickly):
                logging.info("Success to load Data to run Cellects quickly.pkl from the user chosen directory")
                self.all = data_to_run_cellects_quickly['all']
                # If you want to add a new variable, first run an updated version of all_vars_dict,
                # then put a breakpoint here and run the following + self.save_data_to_run_cellects_quickly():
                # self.all['vars']['lose_accuracy_to_save_memory'] = False
                self.vars = self.all['vars']
                self.update_data()
                print(self.vars['convert_for_motion'])
                folder_changed = False
                if current_global_pathway != self.all['global_pathway']:
                    folder_changed = True
                    logging.info(
                        "Although the folder is ready, it is not at the same place as it was during creation, updating")
                    self.all['global_pathway'] = current_global_pathway
                if folder_number > 1:
                    self.all['global_pathway'] = current_global_pathway
                    self.all['folder_list'] = folder_list
                    self.all['folder_number'] = folder_number
                    self.all['sample_number_per_folder'] = sample_number_per_folder

                if len(self.data_list) == 0:
                    self.look_for_data()
                if folder_changed and folder_number > 1 and len(self.all['folder_list']) > 0:
                    self.update_folder_id(self.all['sample_number_per_folder'][0], self.all['folder_list'][0])
                self.get_first_image()
                self.get_last_image()
                (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = data_to_run_cellects_quickly[
                    'coordinates']
                if self.all['automatically_crop']:
                    self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
                    logging.info("Crop first image")
                    self.first_image.automatically_crop(self.first_image.crop_coord)
                    logging.info("Crop last image")
                    self.last_image.automatically_crop(self.first_image.crop_coord)
                else:
                    self.first_image.crop_coord = None
                # self.cropping(True)
                # self.cropping(False)
                self.first_image.validated_shapes = data_to_run_cellects_quickly['validated_shapes']
                self.first_image.im_combinations = []
                self.current_combination_id = 0
                self.first_image.im_combinations.append({})
                self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
                self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
                self.first_image.im_combinations[self.current_combination_id]['shape_number'] = data_to_run_cellects_quickly['shape_number']

                self.first_exp_ready_to_run = True
                print(f"Overwrite is {self.all['overwrite_unaltered_videos']}")
                if self.vars['subtract_background'] and len(self.vars['background_list']) == 0:
                    self.first_exp_ready_to_run = False
            else:
                self.first_exp_ready_to_run = False
        else:
            self.first_exp_ready_to_run = False
        if self.first_exp_ready_to_run:
            logging.info("The current (or the first) folder is ready to run")
        else:
            logging.info("The current (or the first) folder is not ready to run")

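    # Complete self.all and self.vars (and their 'descriptors' sub-dicts) with any key
    # present in DefaultDicts but missing from the loaded dictionaries.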
    def update_data(self):
        dd = DefaultDicts()
        all = len(dd.all) != len(self.all)
        vars = len(dd.vars) != len(self.vars)
        all_desc = len(dd.all['descriptors']) != len(self.all['descriptors'])
        vars_desc = len(dd.vars['descriptors']) != len(self.vars['descriptors'])
        if all:
            for key, val in dd.all.items():
                if not key in self.all:
                    self.all[key] = val
        if vars:
            for key, val in dd.vars.items():
                if not key in self.vars:
                    self.vars[key] = val
        if all_desc:
            for key, val in dd.all['descriptors'].items():
                if not key in self.all['descriptors']:
                    self.all['descriptors'][key] = val
        if vars_desc:
            for key, val in dd.vars['descriptors'].items():
                if not key in self.vars['descriptors']:
                    self.vars['descriptors'][key] = val

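    # Create or update 'Data to run Cellects quickly.pkl' with the validated first-image
    # segmentation, the arena coordinates, the exif timings and the parameter dictionaries.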
    def save_data_to_run_cellects_quickly(self, new_one_if_does_not_exist=True):
        data_to_run_cellects_quickly = None
        if os.path.isfile('Data to run Cellects quickly.pkl'):
            logging.info("Update -Data to run Cellects quickly.pkl- in the user chosen directory")
            pickle_rick = PickleRick()
            data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
            if data_to_run_cellects_quickly is None:
                logging.error("Failed to load Data to run Cellects quickly.pkl before update. Abort saving.")

            # try:
            #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
            #         data_to_run_cellects_quickly = pickle.load(fileopen)
            # except pickle.UnpicklingError:
            #     logging.error("Pickle error")
            #     data_to_run_cellects_quickly = {}
        else:
            if new_one_if_does_not_exist:
                logging.info("Create Data to run Cellects quickly.pkl in the user chosen directory")
                data_to_run_cellects_quickly = {}
        if data_to_run_cellects_quickly is not None:
            if self.data_to_save['first_image']:
                data_to_run_cellects_quickly['validated_shapes'] = self.first_image.im_combinations[self.current_combination_id]['binary_image']
                data_to_run_cellects_quickly['shape_number'] = self.first_image.im_combinations[self.current_combination_id]['shape_number']
                # data_to_run_cellects_quickly['converted_image'] = self.first_image.im_combinations[self.current_combination_id]['converted_image']
            if self.data_to_save['coordinates']:
                data_to_run_cellects_quickly['coordinates'] = self.list_coordinates()
                logging.info("When they exist, do overwrite unaltered video")
                self.all['overwrite_unaltered_videos'] = True
            if self.data_to_save['exif']:
                self.vars['exif'] = self.extract_exif()
                # data_to_run_cellects_quickly['exif'] = self.extract_exif()
            # if self.data_to_save['background_and_origin_list']:
            #     logging.info(f"Origin shape is {self.vars['origin_list'][0].shape}")
            #     data_to_run_cellects_quickly['background_and_origin_list'] = [self.vars['origin_list'], self.vars['background_list'], self.vars['background_list2']]
            self.all['vars'] = self.vars
            print(self.vars['convert_for_motion'])
            data_to_run_cellects_quickly['all'] = self.all
            # data_to_run_cellects_quickly['all']['vars']['origin_state'] = "fluctuating"
            pickle_rick = PickleRick()
            pickle_rick.write_file(data_to_run_cellects_quickly, 'Data to run Cellects quickly.pkl')

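    # Return the crop coordinates followed by the per-arena bounding-box vectors.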
    def list_coordinates(self):
        if self.first_image.crop_coord is None:
            self.first_image.crop_coord = [0, self.first_image.image.shape[0], 0,
                                           self.first_image.image.shape[1]]
        videos_coordinates = self.first_image.crop_coord + [self.left, self.right, self.top, self.bot]
        return videos_coordinates

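    # Return one timing per image (in minutes): read from exif data when
    # extract_time_interval is set, otherwise built from the user-provided time_step.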
    def extract_exif(self):
        if self.all['im_or_vid'] == 1:
            timings = np.arange(self.vars['dims'][0])
        else:
            if sys.platform.startswith('win'):
                pathway = os.getcwd() + '\\'
            else:
                pathway = os.getcwd() + '/'
            arbitrary_time_step: bool = True
            if self.all['extract_time_interval']:
                self.vars['time_step'] = 1
                try:
                    timings = extract_time(self.data_list, pathway, self.all['raw_images'])
                    timings = timings - timings[0]
                    timings = timings / 60
                    time_step = np.mean(np.diff(timings))
                    digit_nb = 0
                    for i in str(time_step):
                        if i in {'.'}:
                            pass
                        elif i in {'0'}:
                            digit_nb += 1
                        else:
                            break
                    self.vars['time_step'] = np.round(time_step, digit_nb + 1)
                    arbitrary_time_step = False
                except:
                    pass
            if arbitrary_time_step:
                timings = np.arange(0, self.vars['dims'][0] * self.vars['time_step'], self.vars['time_step'])
                timings = timings - timings[0]
                timings = timings / 60
        return timings

        #
        # if not os.path.isfile("timings.csv") or self.all['overwrite_cellects_data']:
        #     if self.vars['time_step'] == 0:
        #         if self.all['im_or_vid'] == 1:
        #             savetxt("timings.csv", np.arange(self.vars['dims'][0]), fmt='%10d', delimiter=',')
        #         else:
        #             if sys.platform.startswith('win'):
        #                 pathway = os.getcwd() + '\\'
        #             else:
        #                 pathway = os.getcwd() + '/'
        #             timings = extract_time(self.data_list, pathway, self.all['raw_images'])
        #             timings = timings - timings[0]
        #             timings = timings / 60
        #     else:
        #         timings = np.arange(0, self.vars['dims'][0] * self.vars['time_step'], self.vars['time_step'])
        #     savetxt("timings.csv", timings, fmt='%1.2f', delimiter=',')

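    # Read the first analyzable frame (from a video or an image file), drop redundant
    # color channels when all three are equal, and wrap the result in OneImageAnalysis.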
    def get_first_image(self):
        logging.info("Load first image")
        just_read_image = self.first_im is not None
        self.reduce_image_dim = False
        # just_read_image = self.analysis_instance is not None
        if self.all['im_or_vid'] == 1:
            cap = cv2.VideoCapture(self.data_list[0])
            counter = 0
            if not just_read_image:
                self.sample_number = len(self.data_list)
                self.vars['img_number'] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                self.analysis_instance = np.zeros(
                    [int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                     int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3])
                while cap.isOpened() and counter < 1:
                    ret, frame = cap.read()
                    if counter == 0:
                        self.first_im = frame
                        self.analysis_instance[0, ...] = self.first_im
                        break
                cap.release()
            elif np.sum(self.analysis_instance[self.all['first_detection_frame'] - 1, ...] == 0):
                cap = cv2.VideoCapture(self.data_list[0])
                counter = 0
                while cap.isOpened() and (counter < self.all['first_detection_frame']):
                    ret, frame = cap.read()
                    # if self.reduce_image_dim:
                    #     frame = frame[:, :, 0]
                    self.analysis_instance[counter, ...] = frame
                    counter += 1

                cap.release()
            self.first_im = self.analysis_instance[
                self.all['first_detection_frame'] - 1, ...]
            self.vars['dims'] = self.analysis_instance.shape[:3]

        else:
            self.vars['img_number'] = len(self.data_list)
            self.all['raw_images'] = is_raw_image(self.data_list[0])
            self.first_im = readim(self.data_list[self.all['first_detection_frame'] - 1], self.all['raw_images'])
            # if self.reduce_image_dim:
            #     self.first_im = self.first_im[:, :, 0]
            self.vars['dims'] = [self.vars['img_number'], self.first_im.shape[0], self.first_im.shape[1]]
            # self.first_im = readim(self.data_list[0], self.all['raw_images'])
        if len(self.first_im.shape) == 3:
            if np.all(np.equal(self.first_im[:, :, 0], self.first_im[:, :, 1])) and np.all(
                    np.equal(self.first_im[:, :, 1], self.first_im[:, :, 2])):
                self.reduce_image_dim = True
        if self.reduce_image_dim:
            self.first_im = self.first_im[:, :, 0]
            if self.all['im_or_vid'] == 1:
                self.analysis_instance = self.analysis_instance[:, :, :, 0]
        self.first_image = OneImageAnalysis(self.first_im)
        self.vars['already_greyscale'] = self.first_image.already_greyscale
        if self.vars['already_greyscale']:
            self.vars["convert_for_origin"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
            self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
            if np.mean((np.mean(self.first_image.image[2, :, ...]), np.mean(self.first_image.image[-3, :, ...]), np.mean(self.first_image.image[:, 2, ...]), np.mean(self.first_image.image[:, -3, ...]))) > 127:
                self.vars['contour_color']: np.uint8 = 0
            else:
                self.vars['contour_color']: np.uint8 = 255
        if self.all['first_detection_frame'] > 1:
            self.vars['origin_state'] = 'invisible'

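    # Read the last frame of the video, or read and rotate the last image so that it
    # matches the orientation of the first one.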
    def get_last_image(self):
        logging.info("Load last image")
        if self.all['im_or_vid'] == 1:
            cap = cv2.VideoCapture(self.data_list[0])
            counter = 0
            while cap.isOpened() and counter < self.vars['img_number']:
                ret, frame = cap.read()
                if self.reduce_image_dim:
                    frame = frame[:, :, 0]
                self.analysis_instance[-1, ...] = frame
                # if counter == self.vars['img_number'] - 1:
                #     if self.reduce_image_dim:
                #         frame = frame[:, :, 0]
                #     break
                counter += 1
            self.last_im = frame
            cap.release()
        else:
            # self.last_im = readim(self.data_list[-1], self.all['raw_images'])
            is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
            self.last_im = read_and_rotate(self.data_list[-1], self.first_im, self.all['raw_images'], is_landscape)
            if self.reduce_image_dim:
                self.last_im = self.last_im[:, :, 0]
        self.last_image = OneImageAnalysis(self.last_im)

    # self.message_when_thread_finished.emit("")
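    # Convert and segment the first (or last) image with the current color space
    # combination, apply the user-drawn bio/back masks, and store the result as an
    # image combination usable by the GUI.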
    def fast_image_segmentation(self, is_first_image, biomask=None, backmask=None, spot_size=None):
        if is_first_image:
            self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
                                                 self.all["bio_mask"], self.all["back_mask"], subtract_background=None,
                                                 subtract_background2=None, grid_segmentation=False,
                                                 filter_spec=self.vars["filter_spec"])
            if not self.first_image.drift_correction_already_adjusted:
                self.vars['drift_already_corrected'] = self.first_image.check_if_image_border_attest_drift_correction()
                if self.vars['drift_already_corrected']:
                    logging.info("Cellects detected that the images have already been corrected for drift")
                    self.first_image.adjust_to_drift_correction(self.vars['convert_for_origin']['logical'])
            if self.vars["grid_segmentation"]:
                self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
                                                     self.all["bio_mask"], self.all["back_mask"],
                                                     subtract_background=None, subtract_background2=None,
                                                     grid_segmentation=True,
                                                     filter_spec=self.vars["filter_spec"])

            self.first_image.set_spot_shapes_and_size_confint(self.all['starting_blob_shape'])
            logging.info(self.sample_number)
            process_i = ProcessFirstImage(
                [self.first_image, False, False, None, self.vars['several_blob_per_arena'],
                 self.sample_number, spot_size, self.vars["color_number"], self.all["bio_mask"], self.all["back_mask"], None])
            process_i.binary_image = self.first_image.binary_image
            process_i.process_binary_image(use_bio_and_back_masks=True)

            if self.all["back_mask"] is not None:
                if np.any(process_i.shapes[self.all["back_mask"]]):
                    process_i.shapes[np.isin(process_i.shapes, np.unique(process_i.shapes[self.all["back_mask"]]))] = 0
                    process_i.validated_shapes = (process_i.shapes > 0).astype(np.uint8)
            if self.all["bio_mask"] is not None:
                process_i.validated_shapes[self.all["bio_mask"]] = 1
            if self.all["back_mask"] is not None or self.all["bio_mask"] is not None:
                process_i.shape_number, process_i.shapes = cv2.connectedComponents(process_i.validated_shapes, connectivity=8)
                process_i.shape_number -= 1

            self.first_image.validated_shapes = process_i.validated_shapes
            self.first_image.shape_number = process_i.shape_number
            if self.first_image.im_combinations is None:
                self.first_image.im_combinations = []
                self.first_image.im_combinations.append({})
            self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
            self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
            self.first_image.im_combinations[self.current_combination_id]['converted_image'] = np.round(self.first_image.image).astype(np.uint8)
            self.first_image.im_combinations[self.current_combination_id]['shape_number'] = process_i.shape_number
            # self.first_image.generate_color_space_combination(self.vars['convert_for_origin'], subtract_background)
        else:
            # self.last_image.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'])
            # if self.vars['drift_already_corrected']:
            #     drift_correction, drift_correction2 = self.last_image.adjust_to_drift_correction()
            # self.last_image.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'])
            self.cropping(is_first_image=False)
            print(self.vars["filter_spec"])
            self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
                                                biomask, backmask, self.first_image.subtract_background,
                                                self.first_image.subtract_background2,
                                                grid_segmentation=self.vars["grid_segmentation"],
                                                filter_spec=self.vars["filter_spec"])
            if self.vars['drift_already_corrected'] and not self.last_image.drift_correction_already_adjusted and not self.vars["grid_segmentation"]:
                self.last_image.adjust_to_drift_correction(self.vars['convert_for_motion']['logical'])

            if self.last_image.im_combinations is None:
                self.last_image.im_combinations = []
                self.last_image.im_combinations.append({})
            self.last_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_motion']
            self.last_image.im_combinations[self.current_combination_id]['binary_image'] = self.last_image.binary_image
            self.last_image.im_combinations[self.current_combination_id]['converted_image'] = np.round(self.last_image.image).astype(np.uint8)

            # self.last_image.generate_color_space_combination(self.vars['convert_for_motion'], subtract_background)
            # if self.all["more_than_two_colors"]:
            #     self.last_image.kmeans(self.vars["color_number"])
            # else:
            #     self.last_image.thresholding()
        # if self.all['are_gravity_centers_moving'] != 1:
        #     self.delineate_each_arena()

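    # Compute (or reload from 'Data to run Cellects quickly.pkl') the crop coordinates
    # of the first image and apply them; the last image reuses the same coordinates.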
    def cropping(self, is_first_image):
        if not self.vars['drift_already_corrected']:
            if is_first_image:
                if not self.first_image.cropped:
                    if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
                        pickle_rick = PickleRick()
                        data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
                        if data_to_run_cellects_quickly is not None:
                            if 'coordinates' in data_to_run_cellects_quickly:
                                logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
                                (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
                                    data_to_run_cellects_quickly['coordinates']
                                self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
                            else:
                                self.first_image.get_crop_coordinates()
                        else:
                            self.first_image.get_crop_coordinates()


                        # try:
                        #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
                        #         data_to_run_cellects_quickly = pickle.load(fileopen)
                        #     if 'coordinates' in data_to_run_cellects_quickly:
                        #         logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
                        #         (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
                        #             data_to_run_cellects_quickly['coordinates']
                        #         self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
                        #     else:
                        #         self.first_image.get_crop_coordinates()
                        # except pickle.UnpicklingError:
                        #     logging.error("Pickle error")
                        #     self.first_image.get_crop_coordinates()


                        # if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('coordinates.pkl')):
                        #     with open('coordinates.pkl', 'rb') as fileopen:
                        #         (ccy1, ccy2, ccx1, ccx2, self.videos.left, self.videos.right, self.videos.top,
                        #          self.videos.bot) = pickle.load(fileopen)
                    else:
                        self.first_image.get_crop_coordinates()
                    if self.all['automatically_crop']:
                        self.first_image.automatically_crop(self.first_image.crop_coord)
                    else:
                        self.first_image.crop_coord = None
            else:
                if not self.last_image.cropped and self.all['automatically_crop']:
                    self.last_image.automatically_crop(self.first_image.crop_coord)
        # if self.all['automatically_crop'] and not self.vars['drift_already_corrected']:
        #     if is_first_image:
        #         self.first_image.get_crop_coordinates()
        #         self.first_image.automatically_crop(self.first_image.crop_coord)
        #     else:
        #         self.last_image.automatically_crop(self.first_image.crop_coord)

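    # Derive the pixel area (in mm²) from the image width or from the average detected
    # spot size, then deduce the spot size in pixels and the first-move threshold.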
    def get_average_pixel_size(self):
        logging.info("Get average pixel size")
        (self.first_image.shape_number,
         self.first_image.shapes,
         self.first_image.stats,
         centroids) = cv2.connectedComponentsWithStats(
            self.first_image.validated_shapes,
            connectivity=8)
        self.first_image.shape_number -= 1
        if self.all['scale_with_image_or_cells'] == 0:
            self.vars['average_pixel_size'] = np.square(
                self.all['image_horizontal_size_in_mm'] /
                self.first_im.shape[1])
        else:
            self.vars['average_pixel_size'] = np.square(
                self.all['starting_blob_hsize_in_mm'] /
                np.mean(self.first_image.stats[1:, 2]))
        if self.all['set_spot_size']:
            self.starting_blob_hsize_in_pixels = (
                self.all['starting_blob_hsize_in_mm'] /
                np.sqrt(self.vars['average_pixel_size']))
        else:
            self.starting_blob_hsize_in_pixels = None

        if self.all['automatic_size_thresholding']:
            self.vars['first_move_threshold'] = 10
        else:
            # if self.vars['origin_state'] != "invisible":
            self.vars['first_move_threshold'] = np.round(
                self.all['first_move_threshold_in_mm²'] /
                self.vars['average_pixel_size']).astype(np.uint8)
        logging.info(f"The average pixel size is: {self.vars['average_pixel_size']} mm²")

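    # Find one bounding box per arena (reusing saved coordinates when allowed) and flag
    # aberrant specimen sizes in the returned analysis_status dict.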
    def delineate_each_arena(self):
        self.videos = OneVideoPerBlob(
            self.first_image,
            self.starting_blob_hsize_in_pixels,
            self.all['raw_images'])
        # self.delineation_number += 1
        # if self.delineation_number > 1:
        #     print('stophere')
        analysis_status = {"continue": True, "message": ""}
        if (self.sample_number > 1 and not self.vars['several_blob_per_arena']):
            compute_get_bb: bool = True
            if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):

                pickle_rick = PickleRick()
                data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
                if data_to_run_cellects_quickly is not None:
                    if 'coordinates' in data_to_run_cellects_quickly:
                        (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
                            data_to_run_cellects_quickly['coordinates']
                        self.videos.left, self.videos.right, self.videos.top, self.videos.bot = self.left, self.right, self.top, self.bot
                        self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
                        if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
                                self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
                            logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
                            compute_get_bb = False


            # try:
            #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
            #         data_to_run_cellects_quickly = pickle.load(fileopen)
            # except pickle.UnpicklingError:
            #     logging.error("Pickle error")
            #     data_to_run_cellects_quickly = {}
            # if 'coordinates' in data_to_run_cellects_quickly:
            #     (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
            #         data_to_run_cellects_quickly['coordinates']
            #     self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
            #     if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
            #             self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
            #         logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
            #         compute_get_bb = False

            # if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('coordinates.pkl')):
            #     with open('coordinates.pkl', 'rb') as fileopen:
            #         (ccy1, ccy2, ccx1, ccx2, self.videos.left, self.videos.right, self.videos.top, self.videos.bot) = pickle.load(fileopen)

            # if (not self.all['overwrite_unaltered_videos'] and
            #         os.path.isfile('coordinates.pkl')):
            #     with open('coordinates.pkl', 'rb') as fileopen:
            #         (vertical_shape, horizontal_shape, self.videos.left, self.videos.right, self.videos.top,
            #          self.videos.bot) = pickle.load(fileopen)

            if compute_get_bb:
                if self.all['im_or_vid'] == 1:
                    self.videos.get_bounding_boxes(
                        are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
                        img_list=self.analysis_instance,
                        color_space_combination=self.vars['convert_for_origin'],  # self.vars['convert_for_motion']
                        color_number=self.vars["color_number"],
                        sample_size=5,
                        all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'],
                        filter_spec=self.vars['filter_spec'])
                else:
                    self.videos.get_bounding_boxes(
                        are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
                        img_list=self.data_list,
                        color_space_combination=self.vars['convert_for_origin'],
                        color_number=self.vars["color_number"],
                        sample_size=5,
                        all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'],
                        filter_type=self.vars['filter_spec'])
                if np.any(self.videos.ordered_stats[:, 4] > 100 * np.median(self.videos.ordered_stats[:, 4])):
                    analysis_status['message'] = "A specimen is at least 100 times larger: (re)do the first image analysis."
                    analysis_status['continue'] = False
                if np.any(self.videos.ordered_stats[:, 4] < 0.01 * np.median(self.videos.ordered_stats[:, 4])):
                    analysis_status['message'] = "A specimen is at least 100 times smaller: (re)do the first image analysis."
                    analysis_status['continue'] = False
                logging.info(
                    str(self.videos.not_analyzed_individuals) + " individuals are out of picture scope and cannot be analyzed")
            self.left, self.right, self.top, self.bot = self.videos.left, self.videos.right, self.videos.top, self.videos.bot

        else:
            self.left, self.right, self.top, self.bot = np.array([1]), np.array([self.first_image.image.shape[1] - 2]), np.array([1]), np.array([self.first_image.image.shape[0] - 2])
            self.videos.left, self.videos.right, self.videos.top, self.videos.bot = np.array([1]), np.array([self.first_image.image.shape[1] - 2]), np.array([1]), np.array([self.first_image.image.shape[0] - 2])

        self.vars['analyzed_individuals'] = np.arange(self.sample_number) + 1
        if self.videos.not_analyzed_individuals is not None:
            self.vars['analyzed_individuals'] = np.delete(self.vars['analyzed_individuals'],
                                                          self.videos.not_analyzed_individuals - 1)
        # logging.info(self.top)
        return analysis_status

    def get_background_to_subtract(self):
        """
        Rethink when this step happens and find out why it does not work.
        """
        # self.vars['subtract_background'] = False
        if self.vars['subtract_background']:
            self.first_image.generate_subtract_background(self.vars['convert_for_motion'])

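    # Slice the validated first image (and the background references when
    # subtract_background is on) into one origin/background pair per arena.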
    def get_origins_and_backgrounds_lists(self):
        logging.info("Create origins and background lists")
        if self.top is None:
            # self.top = [1]
            # self.bot = [self.first_im.shape[0] - 2]
            # self.left = [1]
            # self.right = [self.first_im.shape[1] - 2]
            self.top = np.array([1])
            self.bot = np.array([self.first_im.shape[0] - 2])
            self.left = np.array([1])
            self.right = np.array([self.first_im.shape[1] - 2])

        add_to_c = 1
        first_im = self.first_image.validated_shapes
        self.vars['origin_list'] = []
        self.vars['background_list'] = []
        self.vars['background_list2'] = []
        for rep in np.arange(len(self.vars['analyzed_individuals'])):
            self.vars['origin_list'].append(first_im[self.top[rep]:(self.bot[rep] + add_to_c),
                                                     self.left[rep]:(self.right[rep] + add_to_c)])
            if self.vars['subtract_background']:
                self.vars['background_list'].append(
                    self.first_image.subtract_background[self.top[rep]:(self.bot[rep] + add_to_c),
                                                         self.left[rep]:(self.right[rep] + add_to_c)])
                if self.vars['convert_for_motion']['logical'] != 'None':
                    self.vars['background_list2'].append(
                        self.first_image.subtract_background2[self.top[rep]:(self.bot[rep] + add_to_c),
                                                              self.left[rep]:(self.right[rep] + add_to_c)])

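    # Variant of the previous method: re-run segmentation on each arena's cropped bgr
    # image instead of slicing the global first-image result.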
    def get_origins_and_backgrounds_one_by_one(self):
        add_to_c = 1
        self.vars['origin_list'] = []
        self.vars['background_list'] = []
        self.vars['background_list2'] = []

        for arena in np.arange(len(self.vars['analyzed_individuals'])):
            bgr_image = self.first_image.bgr[self.top[arena]:(self.bot[arena] + add_to_c),
                                             self.left[arena]:(self.right[arena] + add_to_c), ...]
            image = OneImageAnalysis(bgr_image)
            if self.vars['subtract_background']:
                image.generate_subtract_background(self.vars['convert_for_motion'])
                self.vars['background_list'].append(image.image)
                if self.vars['convert_for_motion']['logical'] != 'None':
                    self.vars['background_list2'].append(image.image2)

            # self.vars['origins_list'].append(self.first_image.validated_shapes[self.top[arena]:(self.bot[arena]),
            #                                                                    self.left[arena]:(self.right[arena])])
            #
            if self.vars['several_blob_per_arena']:
                image.validated_shapes = image.binary_image
            else:
                image.get_largest_shape()

            self.vars['origin_list'].append(image.validated_shapes)

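    # Bound the expected number and size of connected components from the arena layout,
    # then let the last image search for a color space combination within those bounds.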
    def choose_color_space_combination(self):
        # self = po
        # 2) Represent the segmentation using a particular color space combination
        if self.all['are_gravity_centers_moving'] != 1:
            analysis_status = self.delineate_each_arena()
        # self.fi.automatically_crop(self.first_image.crop_coord)
        self.last_image = OneImageAnalysis(self.last_im)
        self.last_image.automatically_crop(self.videos.first_image.crop_coord)
        # csc = ColorSpaceCombination(self.last_image.image)

        concomp_nb = [self.sample_number, self.sample_number * 50]
        if self.all['are_zigzag'] == "columns":
            inter_dist = np.mean(np.diff(np.nonzero(self.videos.first_image.y_boundaries)))
        elif self.all['are_zigzag'] == "rows":
            inter_dist = np.mean(np.diff(np.nonzero(self.videos.first_image.x_boundaries)))
        else:
            dist1 = np.mean(np.diff(np.nonzero(self.videos.first_image.y_boundaries)))
            dist2 = np.mean(np.diff(np.nonzero(self.videos.first_image.x_boundaries)))
            inter_dist = max(dist1, dist2)  # was np.max(dist1, dist2), which would misread dist2 as the axis argument
        if self.all['starting_blob_shape'] == "circle":
            max_shape_size = np.pi * np.square(inter_dist)
        else:
            max_shape_size = np.square(2 * inter_dist)
        total_surfarea = max_shape_size * self.sample_number
        if self.all['are_gravity_centers_moving'] != 1:
            out_of_arenas = np.ones_like(self.videos.first_image.validated_shapes)
            for blob_i in np.arange(len(self.vars['analyzed_individuals'])):
                out_of_arenas[self.top[blob_i]: (self.bot[blob_i] + 1),
                              self.left[blob_i]: (self.right[blob_i] + 1)] = 0
        else:
            out_of_arenas = None
        ref_image = self.videos.first_image.validated_shapes
        self.last_image.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image)
        # csc.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image)
        # csc.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image, self.first_image.subtract_background)
        self.vars['convert_for_motion'] = self.last_image.channel_combination

        self.fast_image_segmentation(False)
        # if self.vars['subtract_background']:
        #     csc = ColorSpaceCombination(self.last_image.image)
        #     csc.generate(self.vars['convert_for_motion'], self.first_image.subtract_background)
        # if self.all["more_than_two_colors"]:
        #     csc.kmeans(self.vars["color_number"])
        # else:
        #     csc.thresholding()
        # self.last_image.image = csc.image
        # self.last_image.binary_image = csc.binary_image

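    # Flatten the typed (numba) color-space dicts back into plain dicts so that they
    # can be pickled.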
    def untype_csc_dict(self):
        new_convert_for_origin = {}
        for k, v in self.vars['convert_for_origin'].items():
            new_convert_for_origin[k] = v
        if self.vars['logical_between_csc_for_origin'] is not None:
            new_convert_for_origin['logical'] = self.vars['logical_between_csc_for_origin']
        for k, v in self.vars['convert_for_origin2'].items():
            new_convert_for_origin[k] = v
        self.vars['convert_for_origin'] = new_convert_for_origin
        self.vars['convert_for_origin2'] = {}

        new_convert_for_motion = {}
        for k, v in self.vars['convert_for_motion'].items():
            new_convert_for_motion[k] = v
        if self.vars['convert_for_motion']['logical'] != 'None':
            new_convert_for_motion['logical'] = self.vars['convert_for_motion']['logical']
        for k, v in self.vars['convert_for_motion2'].items():
            new_convert_for_motion[k] = v
        self.vars['convert_for_motion'] = new_convert_for_motion
        self.vars['convert_for_motion2'] = {}

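    # Split the color-space dicts into numba typed dicts (main and secondary channels)
    # and extract the logical operator linking them.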
    def type_csc_dict(self):
        # self.vars['convert_for_motion']['logical'] = 'And'
        # self.vars['convert_for_motion']['hsv'] = np.array((0, 0, 1))
        # self.vars['convert_for_motion']['logical'] = 'And'
        # self.vars['convert_for_motion']['lab2'] = np.array((0, 0, 1))

        new_convert_for_origin = TDict()
        self.vars['convert_for_origin2'] = TDict()
        self.vars['logical_between_csc_for_origin'] = None
        for k, v in self.vars['convert_for_origin'].items():
            if k != 'logical' and v.sum() > 0:
                if k[-1] != '2':
                    new_convert_for_origin[k] = v
                else:
                    self.vars['convert_for_origin2'][k[:-1]] = v
            else:
                self.vars['logical_between_csc_for_origin'] = v
        self.vars['convert_for_origin'] = new_convert_for_origin

        new_convert_for_motion = TDict()
        self.vars['convert_for_motion2'] = TDict()
        self.vars['convert_for_motion']['logical'] = None
        for k, v in self.vars['convert_for_motion'].items():
            if k != 'logical' and v.sum() > 0:
                if k[-1] != '2':
                    new_convert_for_motion[k] = v
                else:
                    self.vars['convert_for_motion2'][k[:-1]] = v
            else:
                self.vars['convert_for_motion']['logical'] = v
        self.vars['convert_for_motion'] = new_convert_for_motion

        if self.vars['color_number'] > 2:
            self.vars['bio_label'] = None  # self.first_image.bio_label
            if self.vars['convert_for_motion']['logical'] != 'None':
                self.vars['bio_label2'] = None

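    # Compare mean intensities inside and outside the segmented shapes to set
    # lighter_background and contour_color; when the origin is invisible, also derive a
    # luminosity threshold from the values covered by the first-image segmentation.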
    def find_if_lighter_background(self):
        logging.info("Find if the background is lighter or darker than the cells")
        self.vars['lighter_background']: bool = True
        self.vars['contour_color']: np.uint8 = 0
        are_dicts_equal: bool = True
        for key in self.vars['convert_for_origin'].keys():
            are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_motion'] and self.vars['convert_for_origin'][key] == self.vars['convert_for_motion'][key])
        for key in self.vars['convert_for_motion'].keys():
            are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_origin'] and self.vars['convert_for_motion'][key] == self.vars['convert_for_origin'][key])

        if are_dicts_equal:
            if self.first_im is None:
                self.get_first_image()
                self.fast_image_segmentation(True)
                self.cropping(is_first_image=True)
            among = np.nonzero(self.first_image.validated_shapes)
            not_among = np.nonzero(1 - self.first_image.validated_shapes)
            # Use the converted image to tell if the background is lighter, for analysis purposes
            if self.first_image.image[among[0], among[1]].mean() > self.first_image.image[not_among[0], not_among[1]].mean():
                self.vars['lighter_background'] = False
            # Use the original image to tell if the background is lighter, for display purposes
            if self.first_image.bgr[among[0], among[1], ...].mean() > self.first_image.bgr[not_among[0], not_among[1], ...].mean():
                self.vars['contour_color'] = 255
        else:
            if self.last_im is None:
                self.get_last_image()
                # self.cropping(is_first_image=False)
                self.fast_image_segmentation(is_first_image=False)
            if self.last_image.binary_image.sum() == 0:
                self.fast_image_segmentation(is_first_image=False)
            among = np.nonzero(self.last_image.binary_image)
            not_among = np.nonzero(1 - self.last_image.binary_image)
            # Use the converted image to tell if the background is lighter, for analysis purposes
            if self.last_image.image[among[0], among[1]].mean() > self.last_image.image[not_among[0], not_among[1]].mean():
                self.vars['lighter_background'] = False
            # Use the original image to tell if the background is lighter, for display purposes
            if self.last_image.bgr[among[0], among[1], ...].mean() > self.last_image.bgr[not_among[0], not_among[1], ...].mean():
                self.vars['contour_color'] = 255
        if self.vars['origin_state'] == "invisible":
            binary_image = deepcopy(self.first_image.binary_image)
            self.first_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
                                                 None, None, subtract_background=None,
                                                 subtract_background2=None,
                                                 grid_segmentation=self.vars["grid_segmentation"],
                                                 filter_spec=self.vars["filter_spec"])
            covered_values = self.first_image.image[np.nonzero(binary_image)]
            if self.vars['lighter_background']:
                if np.max(covered_values) < 255:
                    self.vars['luminosity_threshold'] = np.max(covered_values) + 1
                else:
                    self.vars['luminosity_threshold'] = 127
            else:
                if np.min(covered_values) > 0:
                    self.vars['luminosity_threshold'] = np.min(covered_values) - 1
                else:
                    self.vars['luminosity_threshold'] = 127

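    # The heuristic above compares mean intensity inside the segmented mask
    # with mean intensity outside it. For example, if specimen pixels average
    # 40 and background pixels average 180 on the converted greyscale image,
    # 'lighter_background' stays True (the specimen is the darker phase) and
    # 'contour_color' stays 0, so validation contours are drawn in black; the
    # reverse case flips both values. When origin and motion use different
    # color space combinations, the last image is used instead of the first.
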
    def load_one_arena(self, arena):
        # self.delineate_each_arena()
        # self.choose_color_space_combination()
        add_to_c = 1
        self.one_arena_done = True
        i = np.nonzero(self.vars['analyzed_individuals'] == arena)[0][0]
        self.converted_video = np.zeros(
            (len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c),
            dtype=float)
        if not self.vars['already_greyscale']:
            self.visu = np.zeros((len(self.data_list), self.bot[i] - self.top[i] + add_to_c,
                                  self.right[i] - self.left[i] + add_to_c, 3), dtype=np.uint8)
        if self.vars['convert_for_motion']['logical'] != 'None':
            self.converted_video2 = np.zeros((len(self.data_list), self.bot[i] - self.top[i] + add_to_c,
                                              self.right[i] - self.left[i] + add_to_c), dtype=float)
        first_dict = TDict()
        second_dict = TDict()
        c_spaces = []
        for k, v in self.vars['convert_for_motion'].items():
            if k != 'logical' and v.sum() > 0:
                if k[-1] != '2':
                    first_dict[k] = v
                    c_spaces.append(k)
                else:
                    second_dict[k[:-1]] = v
                    c_spaces.append(k[:-1])
        prev_img = None
        background = None
        background2 = None
        is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
        for image_i, image_name in enumerate(self.data_list):
            img = read_and_rotate(image_name, prev_img, self.all['raw_images'], is_landscape, self.first_image.crop_coord)
            prev_img = deepcopy(img)
            # if self.videos.first_image.crop_coord is not None:
            #     img = img[self.videos.first_image.crop_coord[0]:self.videos.first_image.crop_coord[1],
            #               self.videos.first_image.crop_coord[2]:self.videos.first_image.crop_coord[3], :]
            img = img[self.top[arena - 1]: (self.bot[arena - 1] + add_to_c),
                      self.left[arena - 1]: (self.right[arena - 1] + add_to_c), :]

            if self.vars['already_greyscale']:
                if self.reduce_image_dim:
                    self.converted_video[image_i, ...] = img[:, :, 0]
                else:
                    self.converted_video[image_i, ...] = img
            else:
                self.visu[image_i, ...] = img
                if self.vars['subtract_background']:
                    background = self.vars['background_list'][i]
                    if self.vars['convert_for_motion']['logical'] != 'None':
                        background2 = self.vars['background_list2'][i]
                greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces, first_dict,
                                                                                     second_dict, background, background2,
                                                                                     self.vars['lose_accuracy_to_save_memory'])
                self.converted_video[image_i, ...] = greyscale_image
                if self.vars['convert_for_motion']['logical'] != 'None':
                    self.converted_video2[image_i, ...] = greyscale_image2
                # csc = OneImageAnalysis(img)
                # else:
                #     csc.generate_color_space_combination(c_spaces, first_dict, second_dict, None, None)
                #     # self.converted_video[image_i, ...] = csc.image
                #     self.converted_video[image_i, ...] = csc.image
                #     if self.vars['convert_for_motion']['logical'] != 'None':
                #         self.converted_video2[image_i, ...] = csc.image2

        # write_video(self.visu, f"ind_{arena}{self.vars['videos_extension']}", is_color=True, fps=1)

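    # Shapes produced above for one arena, assuming T frames and an arena crop
    # of H x W pixels (add_to_c makes the bottom/right bounds inclusive):
    #     converted_video  -> (T, H, W)    float, the segmentation channel
    #     converted_video2 -> (T, H, W)    float, only when a second color
    #                                      space combination is in use
    #     visu             -> (T, H, W, 3) uint8, the raw BGR crop kept for
    #                                      display and video writing
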
    def update_output_list(self):
        self.vars['descriptors'] = {}
        # self.vars['descriptors']['final_area'] = True  # [False, True, False]
        # if self.vars['first_move_threshold'] is not None:
        #     self.vars['descriptors']['first_move'] = True  # [False, True, False]

        # if self.vars['iso_digi_analysis']:
        #     self.vars['descriptors']['is_growth_isotropic'] = True  # [False, True, False]
        #     self.vars['descriptors']['iso_digi_transi'] = True  # [False, True, False]

        # if self.vars['oscilacyto_analysis']:
        #     self.vars['descriptors']['max_magnitude'] = True  # [False, True, False]
        #     self.vars['descriptors']['frequency_of_max_magnitude'] = True  # [False, True, False]
        for descriptor in self.all['descriptors'].keys():
            if descriptor == 'standard_deviation_xy':
                self.vars['descriptors']['standard_deviation_x'] = self.all['descriptors'][descriptor]
                self.vars['descriptors']['standard_deviation_y'] = self.all['descriptors'][descriptor]
            elif descriptor == 'skewness_xy':
                self.vars['descriptors']['skewness_x'] = self.all['descriptors'][descriptor]
                self.vars['descriptors']['skewness_y'] = self.all['descriptors'][descriptor]
            elif descriptor == 'kurtosis_xy':
                self.vars['descriptors']['kurtosis_x'] = self.all['descriptors'][descriptor]
                self.vars['descriptors']['kurtosis_y'] = self.all['descriptors'][descriptor]
            elif descriptor == 'major_axes_len_and_angle':
                self.vars['descriptors']['major_axis_len'] = self.all['descriptors'][descriptor]
                self.vars['descriptors']['minor_axis_len'] = self.all['descriptors'][descriptor]
                self.vars['descriptors']['axes_orientation'] = self.all['descriptors'][descriptor]
            else:
                if np.isin(descriptor, list(from_shape_descriptors_class.keys())):
                    self.vars['descriptors'][descriptor] = self.all['descriptors'][descriptor]
        self.vars['descriptors']['cluster_number'] = self.vars['oscilacyto_analysis']
        self.vars['descriptors']['mean_cluster_area'] = self.vars['oscilacyto_analysis']
        self.vars['descriptors']['vertices_number'] = self.vars['network_analysis']
        self.vars['descriptors']['edges_number'] = self.vars['network_analysis']
        self.vars['descriptors']['newly_explored_area'] = self.vars['do_fading']
        """ if self.vars['descriptors_means']:
                self.vars['output_list'] += [f'{descriptor}_mean']
                self.vars['output_list'] += [f'{descriptor}_std']
            if self.vars['descriptors_regressions']:
                self.vars['output_list'] += [f"{descriptor}_reg_start"]
                self.vars['output_list'] += [f"{descriptor}_reg_end"]
                self.vars['output_list'] += [f'{descriptor}_slope']
                self.vars['output_list'] += [f'{descriptor}_intercept']
        """

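    # update_output_list fans composite descriptors out into one column per
    # scalar: a single 'standard_deviation_xy' choice becomes the two output
    # columns 'standard_deviation_x' and 'standard_deviation_y', and
    # 'major_axes_len_and_angle' becomes three columns. Analysis-dependent
    # outputs (cluster counts, network vertices/edges, newly explored area)
    # are switched on by their corresponding analysis flags.
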
    def update_available_core_nb(self, image_bit_number=256, video_bit_number=140):  # video_bit_number=176
        if self.vars['lose_accuracy_to_save_memory']:
            video_bit_number -= 56
        if self.vars['convert_for_motion']['logical'] != 'None':
            video_bit_number += 64
            if self.vars['lose_accuracy_to_save_memory']:
                video_bit_number -= 56
        if self.vars['already_greyscale']:
            video_bit_number -= 64
        if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
            video_bit_number += 16
            image_bit_number += 128
        if self.vars['save_coord_network'] or self.vars['network_analysis']:
            video_bit_number += 8
            image_bit_number += 64

        if isinstance(self.bot, list):
            one_image_memory = np.multiply((self.bot[0] - self.top[0] + 1),
                                           (self.right[0] - self.left[0] + 1)).max().astype(np.uint64)
        else:
            one_image_memory = np.multiply((self.bot - self.top + 1).astype(np.uint64),
                                           (self.right - self.left + 1).astype(np.uint64)).max()
        one_video_memory = self.vars['img_number'] * one_image_memory
        necessary_memory = (one_image_memory * image_bit_number + one_video_memory * video_bit_number) * 1.16415e-10
        available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
        max_repeat_in_memory = (available_memory // necessary_memory).astype(np.uint16)
        if max_repeat_in_memory > 1:
            max_repeat_in_memory = np.max(((available_memory // (2 * necessary_memory)).astype(np.uint16), 1))
        # if sys.platform.startswith('win'):
        #     available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
        # else:

        self.cores = np.min((self.all['cores'], max_repeat_in_memory))
        if self.cores > self.sample_number:
            self.cores = self.sample_number
        return np.round(np.absolute(available_memory - necessary_memory), 3)

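    # The constant 1.16415e-10 above is 1 / (8 * 2**30): it converts a bit
    # count into gibibytes, matching the GiB scale of
    # virtual_memory().available >> 30. A rough worked example, assuming a
    # 1000 x 1000 pixel arena, 100 frames, and the default 256 bits per image
    # pixel and 140 bits per video pixel:
    #     one_image_memory  = 1e6 pixels
    #     necessary_memory ~= (1e6 * 256 + 1e8 * 140) * 1.16415e-10 ~= 1.66 GiB
    # The core count is then capped so all parallel arenas fit in free RAM.
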
    def update_one_row_per_arena(self, i, table_to_add):
        if not self.vars['several_blob_per_arena']:
            if self.one_row_per_arena is None:
                self.one_row_per_arena = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(table_to_add)), dtype=float),
                                                      columns=table_to_add.keys())
            self.one_row_per_arena.iloc[i, :] = table_to_add.values()

    def update_one_row_per_frame(self, i, j, table_to_add):
        if not self.vars['several_blob_per_arena']:
            if self.one_row_per_frame is None:
                self.one_row_per_frame = pd.DataFrame(index=range(len(self.vars['analyzed_individuals']) *
                                                                  self.vars['img_number']),
                                                      columns=table_to_add.keys())
            self.one_row_per_frame.iloc[i:j, :] = table_to_add

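    # one_row_per_frame is a long-format table: every arena owns a contiguous
    # block of img_number rows and the caller supplies the block bounds,
    # presumably i = arena_index * img_number and j = i + img_number, so that
    # e.g. arena 2 of a 100-frame experiment writes rows 100 to 199.
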
    def instantiate_tables(self):
        self.update_output_list()
        logging.info("Instantiate results tables and validation images")
        self.one_row_per_oscillating_cluster = None
        self.fractal_box_sizes = None
        # if self.vars['oscilacyto_analysis']:
        #     self.one_row_per_oscillating_cluster = pd.DataFrame(columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size',
        #                                                                  'edge_distance'])
        # if self.vars['fractal_analysis']:
        #     self.fractal_box_sizes = pd.DataFrame(columns=['arena', 'time', 'fractal_box_lengths', 'fractal_box_widths'])

        if self.vars['already_greyscale']:
            if len(self.first_image.bgr.shape) == 2:
                self.first_image.bgr = np.stack((self.first_image.bgr, self.first_image.bgr, self.first_image.bgr), axis=2).astype(np.uint8)
            if len(self.last_image.bgr.shape) == 2:
                self.last_image.bgr = np.stack((self.last_image.bgr, self.last_image.bgr, self.last_image.bgr), axis=2).astype(np.uint8)
            self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}

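    # For already-greyscale inputs, instantiate_tables promotes the validation
    # images to 3-channel BGR by stacking the single channel three times, so
    # colored contours can be drawn on them, and pins convert_for_motion to a
    # unit 'bgr' weighting with no second combination ('logical': 'None').
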
    def add_analysis_visualization_to_first_and_last_images(self, i, first_visualization, last_visualization):
        cr = ((self.top[i], self.bot[i] + 1),
              (self.left[i], self.right[i] + 1))
        if self.vars['arena_shape'] == 'circle':
            ellipse = Ellipse((cr[0][1] - cr[0][0], cr[1][1] - cr[1][0])).create()
            ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
            first_visualization *= ellipse
            self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
            self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += first_visualization
            last_visualization *= ellipse
            self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
            self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += last_visualization
        else:
            self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = first_visualization
            self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = last_visualization

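    # The circular-arena branch above composites with a binary ellipse mask m:
    # pixels outside the arena keep the original image (bgr *= 1 - m) while
    # pixels inside are replaced by the visualization (+= visu * m), i.e.
    # result = bgr * (1 - m) + visu * m, the usual masked-overlay identity.
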
    def save_tables(self):
        logging.info("Save results tables and validation images")
        if not self.vars['several_blob_per_arena']:
            try:
                self.one_row_per_arena.to_csv("one_row_per_arena.csv", sep=";", index=False, lineterminator='\n')
                del self.one_row_per_arena
            except PermissionError:
                logging.error("Never let one_row_per_arena.csv open when Cellects runs")
                self.message_from_thread.emit("Never let one_row_per_arena.csv open when Cellects runs")
            try:
                self.one_row_per_frame.to_csv("one_row_per_frame.csv", sep=";", index=False, lineterminator='\n')
                del self.one_row_per_frame
            except PermissionError:
                logging.error("Never let one_row_per_frame.csv open when Cellects runs")
                self.message_from_thread.emit("Never let one_row_per_frame.csv open when Cellects runs")
        if self.vars['oscilacyto_analysis']:
            try:
                if self.one_row_per_oscillating_cluster is None:
                    self.one_row_per_oscillating_cluster = pd.DataFrame(columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size',
                                                                                 'edge_distance'])
                self.one_row_per_oscillating_cluster.to_csv("one_row_per_oscillating_cluster.csv", sep=";", index=False,
                                                            lineterminator='\n')
                del self.one_row_per_oscillating_cluster
            except PermissionError:
                logging.error("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
                self.message_from_thread.emit("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")

        if self.vars['fractal_analysis']:
            if os.path.isfile("oscillating_clusters_temporal_dynamics.h5"):
                array_names = get_h5_keys("oscillating_clusters_temporal_dynamics.h5")
                arena_fractal_dynamics = read_h5_array("oscillating_clusters_temporal_dynamics.h5", key=array_names[0])
                arena_fractal_dynamics = np.hstack((np.repeat(np.uint32(array_names[0][-1]),
                                                              arena_fractal_dynamics.shape[0])[:, None],
                                                    arena_fractal_dynamics))
                for array_name in array_names[1:]:
                    fractal_dynamics = read_h5_array("oscillating_clusters_temporal_dynamics.h5", key=array_name)
                    fractal_dynamics = np.hstack((np.repeat(np.uint32(array_name[-1]),
                                                            fractal_dynamics.shape[0])[:, None],
                                                  fractal_dynamics))
                    arena_fractal_dynamics = np.vstack((arena_fractal_dynamics, fractal_dynamics))
                arena_fractal_dynamics = pd.DataFrame(arena_fractal_dynamics,
                                                      columns=["arena", "time", "cluster_id", "flow", "centroid_y",
                                                               "centroid_x", "area", "inner_network_area",
                                                               "box_count_dim", "inner_network_box_count_dim"])
                arena_fractal_dynamics.to_csv("oscillating_clusters_temporal_dynamics.csv", sep=";", index=False,
                                              lineterminator='\n')
                del arena_fractal_dynamics
                os.remove("oscillating_clusters_temporal_dynamics.h5")
        if self.all['extension'] == '.JPG':
            extension = '.PNG'
        else:
            extension = '.JPG'
        cv2.imwrite(f"Analysis efficiency, last image{extension}", self.last_image.bgr)
        cv2.imwrite(
            f"Analysis efficiency, {np.ceil(self.vars['img_number'] / 10).astype(np.uint64)}th image{extension}",
            self.first_image.bgr)
        # self.save_analysis_parameters.to_csv("analysis_parameters.csv", sep=";")

        software_settings = deepcopy(self.vars)
        for key in ['descriptors', 'analyzed_individuals', 'exif', 'dims', 'origin_list', 'background_list', 'background_list2', 'descriptors', 'folder_list', 'sample_number_per_folder']:
            software_settings.pop(key, None)
        global_settings = deepcopy(self.all)
        for key in ['analyzed_individuals', 'night_mode', 'expert_mode', 'is_auto', 'arena', 'video_option', 'compute_all_options', 'vars', 'dims', 'origin_list', 'background_list', 'background_list2', 'descriptors', 'folder_list', 'sample_number_per_folder']:
            global_settings.pop(key, None)
        software_settings.update(global_settings)
        software_settings = pd.DataFrame.from_dict(software_settings, columns=["Setting"], orient='index')
        try:
            software_settings.to_csv("software_settings.csv", sep=";")
        except PermissionError:
            logging.error("Never let software_settings.csv open when Cellects runs")
            self.message_from_thread.emit("Never let software_settings.csv open when Cellects runs")

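    # save_tables flattens the per-arena HDF5 arrays into one CSV: each h5 key
    # holds, presumably, a (rows, 9) array of cluster dynamics; an 'arena'
    # column is prepended (the [:, None] reshape keeps the np.hstack inputs
    # two-dimensional) and the per-arena blocks are stacked with np.vstack.
    # Note that only the last character of each h5 key is parsed as the arena
    # number.
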
# if __name__ == "__main__":
#     po = ProgramOrganizer()
#     os.chdir(Path("D:\Directory\Data\Example\Example\Example"))
#     # po.all['global_pathway'] = Path("C:/Users/APELab/Documents/Aurèle/Cellects/install/Installer_and_example/Example")
#     po.load_variable_dict()
#     po.all['global_pathway']
#     po.load_data_to_run_cellects_quickly()
#     po.all['global_pathway']
#     po.save_data_to_run_cellects_quickly()