cellects 0.1.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. cellects/__init__.py +0 -0
  2. cellects/__main__.py +49 -0
  3. cellects/config/__init__.py +0 -0
  4. cellects/config/all_vars_dict.py +154 -0
  5. cellects/core/__init__.py +0 -0
  6. cellects/core/cellects_paths.py +30 -0
  7. cellects/core/cellects_threads.py +1464 -0
  8. cellects/core/motion_analysis.py +1931 -0
  9. cellects/core/one_image_analysis.py +1065 -0
  10. cellects/core/one_video_per_blob.py +679 -0
  11. cellects/core/program_organizer.py +1347 -0
  12. cellects/core/script_based_run.py +154 -0
  13. cellects/gui/__init__.py +0 -0
  14. cellects/gui/advanced_parameters.py +1258 -0
  15. cellects/gui/cellects.py +189 -0
  16. cellects/gui/custom_widgets.py +789 -0
  17. cellects/gui/first_window.py +449 -0
  18. cellects/gui/if_several_folders_window.py +239 -0
  19. cellects/gui/image_analysis_window.py +1909 -0
  20. cellects/gui/required_output.py +232 -0
  21. cellects/gui/video_analysis_window.py +656 -0
  22. cellects/icons/__init__.py +0 -0
  23. cellects/icons/cellects_icon.icns +0 -0
  24. cellects/icons/cellects_icon.ico +0 -0
  25. cellects/image_analysis/__init__.py +0 -0
  26. cellects/image_analysis/cell_leaving_detection.py +54 -0
  27. cellects/image_analysis/cluster_flux_study.py +102 -0
  28. cellects/image_analysis/extract_exif.py +61 -0
  29. cellects/image_analysis/fractal_analysis.py +184 -0
  30. cellects/image_analysis/fractal_functions.py +108 -0
  31. cellects/image_analysis/image_segmentation.py +272 -0
  32. cellects/image_analysis/morphological_operations.py +867 -0
  33. cellects/image_analysis/network_functions.py +1244 -0
  34. cellects/image_analysis/one_image_analysis_threads.py +289 -0
  35. cellects/image_analysis/progressively_add_distant_shapes.py +246 -0
  36. cellects/image_analysis/shape_descriptors.py +981 -0
  37. cellects/utils/__init__.py +0 -0
  38. cellects/utils/formulas.py +881 -0
  39. cellects/utils/load_display_save.py +1016 -0
  40. cellects/utils/utilitarian.py +516 -0
  41. cellects-0.1.0.dev1.dist-info/LICENSE.odt +0 -0
  42. cellects-0.1.0.dev1.dist-info/METADATA +131 -0
  43. cellects-0.1.0.dev1.dist-info/RECORD +46 -0
  44. cellects-0.1.0.dev1.dist-info/WHEEL +5 -0
  45. cellects-0.1.0.dev1.dist-info/entry_points.txt +2 -0
  46. cellects-0.1.0.dev1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1347 @@
+ #!/usr/bin/env python3
+ """This file contains the class linking the graphical interface to the computations.
+ First, Cellects analyzes one image to find a color space combination maximizing the contrast between the specimens
+ and the background.
+ Second, Cellects automatically delineates each arena.
+ Third, Cellects writes one video per arena.
+ Fourth, Cellects segments each video and applies post-processing algorithms to improve the segmentation.
+ Fifth, Cellects extracts variables and stores them in .csv files.
+ """
+
+ import logging
+ import os
+ import pickle
+ import sys
+ from copy import deepcopy
+ import cv2
+ from numba.typed import Dict as TDict
+ import pandas as pd
+ import numpy as np
+ from psutil import virtual_memory
+ from pathlib import Path
+ import natsort
+ from cellects.image_analysis.image_segmentation import generate_color_space_combination
+ from cellects.image_analysis.extract_exif import extract_time  # named exif
+ from cellects.image_analysis.one_image_analysis_threads import ProcessFirstImage
+ from cellects.core.one_image_analysis import OneImageAnalysis
+ from cellects.utils.load_display_save import PickleRick, read_and_rotate, readim, is_raw_image, read_h5_array, get_h5_keys
+ from cellects.utils.utilitarian import insensitive_glob, vectorized_len
+ from cellects.image_analysis.morphological_operations import Ellipse, cross_33
+ from cellects.core.cellects_paths import CELLECTS_DIR, ALL_VARS_PKL_FILE
+ from cellects.core.motion_analysis import MotionAnalysis
+ from cellects.core.one_video_per_blob import OneVideoPerBlob
+ from cellects.config.all_vars_dict import DefaultDicts
+ from cellects.image_analysis.shape_descriptors import from_shape_descriptors_class
+
+
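+ # The five steps in the module docstring map onto the methods of the class below.
+ # A minimal headless sketch (illustration only; the path and sample count are
+ # placeholders, and analyze_without_gui() shows the full, authoritative sequence):
+ #
+ # po = ProgramOrganizer()
+ # po.load_variable_dict()                       # load saved parameters
+ # po.all['global_pathway'] = "/path/to/images"  # hypothetical experiment folder
+ # po.all['first_folder_sample_number'] = 6
+ # po.look_for_data()
+ # po.get_first_image()
+ # po.fast_image_segmentation(True)              # step 1: color space combination
+ # po.delineate_each_arena()                     # step 2: one arena per specimen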
+ class ProgramOrganizer:
+     def __init__(self):
+         """
+         This class stores all variables required for the analysis, as well as
+         the methods to process them.
+         Global variables (i.e. those that do not concern the MotionAnalysis)
+         are stored directly in self.
+         Variables used in the MotionAnalysis class are stored in a dict
+         called self.vars
+         """
+         if os.path.isfile('PickleRick.pkl'):
+             os.remove('PickleRick.pkl')
+         if os.path.isfile('PickleRick0.pkl'):
+             os.remove('PickleRick0.pkl')
+         if os.path.isfile(Path(CELLECTS_DIR.parent / 'PickleRick.pkl')):
+             os.remove(Path(CELLECTS_DIR.parent / 'PickleRick.pkl'))
+         if os.path.isfile(Path(CELLECTS_DIR.parent / 'PickleRick0.pkl')):
+             os.remove(Path(CELLECTS_DIR.parent / 'PickleRick0.pkl'))
+         # self.delineation_number = 0
+         self.one_arena_done: bool = False
+         self.reduce_image_dim: bool = False
+         self.first_exp_ready_to_run: bool = False
+         self.data_to_save = {'first_image': False, 'coordinates': False, 'exif': False, 'vars': False}
+         self.videos = None
+         self.motion = None
+         self.analysis_instance = None
+         self.computed_video_options = np.zeros(5, bool)
+         self.vars = {}
+         self.all = {}
+         self.all['folder_list'] = []
+         self.all['first_detection_frame'] = 1
+         self.first_im = None
+         self.last_im = None
+         self.vars['background_list'] = []
+         self.starting_blob_hsize_in_pixels = None
+         self.vars['first_move_threshold'] = None
+         self.vars['convert_for_origin'] = None
+         self.vars['convert_for_motion'] = None
+         self.current_combination_id = 0
+         self.data_list = []
+         self.one_row_per_arena = None
+         self.one_row_per_frame = None
+         self.one_row_per_oscillating_cluster = None
+         # self.fractal_box_sizes = None
+
+     def save_variable_dict(self):
+         logging.info("Save the parameter dictionaries in the Cellects folder")
+         self.all['vars'] = self.vars
+         all_vars = deepcopy(self.all)
+         if not self.all['keep_cell_and_back_for_all_folders']:
+             all_vars['bio_mask'] = None
+             all_vars['back_mask'] = None
+         pickle_rick = PickleRick(0)
+         pickle_rick.write_file(all_vars, ALL_VARS_PKL_FILE)
+
+     def load_variable_dict(self):
+         # loading_succeed: bool = False
+         # if os.path.isfile('Data to run Cellects quickly.pkl'):
+         #     try:
+         #         with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
+         #             data_to_run_cellects_quickly = pickle.load(fileopen)
+         #         if 'vars' in data_to_run_cellects_quickly:
+         #             self.vars = data_to_run_cellects_quickly['vars']
+         #             loading_succeed = True
+         #             logging.info("Successfully loaded vars from the data folder")
+         #     except EOFError:
+         #         logging.error("Pickle error: will try to load vars from the Cellects folder")
+
+         if os.path.isfile(ALL_VARS_PKL_FILE):
+             logging.info("Load the parameters from all_vars.pkl in the config folder of Cellects")
+             try:  # NEW
+                 with open(ALL_VARS_PKL_FILE, 'rb') as fileopen:  # NEW
+                     self.all = pickle.load(fileopen)  # NEW
+                 self.vars = self.all['vars']
+                 self.update_data()
+                 logging.info("Successfully loaded the parameter dictionaries from the Cellects folder")
+                 logging.info(os.getcwd())
+             except Exception as exc:  # NEW
+                 logging.error(f"Initialize default parameters because of error: {exc}")  # NEW
+                 default_dicts = DefaultDicts()  # NEW
+                 self.all = default_dicts.all  # NEW
+                 self.vars = default_dicts.vars  # NEW
+
+             # pickle_rick = PickleRick(0)
+             # self.all = pickle_rick.read_file(ALL_VARS_PKL_FILE)
+             # self.vars = self.all['vars']
+             # logging.info("Successfully loaded the parameter dictionaries from the Cellects folder")
+             # logging.info(os.getcwd())
+         else:
+             logging.info("Initialize default parameters")
+             default_dicts = DefaultDicts()
+             self.all = default_dicts.all
+             self.vars = default_dicts.vars
+
+         # self.initialize_all_dict()
+         # self.initialize_vars_dict()
+
+         # if os.path.isfile(ALL_VARS_PKL_FILE):
+         #     try:
+         #         with open(ALL_VARS_PKL_FILE, 'rb') as fileopen:
+         #             self.all = pickle.load(fileopen)
+         #         self.vars = self.all['vars']
+         #         logging.info("Successfully loaded the parameter dictionaries from the Cellects folder")
+         #     except EOFError:
+         #         logging.error("Pickle error, reinitialize the parameter dictionaries")
+         #         self.initialize_all_dict()
+         #         self.initialize_vars_dict()
+         #     # self.all = np.load(
+         #     #     soft_path + "\\all_vars.npy", allow_pickle='TRUE').item()
+         # else:
+         #     self.initialize_all_dict()
+         #     self.initialize_vars_dict()
+
+         if self.all['cores'] == 1:
+             self.all['cores'] = os.cpu_count() - 1
+         # if self.vars['min_ram_free'] == 0.87:
+         #     self.vars['min_ram_free'] = (
+         #         20709376) * 0.10
+
+     def analyze_without_gui(self):
+         # If needed, load the 'all' dict before calling this function
+         # self = po
+         # if len(self.all['folder_list']) == 0:
+         #     folder_list = "/"
+         # else:
+         #     folder_list = self.all['folder_list']
+         # for exp_i, folder_name in enumerate(folder_list):
+         #     # exp_i = 0 ; folder_name = folder_list
+
+         self = ProgramOrganizer()
+         self.load_variable_dict()
+         # dd = DefaultDicts()
+         # self.all = dd.all
+         # self.vars = dd.vars
+         self.all['global_pathway'] = "/Users/Directory/Data/dossier1"
+         self.all['first_folder_sample_number'] = 6
+         # self.all['global_pathway'] = "D:\Directory\Data\Audrey\dosier1"
+         # self.all['first_folder_sample_number'] = 6
+         # self.all['radical'] = "IMG"
+         # self.all['extension'] = ".jpg"
+         # self.all['im_or_vid'] = 0
+         self.look_for_data()
+         self.load_data_to_run_cellects_quickly()
+         if not self.first_exp_ready_to_run:
+             self.get_first_image()
+             self.fast_image_segmentation(True)
+             # self.first_image.find_first_im_csc(sample_number=self.sample_number,
+             #                                    several_blob_per_arena=None,
+             #                                    spot_shape=None, spot_size=None,
+             #                                    kmeans_clust_nb=2,
+             #                                    biomask=None, backmask=None,
+             #                                    color_space_dictionaries=None,
+             #                                    carefully=True)
+             self.cropping(is_first_image=True)
+             self.get_average_pixel_size()
+             self.delineate_each_arena()
+             self.get_background_to_subtract()
+             self.get_origins_and_backgrounds_lists()
+             self.get_last_image()
+             self.fast_image_segmentation(is_first_image=False)
+             self.find_if_lighter_background()
+             self.extract_exif()
+         self.update_output_list()
+         look_for_existing_videos = insensitive_glob('ind_' + '*' + '.npy')
+         there_already_are_videos = len(look_for_existing_videos) == len(self.vars['analyzed_individuals'])
+         logging.info(
+             f"{len(look_for_existing_videos)} .npy video files found for {len(self.vars['analyzed_individuals'])} arenas to analyze")
+         do_write_videos = not there_already_are_videos or (
+                 there_already_are_videos and self.all['overwrite_unaltered_videos'])
+         if do_write_videos:
+             self.videos = OneVideoPerBlob(self.first_image, self.starting_blob_hsize_in_pixels, self.all['raw_images'])
+             self.videos.left = self.left
+             self.videos.right = self.right
+             self.videos.top = self.top
+             self.videos.bot = self.bot
+             self.videos.first_image.shape_number = self.sample_number
+             self.videos.write_videos_as_np_arrays(
+                 self.data_list, self.vars['min_ram_free'], not self.vars['already_greyscale'], self.reduce_image_dim)
+         self.instantiate_tables()
+
+         i = 1
+         show_seg = True
+
+         if os.path.isfile(f"coord_specimen{i + 1}_t720_y1475_x1477.npy"):
+             binary_coord = np.load(f"coord_specimen{i + 1}_t720_y1475_x1477.npy")
+             l = [i, i + 1, self.vars, False, False, show_seg, None]
+             sav = self
+             self = MotionAnalysis(l)
+             self.binary = np.zeros((720, 1475, 1477), dtype=np.uint8)
+             self.binary[binary_coord[0, :], binary_coord[1, :], binary_coord[2, :]] = 1
+         else:
+             l = [i, i + 1, self.vars, True, False, show_seg, None]
+             sav = self
+             self = MotionAnalysis(l)
+         self.get_descriptors_from_binary()
+         self.detect_growth_transitions()
+         # self.networks_detection(show_seg)
+         self.study_cytoscillations(show_seg)
+
+         # for i, arena in enumerate(self.vars['analyzed_individuals']):
+         #     l = [i, i + 1, self.vars, True, False, False, None]
+         #     analysis_i = MotionAnalysis(l)
+         #     self.add_analysis_visualization_to_first_and_last_images(i, analysis_i.efficiency_test_1,
+         #                                                              analysis_i.efficiency_test_2)
+         # self.save_tables()
+         #
+         # self = MotionAnalysis(l)
+         # l = [5, 6, self.vars, True, False, False, None]
+         # sav = self
+         # self.get_descriptors_from_binary()
+         # self.detect_growth_transitions()
+
+     def look_for_data(self):
+         # global_pathway = 'I:\Directory\Tracking_data\generalization_and_potentiation\drop_nak1'
+         os.chdir(Path(self.all['global_pathway']))
+         logging.info(f"Dir: {self.all['global_pathway']}")
+         self.data_list = insensitive_glob(
+             self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
+         lengths = vectorized_len(self.data_list)
+         if np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
+             logging.error("File names present strong length variations and cannot be sorted correctly.")
+         self.data_list = natsort.natsorted(self.data_list)
+         self.all['folder_list'] = []
+         self.all['folder_number'] = 1
+         if len(self.data_list) == 0:
+             content = os.listdir()
+             for obj in content:
+                 if not os.path.isfile(obj):
+                     data_list = insensitive_glob(obj + "/" + self.all['radical'] + '*' + self.all['extension'])
+                     if len(data_list) > 0:
+                         self.all['folder_list'].append(obj)
+                         self.all['folder_number'] += 1
+             self.all['folder_list'] = np.sort(self.all['folder_list'])
+
+         if isinstance(self.all['sample_number_per_folder'], int) or len(self.all['sample_number_per_folder']) == 1:
+             self.all['sample_number_per_folder'] = np.repeat(self.all['sample_number_per_folder'],
+                                                              self.all['folder_number'])
+         else:
+             # if isinstance(self.all['sample_number_per_folder'], int):
+             #     self.sample_number = self.all['sample_number_per_folder']
+             # else:
+             self.sample_number = self.all['first_folder_sample_number']
+             # if len(self.all['folder_list']) > 0:
+             #     self.update_folder_id(self.all['sample_number_per_folder'][0], self.all['folder_list'][0])
+             # else:
+             #     self.all['folder_list'] = []
+             #     self.sample_number = self.all['sample_number_per_folder']
+
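+     # Note on the sorting above (illustration with hypothetical file names):
+     # insensitive_glob returns files ordered by last modification date, so
+     # natsort.natsorted(['IMG2.jpg', 'IMG10.jpg', 'IMG1.jpg']) restores the
+     # capture order ['IMG1.jpg', 'IMG2.jpg', 'IMG10.jpg']; the np.log10 check
+     # only flags name lengths too irregular for this sort to be trusted.
+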
+     def update_folder_id(self, sample_number, folder_name=""):
+         os.chdir(Path(self.all['global_pathway']) / folder_name)
+         self.data_list = insensitive_glob(
+             self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
+         # Sorting is necessary when modifications (like rotation) changed the last modification date
+         lengths = vectorized_len(self.data_list)
+         if np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
+             logging.error("File names present strong length variations and cannot be sorted correctly.")
+         self.data_list = natsort.natsorted(self.data_list)
+         if self.all['im_or_vid'] == 1:
+             self.sample_number = len(self.data_list)
+         else:
+             self.vars['img_number'] = len(self.data_list)
+             self.sample_number = sample_number
+         if len(self.vars['analyzed_individuals']) != sample_number:
+             self.vars['analyzed_individuals'] = np.arange(sample_number) + 1
+
+     def load_data_to_run_cellects_quickly(self):
+         current_global_pathway = self.all['global_pathway']
+         folder_number = self.all['folder_number']
+         if folder_number > 1:
+             folder_list = deepcopy(self.all['folder_list'])
+             sample_number_per_folder = deepcopy(self.all['sample_number_per_folder'])
+
+         if os.path.isfile('Data to run Cellects quickly.pkl'):
+             pickle_rick = PickleRick()
+             data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
+             if data_to_run_cellects_quickly is None:
+                 data_to_run_cellects_quickly = {}
+
+             # try:
+             #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
+             #         data_to_run_cellects_quickly = pickle.load(fileopen)
+             # except pickle.UnpicklingError:
+             #     logging.error("Pickle error")
+             #     data_to_run_cellects_quickly = {}
+             if ('validated_shapes' in data_to_run_cellects_quickly) and ('coordinates' in data_to_run_cellects_quickly) and ('all' in data_to_run_cellects_quickly):
+                 logging.info("Successfully loaded Data to run Cellects quickly.pkl from the user chosen directory")
+                 self.all = data_to_run_cellects_quickly['all']
+                 # If you want to add a new variable, first run an updated version of all_vars_dict,
+                 # then put a breakpoint here and run the following + self.save_data_to_run_cellects_quickly():
+                 # self.all['vars']['lose_accuracy_to_save_memory'] = False
+                 self.vars = self.all['vars']
+                 self.update_data()
+                 print(self.vars['convert_for_motion'])
+                 folder_changed = False
+                 if current_global_pathway != self.all['global_pathway']:
+                     folder_changed = True
+                     logging.info(
+                         "Although the folder is ready, it is not in the same place as it was at creation; updating")
+                     self.all['global_pathway'] = current_global_pathway
+                 if folder_number > 1:
+                     self.all['global_pathway'] = current_global_pathway
+                     self.all['folder_list'] = folder_list
+                     self.all['folder_number'] = folder_number
+                     self.all['sample_number_per_folder'] = sample_number_per_folder
+
+                 if len(self.data_list) == 0:
+                     self.look_for_data()
+                 if folder_changed and folder_number > 1 and len(self.all['folder_list']) > 0:
+                     self.update_folder_id(self.all['sample_number_per_folder'][0], self.all['folder_list'][0])
+                 self.get_first_image()
+                 self.get_last_image()
+                 (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = data_to_run_cellects_quickly[
+                     'coordinates']
+                 if self.all['automatically_crop']:
+                     self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                     logging.info("Crop first image")
+                     self.first_image.automatically_crop(self.first_image.crop_coord)
+                     logging.info("Crop last image")
+                     self.last_image.automatically_crop(self.first_image.crop_coord)
+                 else:
+                     self.first_image.crop_coord = None
+                 # self.cropping(True)
+                 # self.cropping(False)
+                 self.first_image.validated_shapes = data_to_run_cellects_quickly['validated_shapes']
+                 self.first_image.im_combinations = []
+                 self.current_combination_id = 0
+                 self.first_image.im_combinations.append({})
+                 self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
+                 self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
+                 self.first_image.im_combinations[self.current_combination_id]['shape_number'] = data_to_run_cellects_quickly['shape_number']
+
+                 self.first_exp_ready_to_run = True
+                 print(f"Overwrite is {self.all['overwrite_unaltered_videos']}")
+                 if self.vars['subtract_background'] and len(self.vars['background_list']) == 0:
+                     self.first_exp_ready_to_run = False
+             else:
+                 self.first_exp_ready_to_run = False
+         else:
+             self.first_exp_ready_to_run = False
+         if self.first_exp_ready_to_run:
+             logging.info("The current (or the first) folder is ready to run")
+         else:
+             logging.info("The current (or the first) folder is not ready to run")
+
+     def update_data(self):
+         dd = DefaultDicts()
+         all_changed = len(dd.all) != len(self.all)
+         vars_changed = len(dd.vars) != len(self.vars)
+         all_desc_changed = len(dd.all['descriptors']) != len(self.all['descriptors'])
+         vars_desc_changed = len(dd.vars['descriptors']) != len(self.vars['descriptors'])
+         if all_changed:
+             for key, val in dd.all.items():
+                 if key not in self.all:
+                     self.all[key] = val
+         if vars_changed:
+             for key, val in dd.vars.items():
+                 if key not in self.vars:
+                     self.vars[key] = val
+         if all_desc_changed:
+             for key, val in dd.all['descriptors'].items():
+                 if key not in self.all['descriptors']:
+                     self.all['descriptors'][key] = val
+         if vars_desc_changed:
+             for key, val in dd.vars['descriptors'].items():
+                 if key not in self.vars['descriptors']:
+                     self.vars['descriptors'][key] = val
+
+     def save_data_to_run_cellects_quickly(self, new_one_if_does_not_exist=True):
+         data_to_run_cellects_quickly = None
+         if os.path.isfile('Data to run Cellects quickly.pkl'):
+             logging.info("Update -Data to run Cellects quickly.pkl- in the user chosen directory")
+             pickle_rick = PickleRick()
+             data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
+             if data_to_run_cellects_quickly is None:
+                 logging.error("Failed to load Data to run Cellects quickly.pkl before update. Abort saving.")
+
+             # try:
+             #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
+             #         data_to_run_cellects_quickly = pickle.load(fileopen)
+             # except pickle.UnpicklingError:
+             #     logging.error("Pickle error")
+             #     data_to_run_cellects_quickly = {}
+         else:
+             if new_one_if_does_not_exist:
+                 logging.info("Create Data to run Cellects quickly.pkl in the user chosen directory")
+                 data_to_run_cellects_quickly = {}
+         if data_to_run_cellects_quickly is not None:
+             if self.data_to_save['first_image']:
+                 data_to_run_cellects_quickly['validated_shapes'] = self.first_image.im_combinations[self.current_combination_id]['binary_image']
+                 data_to_run_cellects_quickly['shape_number'] = self.first_image.im_combinations[self.current_combination_id]['shape_number']
+                 # data_to_run_cellects_quickly['converted_image'] = self.first_image.im_combinations[self.current_combination_id]['converted_image']
+             if self.data_to_save['coordinates']:
+                 data_to_run_cellects_quickly['coordinates'] = self.list_coordinates()
+                 logging.info("When they exist, overwrite unaltered videos")
+                 self.all['overwrite_unaltered_videos'] = True
+             if self.data_to_save['exif']:
+                 self.vars['exif'] = self.extract_exif()
+                 # data_to_run_cellects_quickly['exif'] = self.extract_exif()
+             # if self.data_to_save['background_and_origin_list']:
+             #     logging.info(f"Origin shape is {self.vars['origin_list'][0].shape}")
+             #     data_to_run_cellects_quickly['background_and_origin_list'] = [self.vars['origin_list'], self.vars['background_list'], self.vars['background_list2']]
+             self.all['vars'] = self.vars
+             print(self.vars['convert_for_motion'])
+             data_to_run_cellects_quickly['all'] = self.all
+             # data_to_run_cellects_quickly['all']['vars']['origin_state'] = "fluctuating"
+             pickle_rick = PickleRick()
+             pickle_rick.write_file(data_to_run_cellects_quickly, 'Data to run Cellects quickly.pkl')
+
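+     # Layout of the file written above (as derived from this method): the pickle
+     # 'Data to run Cellects quickly.pkl' holds a dict with the keys
+     # 'validated_shapes' (binary first image), 'shape_number', 'coordinates'
+     # (crop box plus per-arena left/right/top/bot, see list_coordinates below)
+     # and 'all' (all parameters, embedding self.vars under 'vars').
+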
+     def list_coordinates(self):
+         if self.first_image.crop_coord is None:
+             self.first_image.crop_coord = [0, self.first_image.image.shape[0], 0,
+                                            self.first_image.image.shape[1]]
+         videos_coordinates = self.first_image.crop_coord + [self.left, self.right, self.top, self.bot]
+         return videos_coordinates
+
+     def extract_exif(self):
+         if self.all['im_or_vid'] == 1:
+             timings = np.arange(self.vars['dims'][0])
+         else:
+             if sys.platform.startswith('win'):
+                 pathway = os.getcwd() + '\\'
+             else:
+                 pathway = os.getcwd() + '/'
+             arbitrary_time_step: bool = True
+             if self.all['extract_time_interval']:
+                 self.vars['time_step'] = 1
+                 try:
+                     timings = extract_time(self.data_list, pathway, self.all['raw_images'])
+                     timings = timings - timings[0]
+                     timings = timings / 60
+                     time_step = np.mean(np.diff(timings))
+                     digit_nb = 0
+                     for i in str(time_step):
+                         if i in {'.'}:
+                             pass
+                         elif i in {'0'}:
+                             digit_nb += 1
+                         else:
+                             break
+                     self.vars['time_step'] = np.round(time_step, digit_nb + 1)
+                     arbitrary_time_step = False
+                 except:
+                     pass
+             if arbitrary_time_step:
+                 timings = np.arange(0, self.vars['dims'][0] * self.vars['time_step'], self.vars['time_step'])
+                 timings = timings - timings[0]
+                 timings = timings / 60
+         return timings
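+
+     # Worked example of the rounding above (hypothetical timings, in minutes):
+     # timings = [0.0, 4.98, 10.03, 15.0] gives time_step = np.mean(np.diff(timings)) = 5.0;
+     # str(5.0) starts with '5', so digit_nb stays 0 and np.round(5.0, 1) keeps 5.0.
+     # For time_step = 0.0042, str(0.0042) contains three '0' characters before the
+     # first non-zero digit, so digit_nb = 3 and np.round(0.0042, 4) = 0.0042:
+     # the loop keeps one significant digit beyond the leading zeros.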
+
+     #
+     # if not os.path.isfile("timings.csv") or self.all['overwrite_cellects_data']:
+     #     if self.vars['time_step'] == 0:
+     #         if self.all['im_or_vid'] == 1:
+     #             savetxt("timings.csv", np.arange(self.vars['dims'][0]), fmt='%10d', delimiter=',')
+     #         else:
+     #             if sys.platform.startswith('win'):
+     #                 pathway = os.getcwd() + '\\'
+     #             else:
+     #                 pathway = os.getcwd() + '/'
+     #             timings = extract_time(self.data_list, pathway, self.all['raw_images'])
+     #             timings = timings - timings[0]
+     #             timings = timings / 60
+     #     else:
+     #         timings = np.arange(0, self.vars['dims'][0] * self.vars['time_step'], self.vars['time_step'])
+     #     savetxt("timings.csv", timings, fmt='%1.2f', delimiter=',')
+
+     def get_first_image(self):
+         logging.info("Load first image")
+         just_read_image = self.first_im is not None
+         self.reduce_image_dim = False
+         # just_read_image = self.analysis_instance is not None
+         if self.all['im_or_vid'] == 1:
+             cap = cv2.VideoCapture(self.data_list[0])
+             counter = 0
+             if not just_read_image:
+                 self.sample_number = len(self.data_list)
+                 self.vars['img_number'] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+                 self.analysis_instance = np.zeros(
+                     [int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
+                      int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3])
+                 while cap.isOpened() and counter < 1:
+                     ret, frame = cap.read()
+                     if counter == 0:
+                         self.first_im = frame
+                         self.analysis_instance[0, ...] = self.first_im
+                         break
+                 cap.release()
+             elif np.sum(self.analysis_instance[self.all['first_detection_frame'] - 1, ...] == 0):
+                 cap = cv2.VideoCapture(self.data_list[0])
+                 counter = 0
+                 while cap.isOpened() and (counter < self.all['first_detection_frame']):
+                     ret, frame = cap.read()
+                     # if self.reduce_image_dim:
+                     #     frame = frame[:, :, 0]
+                     self.analysis_instance[counter, ...] = frame
+                     counter += 1
+
+                 cap.release()
+             self.first_im = self.analysis_instance[
+                 self.all['first_detection_frame'] - 1, ...]
+             self.vars['dims'] = self.analysis_instance.shape[:3]
+
+         else:
+             self.vars['img_number'] = len(self.data_list)
+             self.all['raw_images'] = is_raw_image(self.data_list[0])
+             self.first_im = readim(self.data_list[self.all['first_detection_frame'] - 1], self.all['raw_images'])
+             # if self.reduce_image_dim:
+             #     self.first_im = self.first_im[:, :, 0]
+             self.vars['dims'] = [self.vars['img_number'], self.first_im.shape[0], self.first_im.shape[1]]
+             # self.first_im = readim(self.data_list[0], self.all['raw_images'])
+         if len(self.first_im.shape) == 3:
+             if np.all(np.equal(self.first_im[:, :, 0], self.first_im[:, :, 1])) and np.all(
+                     np.equal(self.first_im[:, :, 1], self.first_im[:, :, 2])):
+                 self.reduce_image_dim = True
+             if self.reduce_image_dim:
+                 self.first_im = self.first_im[:, :, 0]
+                 if self.all['im_or_vid'] == 1:
+                     self.analysis_instance = self.analysis_instance[:, :, :, 0]
+         self.first_image = OneImageAnalysis(self.first_im)
+         self.vars['already_greyscale'] = self.first_image.already_greyscale
+         if self.vars['already_greyscale']:
+             self.vars["convert_for_origin"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
+             self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
+         if np.mean((np.mean(self.first_image.image[2, :, ...]), np.mean(self.first_image.image[-3, :, ...]), np.mean(self.first_image.image[:, 2, ...]), np.mean(self.first_image.image[:, -3, ...]))) > 127:
+             self.vars['contour_color']: np.uint8 = 0
+         else:
+             self.vars['contour_color']: np.uint8 = 255
+         if self.all['first_detection_frame'] > 1:
+             self.vars['origin_state'] = 'invisible'
+
+     def get_last_image(self):
+         logging.info("Load last image")
+         if self.all['im_or_vid'] == 1:
+             cap = cv2.VideoCapture(self.data_list[0])
+             counter = 0
+             while cap.isOpened() and counter < self.vars['img_number']:
+                 ret, frame = cap.read()
+                 if self.reduce_image_dim:
+                     frame = frame[:, :, 0]
+                 self.analysis_instance[-1, ...] = frame
+                 # if counter == self.vars['img_number'] - 1:
+                 #     if self.reduce_image_dim:
+                 #         frame = frame[:, :, 0]
+                 #     break
+                 counter += 1
+             self.last_im = frame
+             cap.release()
+         else:
+             # self.last_im = readim(self.data_list[-1], self.all['raw_images'])
+             is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
+             self.last_im = read_and_rotate(self.data_list[-1], self.first_im, self.all['raw_images'], is_landscape)
+             if self.reduce_image_dim:
+                 self.last_im = self.last_im[:, :, 0]
+         self.last_image = OneImageAnalysis(self.last_im)
+
+     # self.message_when_thread_finished.emit("")
+     def fast_image_segmentation(self, is_first_image, biomask=None, backmask=None, spot_size=None):
+         if is_first_image:
+             self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
+                                                  self.all["bio_mask"], self.all["back_mask"], subtract_background=None,
+                                                  subtract_background2=None, grid_segmentation=False)
+             if not self.first_image.drift_correction_already_adjusted:
+                 self.vars['drift_already_corrected'] = self.first_image.check_if_image_border_attest_drift_correction()
+                 if self.vars['drift_already_corrected']:
+                     logging.info("Cellects detected that the images have already been corrected for drift")
+                     self.first_image.adjust_to_drift_correction(self.vars['convert_for_origin']['logical'])
+             if self.vars["grid_segmentation"]:
+                 self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
+                                                      self.all["bio_mask"], self.all["back_mask"],
+                                                      subtract_background=None,
+                                                      subtract_background2=None, grid_segmentation=True)
+
+             self.first_image.set_spot_shapes_and_size_confint(self.all['starting_blob_shape'])
+             logging.info(self.sample_number)
+             process_i = ProcessFirstImage(
+                 [self.first_image, False, False, None, self.vars['several_blob_per_arena'],
+                  self.sample_number, spot_size, self.vars["color_number"], self.all["bio_mask"], self.all["back_mask"], None])
+             process_i.binary_image = self.first_image.binary_image
+             process_i.process_binary_image(use_bio_and_back_masks=True)
+
+             if self.all["back_mask"] is not None:
+                 if np.any(process_i.shapes[self.all["back_mask"]]):
+                     process_i.shapes[np.isin(process_i.shapes, np.unique(process_i.shapes[self.all["back_mask"]]))] = 0
+                     process_i.validated_shapes = (process_i.shapes > 0).astype(np.uint8)
+             if self.all["bio_mask"] is not None:
+                 process_i.validated_shapes[self.all["bio_mask"]] = 1
+             if self.all["back_mask"] is not None or self.all["bio_mask"] is not None:
+                 process_i.shape_number, process_i.shapes = cv2.connectedComponents(process_i.validated_shapes, connectivity=8)
+                 process_i.shape_number -= 1
+
+             self.first_image.validated_shapes = process_i.validated_shapes
+             self.first_image.shape_number = process_i.shape_number
+             if self.first_image.im_combinations is None:
+                 self.first_image.im_combinations = []
+                 self.first_image.im_combinations.append({})
+             self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
+             self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
+             self.first_image.im_combinations[self.current_combination_id]['converted_image'] = np.round(self.first_image.image).astype(np.uint8)
+             self.first_image.im_combinations[self.current_combination_id]['shape_number'] = process_i.shape_number
+             # self.first_image.generate_color_space_combination(self.vars['convert_for_origin'], subtract_background)
+         else:
+             # self.last_image.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'])
+             # if self.vars['drift_already_corrected']:
+             #     drift_correction, drift_correction2 = self.last_image.adjust_to_drift_correction()
+             # self.last_image.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'])
+             self.cropping(is_first_image=False)
+             self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
+                                                 biomask, backmask, self.first_image.subtract_background,
+                                                 self.first_image.subtract_background2,
+                                                 grid_segmentation=self.vars["grid_segmentation"])
+             if self.vars['drift_already_corrected'] and not self.last_image.drift_correction_already_adjusted and not self.vars["grid_segmentation"]:
+                 self.last_image.adjust_to_drift_correction(self.vars['convert_for_motion']['logical'])
+
+             if self.last_image.im_combinations is None:
+                 self.last_image.im_combinations = []
+                 self.last_image.im_combinations.append({})
+             self.last_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_motion']
+             self.last_image.im_combinations[self.current_combination_id]['binary_image'] = self.last_image.binary_image
+             self.last_image.im_combinations[self.current_combination_id]['converted_image'] = np.round(self.last_image.image).astype(np.uint8)
+
+             # self.last_image.generate_color_space_combination(self.vars['convert_for_motion'], subtract_background)
+             # if self.all["more_than_two_colors"]:
+             #     self.last_image.kmeans(self.vars["color_number"])
+             # else:
+             #     self.last_image.thresholding()
+         # if self.all['are_gravity_centers_moving'] != 1:
+         #     self.delineate_each_arena()
+
+     def cropping(self, is_first_image):
+         if not self.vars['drift_already_corrected']:
+             if is_first_image:
+                 if not self.first_image.cropped:
+                     if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
+                         pickle_rick = PickleRick()
+                         data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
+                         if data_to_run_cellects_quickly is not None:
+                             if 'coordinates' in data_to_run_cellects_quickly:
+                                 logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
+                                 (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
+                                     data_to_run_cellects_quickly['coordinates']
+                                 self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                             else:
+                                 self.first_image.get_crop_coordinates()
+                         else:
+                             self.first_image.get_crop_coordinates()
+
+                     # try:
+                     #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
+                     #         data_to_run_cellects_quickly = pickle.load(fileopen)
+                     #     if 'coordinates' in data_to_run_cellects_quickly:
+                     #         logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
+                     #         (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
+                     #             data_to_run_cellects_quickly['coordinates']
+                     #         self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                     #     else:
+                     #         self.first_image.get_crop_coordinates()
+                     # except pickle.UnpicklingError:
+                     #     logging.error("Pickle error")
+                     #     self.first_image.get_crop_coordinates()
+
+                     # if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('coordinates.pkl')):
+                     #     with open('coordinates.pkl', 'rb') as fileopen:
+                     #         (ccy1, ccy2, ccx1, ccx2, self.videos.left, self.videos.right, self.videos.top,
+                     #          self.videos.bot) = pickle.load(fileopen)
+                     else:
+                         self.first_image.get_crop_coordinates()
+                     if self.all['automatically_crop']:
+                         self.first_image.automatically_crop(self.first_image.crop_coord)
+                     else:
+                         self.first_image.crop_coord = None
+             else:
+                 if not self.last_image.cropped and self.all['automatically_crop']:
+                     self.last_image.automatically_crop(self.first_image.crop_coord)
+         # if self.all['automatically_crop'] and not self.vars['drift_already_corrected']:
+         #     if is_first_image:
+         #         self.first_image.get_crop_coordinates()
+         #         self.first_image.automatically_crop(self.first_image.crop_coord)
+         #     else:
+         #         self.last_image.automatically_crop(self.first_image.crop_coord)
+     def get_average_pixel_size(self):
+         logging.info("Get average pixel size")
+         (self.first_image.shape_number,
+          self.first_image.shapes,
+          self.first_image.stats,
+          centroids) = cv2.connectedComponentsWithStats(
+             self.first_image.validated_shapes,
+             connectivity=8)
+         self.first_image.shape_number -= 1
+         if self.all['scale_with_image_or_cells'] == 0:
+             self.vars['average_pixel_size'] = np.square(
+                 self.all['image_horizontal_size_in_mm'] /
+                 self.first_im.shape[1])
+         else:
+             self.vars['average_pixel_size'] = np.square(
+                 self.all['starting_blob_hsize_in_mm'] /
+                 np.mean(self.first_image.stats[1:, 2]))
+         if self.all['set_spot_size']:
+             self.starting_blob_hsize_in_pixels = (
+                 self.all['starting_blob_hsize_in_mm'] /
+                 np.sqrt(self.vars['average_pixel_size']))
+         else:
+             self.starting_blob_hsize_in_pixels = None
+
+         if self.all['automatic_size_thresholding']:
+             self.vars['first_move_threshold'] = 10
+         else:
+             # if self.vars['origin_state'] != "invisible":
+             self.vars['first_move_threshold'] = np.round(
+                 self.all['first_move_threshold_in_mm²'] /
+                 self.vars['average_pixel_size']).astype(np.uint8)
+         logging.info(f"The average pixel size is: {self.vars['average_pixel_size']} mm²")
+
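+     # Worked example for the scaling above (hypothetical numbers): an image
+     # 2400 px wide showing 120 mm gives (120 / 2400)² = 0.0025 mm² per pixel;
+     # a first_move_threshold_in_mm² of 0.25 then becomes
+     # round(0.25 / 0.0025) = 100 pixels.
+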
+     def delineate_each_arena(self):
+         self.videos = OneVideoPerBlob(
+             self.first_image,
+             self.starting_blob_hsize_in_pixels,
+             self.all['raw_images'])
+         # self.delineation_number += 1
+         # if self.delineation_number > 1:
+         #     print('stophere')
+         analysis_status = {"continue": True, "message": ""}
+         if (self.sample_number > 1 and not self.vars['several_blob_per_arena']):
+             compute_get_bb: bool = True
+             if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
+
+                 pickle_rick = PickleRick()
+                 data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
+                 if data_to_run_cellects_quickly is not None:
+                     if 'coordinates' in data_to_run_cellects_quickly:
+                         (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
+                             data_to_run_cellects_quickly['coordinates']
+                         self.videos.left, self.videos.right, self.videos.top, self.videos.bot = self.left, self.right, self.top, self.bot
+                         self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                         if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
+                                 self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
+                             logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
+                             compute_get_bb = False
+
+                 # try:
+                 #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
+                 #         data_to_run_cellects_quickly = pickle.load(fileopen)
+                 # except pickle.UnpicklingError:
+                 #     logging.error("Pickle error")
+                 #     data_to_run_cellects_quickly = {}
+                 # if 'coordinates' in data_to_run_cellects_quickly:
+                 #     (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
+                 #         data_to_run_cellects_quickly['coordinates']
+                 #     self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
+                 #     if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
+                 #             self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
+                 #         logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
+                 #         compute_get_bb = False
+
+                 # if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('coordinates.pkl')):
+                 #     with open('coordinates.pkl', 'rb') as fileopen:
+                 #         (ccy1, ccy2, ccx1, ccx2, self.videos.left, self.videos.right, self.videos.top, self.videos.bot) = pickle.load(fileopen)
+
+                 # if (not self.all['overwrite_unaltered_videos'] and
+                 #         os.path.isfile('coordinates.pkl')):
+                 #     with open('coordinates.pkl', 'rb') as fileopen:
+                 #         (vertical_shape, horizontal_shape, self.videos.left, self.videos.right, self.videos.top,
+                 #          self.videos.bot) = pickle.load(fileopen)
+
+             if compute_get_bb:
+                 if self.all['im_or_vid'] == 1:
+                     self.videos.get_bounding_boxes(
+                         are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
+                         img_list=self.analysis_instance,
+                         color_space_combination=self.vars['convert_for_origin'],  # self.vars['convert_for_motion']
+                         color_number=self.vars["color_number"],
+                         sample_size=5,
+                         all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'])
+                 else:
+                     self.videos.get_bounding_boxes(
+                         are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
+                         img_list=self.data_list,
+                         color_space_combination=self.vars['convert_for_origin'],
+                         color_number=self.vars["color_number"],
+                         sample_size=5,
+                         all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'])
+                 if np.any(self.videos.ordered_stats[:, 4] > 100 * np.median(self.videos.ordered_stats[:, 4])):
+                     analysis_status['message'] = "A specimen is at least 100 times larger than the median: (re)do the first image analysis."
+                     analysis_status['continue'] = False
+                 if np.any(self.videos.ordered_stats[:, 4] < 0.01 * np.median(self.videos.ordered_stats[:, 4])):
+                     analysis_status['message'] = "A specimen is at least 100 times smaller than the median: (re)do the first image analysis."
+                     analysis_status['continue'] = False
+                 # self.all['overwrite_unaltered_videos'] = True
+                 # self.videos.print_bounding_boxes(0)
+                 logging.info(
+                     str(self.videos.not_analyzed_individuals) + " individuals are out of the picture scope and cannot be analyzed")
+             self.left, self.right, self.top, self.bot = self.videos.left, self.videos.right, self.videos.top, self.videos.bot
+
+         else:
+             self.left, self.right, self.top, self.bot = np.array([1]), np.array([self.first_image.image.shape[1] - 2]), np.array([1]), np.array([self.first_image.image.shape[0] - 2])
+             self.videos.left, self.videos.right, self.videos.top, self.videos.bot = np.array([1]), np.array([self.first_image.image.shape[1] - 2]), np.array([1]), np.array([self.first_image.image.shape[0] - 2])
+
+         self.vars['analyzed_individuals'] = np.arange(self.sample_number) + 1
+         if self.videos.not_analyzed_individuals is not None:
+             self.vars['analyzed_individuals'] = np.delete(self.vars['analyzed_individuals'],
+                                                           self.videos.not_analyzed_individuals - 1)
+         # logging.info(self.top)
+         return analysis_status
+
+     def get_background_to_subtract(self):
+         """
+         Rethink when this step happens and find out why it does not work.
+         """
+         # self.vars['subtract_background'] = False
+         if self.vars['subtract_background']:
+             self.first_image.generate_subtract_background(self.vars['convert_for_motion'])
+
+     def get_origins_and_backgrounds_lists(self):
+         logging.info("Create origins and background lists")
+         if self.top is None:
+             # self.top = [1]
+             # self.bot = [self.first_im.shape[0] - 2]
+             # self.left = [1]
+             # self.right = [self.first_im.shape[1] - 2]
+             self.top = np.array([1])
+             self.bot = np.array([self.first_im.shape[0] - 2])
+             self.left = np.array([1])
+             self.right = np.array([self.first_im.shape[1] - 2])
+
+         add_to_c = 1
+         first_im = self.first_image.validated_shapes
+         self.vars['origin_list'] = []
+         self.vars['background_list'] = []
+         self.vars['background_list2'] = []
+         for rep in np.arange(len(self.vars['analyzed_individuals'])):
+             self.vars['origin_list'].append(first_im[self.top[rep]:(self.bot[rep] + add_to_c),
+                                                      self.left[rep]:(self.right[rep] + add_to_c)])
+             if self.vars['subtract_background']:
+                 self.vars['background_list'].append(
+                     self.first_image.subtract_background[self.top[rep]:(self.bot[rep] + add_to_c),
+                                                          self.left[rep]:(self.right[rep] + add_to_c)])
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     self.vars['background_list2'].append(
+                         self.first_image.subtract_background2[self.top[rep]:(self.bot[rep] + add_to_c),
+                                                               self.left[rep]:(self.right[rep] + add_to_c)])
+
+     def get_origins_and_backgrounds_one_by_one(self):
+         add_to_c = 1
+         self.vars['origin_list'] = []
+         self.vars['background_list'] = []
+         self.vars['background_list2'] = []
+
+         for arena in np.arange(len(self.vars['analyzed_individuals'])):
+             bgr_image = self.first_image.bgr[self.top[arena]:(self.bot[arena] + add_to_c),
+                                              self.left[arena]:(self.right[arena] + add_to_c), ...]
+             image = OneImageAnalysis(bgr_image)
+             if self.vars['subtract_background']:
+                 image.generate_subtract_background(self.vars['convert_for_motion'])
+                 self.vars['background_list'].append(image.image)
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     self.vars['background_list2'].append(image.image2)
+
+             # self.vars['origins_list'].append(self.first_image.validated_shapes[self.top[arena]:(self.bot[arena]),
+             #                                                                    self.left[arena]:(self.right[arena])])
+             #
+             if self.vars['several_blob_per_arena']:
+                 image.validated_shapes = image.binary_image
+             else:
+                 image.get_largest_shape()
+
+             self.vars['origin_list'].append(image.validated_shapes)
+
+     def choose_color_space_combination(self):
+         # self = po
+         # 2) Represent the segmentation using a particular color space combination
+         if self.all['are_gravity_centers_moving'] != 1:
+             analysis_status = self.delineate_each_arena()
+         # self.fi.automatically_crop(self.first_image.crop_coord)
+         self.last_image = OneImageAnalysis(self.last_im)
+         self.last_image.automatically_crop(self.videos.first_image.crop_coord)
+         # csc = ColorSpaceCombination(self.last_image.image)
+
+         concomp_nb = [self.sample_number, self.sample_number * 50]
+         if self.all['are_zigzag'] == "columns":
+             inter_dist = np.mean(np.diff(np.nonzero(self.videos.first_image.y_boundaries)))
+         elif self.all['are_zigzag'] == "rows":
+             inter_dist = np.mean(np.diff(np.nonzero(self.videos.first_image.x_boundaries)))
+         else:
+             dist1 = np.mean(np.diff(np.nonzero(self.videos.first_image.y_boundaries)))
+             dist2 = np.mean(np.diff(np.nonzero(self.videos.first_image.x_boundaries)))
+             inter_dist = max(dist1, dist2)  # np.max(dist1, dist2) would wrongly pass dist2 as the axis argument
+         if self.all['starting_blob_shape'] == "circle":
+             max_shape_size = np.pi * np.square(inter_dist)
+         else:
+             max_shape_size = np.square(2 * inter_dist)
+         total_surfarea = max_shape_size * self.sample_number
+         if self.all['are_gravity_centers_moving'] != 1:
+             out_of_arenas = np.ones_like(self.videos.first_image.validated_shapes)
+             for blob_i in np.arange(len(self.vars['analyzed_individuals'])):
+                 out_of_arenas[self.top[blob_i]: (self.bot[blob_i] + 1),
+                               self.left[blob_i]: (self.right[blob_i] + 1)] = 0
+         else:
+             out_of_arenas = None
+         ref_image = self.videos.first_image.validated_shapes
+         self.last_image.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image)
+         # csc.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image)
+         # csc.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image, self.first_image.subtract_background)
+         self.vars['convert_for_motion'] = self.last_image.channel_combination
+
+         self.fast_image_segmentation(False)
+         # if self.vars['subtract_background']:
+         #     csc = ColorSpaceCombination(self.last_image.image)
+         #     csc.generate(self.vars['convert_for_motion'], self.first_image.subtract_background)
+         #     if self.all["more_than_two_colors"]:
+         #         csc.kmeans(self.vars["color_number"])
+         #     else:
+         #         csc.thresholding()
+         #     self.last_image.image = csc.image
+         #     self.last_image.binary_image = csc.binary_image
+
+     def untype_csc_dict(self):
+         new_convert_for_origin = {}
+         for k, v in self.vars['convert_for_origin'].items():
+             new_convert_for_origin[k] = v
+         if self.vars['logical_between_csc_for_origin'] is not None:
+             new_convert_for_origin['logical'] = self.vars['logical_between_csc_for_origin']
+         for k, v in self.vars['convert_for_origin2'].items():
+             new_convert_for_origin[k] = v
+         self.vars['convert_for_origin'] = new_convert_for_origin
+         self.vars['convert_for_origin2'] = {}
+
+         new_convert_for_motion = {}
+         for k, v in self.vars['convert_for_motion'].items():
+             new_convert_for_motion[k] = v
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             new_convert_for_motion['logical'] = self.vars['convert_for_motion']['logical']
+         for k, v in self.vars['convert_for_motion2'].items():
+             new_convert_for_motion[k] = v
+         self.vars['convert_for_motion'] = new_convert_for_motion
+         self.vars['convert_for_motion2'] = {}
+
+     def type_csc_dict(self):
+         # self.vars['convert_for_motion']['logical'] = 'And'
+         # self.vars['convert_for_motion']['hsv'] = np.array((0, 0, 1))
+         # self.vars['convert_for_motion']['logical'] = 'And'
+         # self.vars['convert_for_motion']['lab2'] = np.array((0, 0, 1))
+
+         new_convert_for_origin = TDict()
+         self.vars['convert_for_origin2'] = TDict()
+         self.vars['logical_between_csc_for_origin'] = None
+         for k, v in self.vars['convert_for_origin'].items():
+             if k != 'logical' and v.sum() > 0:
+                 if k[-1] != '2':
+                     new_convert_for_origin[k] = v
+                 else:
+                     self.vars['convert_for_origin2'][k[:-1]] = v
+             else:
+                 self.vars['logical_between_csc_for_origin'] = v
+         self.vars['convert_for_origin'] = new_convert_for_origin
+
+         new_convert_for_motion = TDict()
+         self.vars['convert_for_motion2'] = TDict()
+         self.vars['convert_for_motion']['logical'] = None
+         for k, v in self.vars['convert_for_motion'].items():
+             if k != 'logical' and v.sum() > 0:
+                 if k[-1] != '2':
+                     new_convert_for_motion[k] = v
+                 else:
+                     self.vars['convert_for_motion2'][k[:-1]] = v
+             else:
+                 self.vars['convert_for_motion']['logical'] = v
+         self.vars['convert_for_motion'] = new_convert_for_motion
+
+         if self.vars['color_number'] > 2:
+             self.vars['bio_label'] = None  # self.first_image.bio_label
+             if self.vars['convert_for_motion']['logical'] != 'None':
+                 self.vars['bio_label2'] = None
+
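+     # Example of the splitting above (hypothetical combination): keys ending
+     # in '2', e.g. 'hsv2': array([0, 1, 0]), go to convert_for_motion2 under
+     # the stripped name 'hsv', while 'lab': array([0, 0, 1]) stays in
+     # convert_for_motion; untype_csc_dict performs the reverse merge.
+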
+     def find_if_lighter_background(self):
+         logging.info("Find if the background is lighter or darker than the cells")
+         self.vars['lighter_background']: bool = True
+         self.vars['contour_color']: np.uint8 = 0
+         are_dicts_equal: bool = True
+         for key in self.vars['convert_for_origin'].keys():
+             are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_motion'] and self.vars['convert_for_origin'][key] == self.vars['convert_for_motion'][key])
+         for key in self.vars['convert_for_motion'].keys():
+             are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_origin'] and self.vars['convert_for_motion'][key] == self.vars['convert_for_origin'][key])
+
+         if are_dicts_equal:
+
+             if self.first_im is None:
+                 self.get_first_image()
+                 self.fast_image_segmentation(True)
+                 self.cropping(is_first_image=True)
+             among = np.nonzero(self.first_image.validated_shapes)
+             not_among = np.nonzero(1 - self.first_image.validated_shapes)
+             # Use the converted image to tell if the background is lighter, for analysis purposes
+             if self.first_image.image[among[0], among[1]].mean() > self.first_image.image[not_among[0], not_among[1]].mean():
+                 self.vars['lighter_background'] = False
+             # Use the original image to tell if the background is lighter, for display purposes
+             if self.first_image.bgr[among[0], among[1], ...].mean() > self.first_image.bgr[not_among[0], not_among[1], ...].mean():
+                 self.vars['contour_color'] = 255
+         else:
+             if self.last_im is None:
+                 self.get_last_image()
+                 # self.cropping(is_first_image=False)
+                 self.fast_image_segmentation(is_first_image=False)
+             if self.last_image.binary_image.sum() == 0:
+                 self.fast_image_segmentation(is_first_image=False)
+             among = np.nonzero(self.last_image.binary_image)
+             not_among = np.nonzero(1 - self.last_image.binary_image)
+             # Use the converted image to tell if the background is lighter, for analysis purposes
+             if self.last_image.image[among[0], among[1]].mean() > self.last_image.image[not_among[0], not_among[1]].mean():
+                 self.vars['lighter_background'] = False
+             # Use the original image to tell if the background is lighter, for display purposes
+             if self.last_image.bgr[among[0], among[1], ...].mean() > self.last_image.bgr[not_among[0], not_among[1], ...].mean():
+                 self.vars['contour_color'] = 255
+         if self.vars['origin_state'] == "invisible":
+             binary_image = deepcopy(self.first_image.binary_image)
+             self.first_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
+                                                  None, None, subtract_background=None,
+                                                  subtract_background2=None,
+                                                  grid_segmentation=self.vars["grid_segmentation"])
+             covered_values = self.first_image.image[np.nonzero(binary_image)]
+             if self.vars['lighter_background']:
+                 if np.max(covered_values) < 255:
+                     self.vars['luminosity_threshold'] = np.max(covered_values) + 1
+                 else:
+                     self.vars['luminosity_threshold'] = 127
+             else:
+                 if np.min(covered_values) > 0:
+                     self.vars['luminosity_threshold'] = np.min(covered_values) - 1
+                 else:
+                     self.vars['luminosity_threshold'] = 127
+
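+     # Example for the invisible-origin branch above: with a lighter background
+     # and covered_values whose maximum is 200, luminosity_threshold becomes
+     # 201 (the first grey level brighter than anything the specimens covered);
+     # if that maximum is already 255, it falls back to 127.
+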
+     def load_one_arena(self, arena):
+         # self.delineate_each_arena()
+         # self.choose_color_space_combination()
+         add_to_c = 1
+         self.one_arena_done = True
+         i = np.nonzero(self.vars['analyzed_individuals'] == arena)[0][0]
+         self.converted_video = np.zeros(
+             (len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c),
+             dtype=float)
+         if not self.vars['already_greyscale']:
+             self.visu = np.zeros((len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c, 3), dtype=np.uint8)
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             self.converted_video2 = np.zeros((len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c), dtype=float)
+         first_dict = TDict()
+         second_dict = TDict()
+         c_spaces = []
+         for k, v in self.vars['convert_for_motion'].items():
+             if k != 'logical' and v.sum() > 0:
+                 if k[-1] != '2':
+                     first_dict[k] = v
+                     c_spaces.append(k)
+                 else:
+                     second_dict[k[:-1]] = v
+                     c_spaces.append(k[:-1])
+         prev_img = None
+         background = None
+         background2 = None
+         for image_i, image_name in enumerate(self.data_list):
+             img = self.videos.read_and_rotate(image_name, prev_img)
+             prev_img = deepcopy(img)
+             # if self.videos.first_image.crop_coord is not None:
+             #     img = img[self.videos.first_image.crop_coord[0]:self.videos.first_image.crop_coord[1],
+             #               self.videos.first_image.crop_coord[2]:self.videos.first_image.crop_coord[3], :]
+             img = img[self.top[arena - 1]: (self.bot[arena - 1] + add_to_c),
+                       self.left[arena - 1]: (self.right[arena - 1] + add_to_c), :]
+
+             if self.vars['already_greyscale']:
+                 if self.reduce_image_dim:
+                     self.converted_video[image_i, ...] = img[:, :, 0]
+                 else:
+                     self.converted_video[image_i, ...] = img
+             else:
+                 self.visu[image_i, ...] = img
+                 if self.vars['subtract_background']:
+                     background = self.vars['background_list'][i]
+                     if self.vars['convert_for_motion']['logical'] != 'None':
+                         background2 = self.vars['background_list2'][i]
+                 greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces, first_dict,
+                                                                                      second_dict, background, background2,
+                                                                                      self.vars['lose_accuracy_to_save_memory'])
+                 self.converted_video[image_i, ...] = greyscale_image
+                 if self.vars['convert_for_motion']['logical'] != 'None':
+                     self.converted_video2[image_i, ...] = greyscale_image2
+                 # csc = OneImageAnalysis(img)
+                 # else:
+                 #     csc.generate_color_space_combination(c_spaces, first_dict, second_dict, None, None)
+                 #     # self.converted_video[image_i, ...] = csc.image
+                 #     self.converted_video[image_i, ...] = csc.image
+                 #     if self.vars['convert_for_motion']['logical'] != 'None':
+                 #         self.converted_video2[image_i, ...] = csc.image2
+
+         # write_video(self.visu, f"ind_{arena}{self.vars['videos_extension']}", is_color=True, fps=1)
+
+     def update_output_list(self):
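+         """Build self.vars['descriptors'] from the user-selected descriptors.
+ 
+         Compound entries (e.g. 'standard_deviation_xy') are expanded into
+         their per-axis components, and descriptors tied to optional analyses
+         (oscillation, network, fading) are toggled accordingly.
+         """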
+         self.vars['descriptors'] = {}
+         # self.vars['descriptors']['final_area'] = True  # [False, True, False]
+         # if self.vars['first_move_threshold'] is not None:
+         #     self.vars['descriptors']['first_move'] = True  # [False, True, False]
+ 
+         # if self.vars['iso_digi_analysis']:
+         #     self.vars['descriptors']['is_growth_isotropic'] = True  # [False, True, False]
+         #     self.vars['descriptors']['iso_digi_transi'] = True  # [False, True, False]
+ 
+         # if self.vars['oscilacyto_analysis']:
+         #     self.vars['descriptors']['max_magnitude'] = True  # [False, True, False]
+         #     self.vars['descriptors']['frequency_of_max_magnitude'] = True  # [False, True, False]
+         for descriptor in self.all['descriptors'].keys():
+             if descriptor == 'standard_deviation_xy':
+                 self.vars['descriptors']['standard_deviation_x'] = self.all['descriptors'][descriptor]
+                 self.vars['descriptors']['standard_deviation_y'] = self.all['descriptors'][descriptor]
+             elif descriptor == 'skewness_xy':
+                 self.vars['descriptors']['skewness_x'] = self.all['descriptors'][descriptor]
+                 self.vars['descriptors']['skewness_y'] = self.all['descriptors'][descriptor]
+             elif descriptor == 'kurtosis_xy':
+                 self.vars['descriptors']['kurtosis_x'] = self.all['descriptors'][descriptor]
+                 self.vars['descriptors']['kurtosis_y'] = self.all['descriptors'][descriptor]
+             elif descriptor == 'major_axes_len_and_angle':
+                 self.vars['descriptors']['major_axis_len'] = self.all['descriptors'][descriptor]
+                 self.vars['descriptors']['minor_axis_len'] = self.all['descriptors'][descriptor]
+                 self.vars['descriptors']['axes_orientation'] = self.all['descriptors'][descriptor]
+             else:
+                 if np.isin(descriptor, list(from_shape_descriptors_class.keys())):
+                     self.vars['descriptors'][descriptor] = self.all['descriptors'][descriptor]
+         self.vars['descriptors']['cluster_number'] = self.vars['oscilacyto_analysis']
+         self.vars['descriptors']['mean_cluster_area'] = self.vars['oscilacyto_analysis']
+         self.vars['descriptors']['vertices_number'] = self.vars['network_analysis']
+         self.vars['descriptors']['edges_number'] = self.vars['network_analysis']
+         self.vars['descriptors']['newly_explored_area'] = self.vars['do_fading']
+         # if self.vars['descriptors_means']:
+         #     self.vars['output_list'] += [f'{descriptor}_mean']
+         #     self.vars['output_list'] += [f'{descriptor}_std']
+         # if self.vars['descriptors_regressions']:
+         #     self.vars['output_list'] += [f"{descriptor}_reg_start"]
+         #     self.vars['output_list'] += [f"{descriptor}_reg_end"]
+         #     self.vars['output_list'] += [f'{descriptor}_slope']
+         #     self.vars['output_list'] += [f'{descriptor}_intercept']
+ 
+     def update_available_core_nb(self, image_bit_number=256, video_bit_number=140):  # video_bit_number=176
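+         """Estimate how many arenas can be analyzed in parallel.
+ 
+         image_bit_number and video_bit_number are rough per-pixel memory costs
+         (in bits) of the intermediate images and of each video frame; they are
+         adjusted according to the enabled options. Returns the absolute
+         difference between available and required memory in GB, rounded to
+         3 decimals.
+ 
+         Illustrative example (hypothetical values): with 500x500 px arenas and
+         100 images, one_image_memory = 250_000 and one_video_memory = 25_000_000,
+         so necessary_memory ≈ (250_000 * 256 + 25_000_000 * 140) * 1.16415e-10
+         ≈ 0.41 GB per concurrently analyzed arena.
+         """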
+         if self.vars['lose_accuracy_to_save_memory']:
+             video_bit_number -= 56
+         if self.vars['convert_for_motion']['logical'] != 'None':
+             video_bit_number += 64
+             if self.vars['lose_accuracy_to_save_memory']:
+                 video_bit_number -= 56
+         if self.vars['already_greyscale']:
+             video_bit_number -= 64
+         if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
+             video_bit_number += 16
+             image_bit_number += 128
+         if self.vars['save_coord_network'] or self.vars['network_analysis']:
+             video_bit_number += 8
+             image_bit_number += 64
+ 
+         if isinstance(self.bot, list):
+             one_image_memory = np.multiply((self.bot[0] - self.top[0] + 1),
+                                            (self.right[0] - self.left[0] + 1)).max().astype(np.uint64)
+         else:
+             one_image_memory = np.multiply((self.bot - self.top + 1).astype(np.uint64),
+                                            (self.right - self.left + 1).astype(np.uint64)).max()
+         one_video_memory = self.vars['img_number'] * one_image_memory
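+         # 1.16415e-10 ≈ 1 / (8 * 1024**3): converts a bit count into gigabytes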
+         necessary_memory = (one_image_memory * image_bit_number + one_video_memory * video_bit_number) * 1.16415e-10
+         available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+         max_repeat_in_memory = (available_memory // necessary_memory).astype(np.uint16)
+         if max_repeat_in_memory > 1:
+             max_repeat_in_memory = np.max(((available_memory // (2 * necessary_memory)).astype(np.uint16), 1))
+         # if sys.platform.startswith('win'):
+         #     available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+         # else:
+ 
+         self.cores = np.min((self.all['cores'], max_repeat_in_memory))
+         if self.cores > self.sample_number:
+             self.cores = self.sample_number
+         return np.round(np.absolute(available_memory - necessary_memory), 3)
+ 
+     def update_one_row_per_arena(self, i, table_to_add):
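+         """Write one arena's summary statistics into row i of the one-row-per-arena
+         table, creating the table on first use (only when each arena holds a
+         single specimen)."""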
+         if not self.vars['several_blob_per_arena']:
+             if self.one_row_per_arena is None:
+                 self.one_row_per_arena = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(table_to_add)), dtype=float),
+                                                       columns=table_to_add.keys())
+             self.one_row_per_arena.iloc[i, :] = table_to_add.values()
+ 
+     def update_one_row_per_frame(self, i, j, table_to_add):
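+         """Write one arena's per-frame descriptors into rows i to j (excluded) of
+         the one-row-per-frame table, creating the table on first use."""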
+         if not self.vars['several_blob_per_arena']:
+             if self.one_row_per_frame is None:
+                 self.one_row_per_frame = pd.DataFrame(index=range(len(self.vars['analyzed_individuals']) *
+                                                                   self.vars['img_number']),
+                                                       columns=table_to_add.keys())
+             self.one_row_per_frame.iloc[i:j, :] = table_to_add
+ 
+     def instantiate_tables(self):
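+         """Reset the results tables; for greyscale input, expand the first and
+         last images to three channels and fall back to a plain BGR conversion."""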
+         self.update_output_list()
+         logging.info("Instantiate results tables and validation images")
+         self.one_row_per_oscillating_cluster = None
+         self.fractal_box_sizes = None
+         # if self.vars['oscilacyto_analysis']:
+         #     self.one_row_per_oscillating_cluster = pd.DataFrame(columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size',
+         #                                                                  'edge_distance'])
+         # if self.vars['fractal_analysis']:
+         #     self.fractal_box_sizes = pd.DataFrame(columns=['arena', 'time', 'fractal_box_lengths', 'fractal_box_widths'])
+ 
+         if self.vars['already_greyscale']:
+             if len(self.first_image.bgr.shape) == 2:
+                 self.first_image.bgr = np.stack((self.first_image.bgr, self.first_image.bgr, self.first_image.bgr), axis=2).astype(np.uint8)
+             if len(self.last_image.bgr.shape) == 2:
+                 self.last_image.bgr = np.stack((self.last_image.bgr, self.last_image.bgr, self.last_image.bgr), axis=2).astype(np.uint8)
+             self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
+ 
+     def add_analysis_visualization_to_first_and_last_images(self, i, first_visualization, last_visualization):
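+         """Paste the analysis overlays of arena index i into the first and last
+         whole images; for circular arenas, only the elliptic region is replaced."""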
+         cr = ((self.top[i], self.bot[i] + 1),
+               (self.left[i], self.right[i] + 1))
+         if self.vars['arena_shape'] == 'circle':
+             ellipse = Ellipse((cr[0][1] - cr[0][0], cr[1][1] - cr[1][0])).create()
+             ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
+             first_visualization *= ellipse
+             self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
+             self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += first_visualization
+             last_visualization *= ellipse
+             self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
+             self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += last_visualization
+         else:
+             self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = first_visualization
+             self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = last_visualization
+ 
+     def save_tables(self):
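+         """Save all results tables as .csv files, the validation images, and the
+         software settings; warn the user when a target file is locked."""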
+         logging.info("Save results tables and validation images")
+         if not self.vars['several_blob_per_arena']:
+             try:
+                 self.one_row_per_arena.to_csv("one_row_per_arena.csv", sep=";", index=False, lineterminator='\n')
+                 del self.one_row_per_arena
+             except PermissionError:
+                 logging.error("Never leave one_row_per_arena.csv open while Cellects runs")
+                 self.message_from_thread.emit("Never leave one_row_per_arena.csv open while Cellects runs")
+             try:
+                 self.one_row_per_frame.to_csv("one_row_per_frame.csv", sep=";", index=False, lineterminator='\n')
+                 del self.one_row_per_frame
+             except PermissionError:
+                 logging.error("Never leave one_row_per_frame.csv open while Cellects runs")
+                 self.message_from_thread.emit("Never leave one_row_per_frame.csv open while Cellects runs")
+         if self.vars['oscilacyto_analysis']:
+             try:
+                 if self.one_row_per_oscillating_cluster is None:
+                     self.one_row_per_oscillating_cluster = pd.DataFrame(columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size',
+                                                                                  'edge_distance'])
+                 self.one_row_per_oscillating_cluster.to_csv("one_row_per_oscillating_cluster.csv", sep=";", index=False,
+                                                             lineterminator='\n')
+                 del self.one_row_per_oscillating_cluster
+             except PermissionError:
+                 logging.error("Never leave one_row_per_oscillating_cluster.csv open while Cellects runs")
+                 self.message_from_thread.emit("Never leave one_row_per_oscillating_cluster.csv open while Cellects runs")
+ 
+         if self.vars['fractal_analysis']:
+             if os.path.isfile("oscillating_clusters_temporal_dynamics.h5"):
+                 array_names = get_h5_keys("oscillating_clusters_temporal_dynamics.h5")
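+                 # The arena id is parsed from the last character of each dataset
+                 # name (which assumes single-digit arena numbers) and prepended
+                 # as the first column of the dynamics array.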
+                 arena_fractal_dynamics = read_h5_array("oscillating_clusters_temporal_dynamics.h5", key=array_names[0])
+                 arena_fractal_dynamics = np.column_stack((np.repeat(np.uint32(array_names[0][-1]), arena_fractal_dynamics.shape[0]), arena_fractal_dynamics))
+                 for array_name in array_names[1:]:
+                     fractal_dynamics = read_h5_array("oscillating_clusters_temporal_dynamics.h5", key=array_name)
+                     fractal_dynamics = np.column_stack((np.repeat(np.uint32(array_name[-1]), fractal_dynamics.shape[0]), fractal_dynamics))
+                     arena_fractal_dynamics = np.vstack((arena_fractal_dynamics, fractal_dynamics))
+                 arena_fractal_dynamics = pd.DataFrame(arena_fractal_dynamics, columns=["arena", "time", "cluster_id", "flow", "centroid_y", "centroid_x", "area", "inner_network_area", "box_count_dim", "inner_network_box_count_dim"])
+                 arena_fractal_dynamics.to_csv("oscillating_clusters_temporal_dynamics.csv", sep=";", index=False,
+                                               lineterminator='\n')
+                 del arena_fractal_dynamics
+                 os.remove("oscillating_clusters_temporal_dynamics.h5")
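+         # Save the validation images with an extension that differs from the data
+         # images, presumably so they cannot be mistaken for experimental frames.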
+         if self.all['extension'] == '.JPG':
+             extension = '.PNG'
+         else:
+             extension = '.JPG'
+         cv2.imwrite(f"Analysis efficiency, last image{extension}", self.last_image.bgr)
+         cv2.imwrite(
+             f"Analysis efficiency, {np.ceil(self.vars['img_number'] / 10).astype(np.uint64)}th image{extension}",
+             self.first_image.bgr)
+         # self.save_analysis_parameters.to_csv("analysis_parameters.csv", sep=";")
+ 
+         software_settings = deepcopy(self.vars)
+         for key in ['descriptors', 'analyzed_individuals', 'exif', 'dims', 'origin_list', 'background_list', 'background_list2', 'folder_list', 'sample_number_per_folder']:
+             software_settings.pop(key, None)
+         global_settings = deepcopy(self.all)
+         for key in ['analyzed_individuals', 'night_mode', 'expert_mode', 'is_auto', 'arena', 'video_option', 'compute_all_options', 'vars', 'dims', 'origin_list', 'background_list', 'background_list2', 'descriptors', 'folder_list', 'sample_number_per_folder']:
+             global_settings.pop(key, None)
+         software_settings.update(global_settings)
+         software_settings = pd.DataFrame.from_dict(software_settings, columns=["Setting"], orient='index')
+         try:
+             software_settings.to_csv("software_settings.csv", sep=";")
+         except PermissionError:
+             logging.error("Never leave software_settings.csv open while Cellects runs")
+             self.message_from_thread.emit("Never leave software_settings.csv open while Cellects runs")
+ 
+ 
+ # if __name__ == "__main__":
+ #     po = ProgramOrganizer()
+ #     os.chdir(Path("D:\Directory\Data\Example\Example\Example"))
+ #     # po.all['global_pathway'] = Path("C:/Users/APELab/Documents/Aurèle/Cellects/install/Installer_and_example/Example")
+ #     po.load_variable_dict()
+ #     po.all['global_pathway']
+ #     po.load_data_to_run_cellects_quickly()
+ #     po.all['global_pathway']
+ #     po.save_data_to_run_cellects_quickly()