cellects-0.1.2-py3-none-any.whl → cellects-0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. cellects/__main__.py +65 -25
  2. cellects/config/all_vars_dict.py +18 -17
  3. cellects/core/cellects_threads.py +1034 -396
  4. cellects/core/motion_analysis.py +1664 -2010
  5. cellects/core/one_image_analysis.py +1082 -1061
  6. cellects/core/program_organizer.py +1687 -1316
  7. cellects/core/script_based_run.py +80 -76
  8. cellects/gui/advanced_parameters.py +390 -330
  9. cellects/gui/cellects.py +102 -91
  10. cellects/gui/custom_widgets.py +16 -33
  11. cellects/gui/first_window.py +226 -104
  12. cellects/gui/if_several_folders_window.py +117 -68
  13. cellects/gui/image_analysis_window.py +866 -454
  14. cellects/gui/required_output.py +104 -57
  15. cellects/gui/ui_strings.py +840 -0
  16. cellects/gui/video_analysis_window.py +333 -155
  17. cellects/image_analysis/cell_leaving_detection.py +64 -4
  18. cellects/image_analysis/image_segmentation.py +451 -22
  19. cellects/image_analysis/morphological_operations.py +2166 -1635
  20. cellects/image_analysis/network_functions.py +616 -253
  21. cellects/image_analysis/one_image_analysis_threads.py +94 -153
  22. cellects/image_analysis/oscillations_functions.py +131 -0
  23. cellects/image_analysis/progressively_add_distant_shapes.py +2 -3
  24. cellects/image_analysis/shape_descriptors.py +517 -466
  25. cellects/utils/formulas.py +169 -6
  26. cellects/utils/load_display_save.py +362 -109
  27. cellects/utils/utilitarian.py +86 -9
  28. cellects-0.2.6.dist-info/LICENSE +675 -0
  29. cellects-0.2.6.dist-info/METADATA +829 -0
  30. cellects-0.2.6.dist-info/RECORD +44 -0
  31. cellects/core/one_video_per_blob.py +0 -540
  32. cellects/image_analysis/cluster_flux_study.py +0 -102
  33. cellects-0.1.2.dist-info/LICENSE.odt +0 -0
  34. cellects-0.1.2.dist-info/METADATA +0 -132
  35. cellects-0.1.2.dist-info/RECORD +0 -44
  36. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/WHEEL +0 -0
  37. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/entry_points.txt +0 -0
  38. {cellects-0.1.2.dist-info → cellects-0.2.6.dist-info}/top_level.txt +0 -0
@@ -1,1316 +1,1687 @@
- #!/usr/bin/env python3
- """This file contains the class constituting the link between the graphical interface and the computations.
- First, Cellects analyzes one image in order to get a color space combination maximizing the contrast between the specimens
- and the background.
- Second, Cellects automatically delineates each arena.
- Third, Cellects writes one video for each arena.
- Fourth, Cellects segments the video and applies post-processing algorithms to improve the segmentation.
- Fifth, Cellects extracts variables and stores them in .csv files.
- """
-
- import logging
- import os
- import pickle
- import sys
- from copy import deepcopy
- import cv2
- from numba.typed import Dict as TDict
- import pandas as pd
- import numpy as np
- from psutil import virtual_memory
- from pathlib import Path
- import natsort
- from cellects.image_analysis.image_segmentation import generate_color_space_combination
- from cellects.utils.load_display_save import extract_time  # named exif
- from cellects.image_analysis.one_image_analysis_threads import ProcessFirstImage
- from cellects.core.one_image_analysis import OneImageAnalysis
- from cellects.utils.load_display_save import PickleRick, read_and_rotate, readim, is_raw_image, read_h5_array, get_h5_keys
- from cellects.utils.utilitarian import insensitive_glob, vectorized_len
- from cellects.image_analysis.morphological_operations import Ellipse, cross_33
- from cellects.core.cellects_paths import CELLECTS_DIR, ALL_VARS_PKL_FILE
- from cellects.core.motion_analysis import MotionAnalysis
- from cellects.core.one_video_per_blob import OneVideoPerBlob
- from cellects.config.all_vars_dict import DefaultDicts
- from cellects.image_analysis.shape_descriptors import from_shape_descriptors_class
-
-
- class ProgramOrganizer:
-     def __init__(self):
-         """
-         This class stores all variables required for analysis as well as
-         the methods to process them.
-         Global variables (i.e. those that do not concern the MotionAnalysis)
-         are directly stored in self.
-         Variables used in the MotionAnalysis class are stored in a dict
-         called self.vars
-         """
-         if os.path.isfile('PickleRick.pkl'):
-             os.remove('PickleRick.pkl')
-         if os.path.isfile('PickleRick0.pkl'):
-             os.remove('PickleRick0.pkl')
-         if os.path.isfile(Path(CELLECTS_DIR.parent / 'PickleRick.pkl')):
-             os.remove(Path(CELLECTS_DIR.parent / 'PickleRick.pkl'))
-         if os.path.isfile(Path(CELLECTS_DIR.parent / 'PickleRick0.pkl')):
-             os.remove(Path(CELLECTS_DIR.parent / 'PickleRick0.pkl'))
-         # self.delineation_number = 0
-         self.one_arena_done: bool = False
-         self.reduce_image_dim: bool = False
-         self.first_exp_ready_to_run: bool = False
-         self.data_to_save = {'first_image': False, 'coordinates': False, 'exif': False, 'vars': False}
-         self.videos = None
-         self.motion = None
-         self.analysis_instance = None
-         self.computed_video_options = np.zeros(5, bool)
-         self.vars = {}
-         self.all = {}
-         self.all['folder_list'] = []
-         self.all['first_detection_frame'] = 1
-         self.first_im = None
-         self.last_im = None
-         self.vars['background_list'] = []
-         self.starting_blob_hsize_in_pixels = None
-         self.vars['first_move_threshold'] = None
-         self.vars['convert_for_origin'] = None
-         self.vars['convert_for_motion'] = None
-         self.current_combination_id = 0
-         self.data_list = []
-         self.one_row_per_arena = None
-         self.one_row_per_frame = None
-         self.one_row_per_oscillating_cluster = None
-         # self.fractal_box_sizes = None
-
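The layout described in the docstring above (GUI-level flags directly on self, per-analysis settings in self.vars, everything persisted through self.all) can be pictured as nested dictionaries. A sketch with illustrative fields only, not the full set Cellects uses:

    organizer_layout = {
        'one_arena_done': False,      # lives on self, never passed to MotionAnalysis
        'all': {                      # written to all_vars.pkl by save_variable_dict
            'folder_list': [],
            'first_detection_frame': 1,
            'vars': {                 # stored as self.all['vars'], read by MotionAnalysis
                'background_list': [],
                'first_move_threshold': None,
            },
        },
    }
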
-     def save_variable_dict(self):
-         logging.info("Save the parameters dictionaries in the Cellects folder")
-         self.all['vars'] = self.vars
-         all_vars = deepcopy(self.all)
-         if not self.all['keep_cell_and_back_for_all_folders']:
-             all_vars['bio_mask'] = None
-             all_vars['back_mask'] = None
-         pickle_rick = PickleRick(0)
-         pickle_rick.write_file(all_vars, ALL_VARS_PKL_FILE)
-
-     def load_variable_dict(self):
-         # loading_succeed: bool = False
-         # if os.path.isfile('Data to run Cellects quickly.pkl'):
-         #     try:
-         #         with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
-         #             data_to_run_cellects_quickly = pickle.load(fileopen)
-         #         if 'vars' in data_to_run_cellects_quickly:
-         #             self.vars = data_to_run_cellects_quickly['vars']
-         #             loading_succeed = True
-         #             logging.info("Successfully loaded vars from the data folder")
-         #     except EOFError:
-         #         logging.error("Pickle error: will try to load vars from the Cellects folder")
-
-         if os.path.isfile(ALL_VARS_PKL_FILE):
-             logging.info("Load the parameters from all_vars.pkl in the config of the Cellects folder")
-             try:  # NEW
-                 with open(ALL_VARS_PKL_FILE, 'rb') as fileopen:  # NEW
-                     self.all = pickle.load(fileopen)  # NEW
-                 self.vars = self.all['vars']
-                 self.update_data()
-                 logging.info("Successfully loaded the parameters dictionaries from the Cellects folder")
-                 logging.info(os.getcwd())
-             except Exception as exc:  # NEW
-                 logging.error(f"Initialize default parameters because error: {exc}")  # NEW
-                 default_dicts = DefaultDicts()  # NEW
-                 self.all = default_dicts.all  # NEW
-                 self.vars = default_dicts.vars  # NEW
-         else:
-             logging.info("Initialize default parameters")
-             default_dicts = DefaultDicts()
-             self.all = default_dicts.all
-             self.vars = default_dicts.vars
-         if self.all['cores'] == 1:
-             self.all['cores'] = os.cpu_count() - 1
-
-     def analyze_without_gui(self):
-         # If needed, load the "all" dict before calling this function
-         # self = po
-         # if len(self.all['folder_list']) == 0:
-         #     folder_list = "/"
-         # else:
-         #     folder_list = self.all['folder_list']
-         # for exp_i, folder_name in enumerate(folder_list):
-         #     # exp_i = 0 ; folder_name = folder_list
-
-         self = ProgramOrganizer()
-         self.load_variable_dict()
-         # dd = DefaultDicts()
-         # self.all = dd.all
-         # self.vars = dd.vars
-         self.all['global_pathway'] = "/Users/Directory/Data/dossier1"
-         self.all['first_folder_sample_number'] = 6
-         # self.all['global_pathway'] = "D:\Directory\Data\Audrey\dosier1"
-         # self.all['first_folder_sample_number'] = 6
-         # self.all['radical'] = "IMG"
-         # self.all['extension'] = ".jpg"
-         # self.all['im_or_vid'] = 0
-         self.look_for_data()
-         self.load_data_to_run_cellects_quickly()
-         if not self.first_exp_ready_to_run:
-             self.get_first_image()
-             self.fast_image_segmentation(True)
-             # self.first_image.find_first_im_csc(sample_number=self.sample_number,
-             #                                    several_blob_per_arena=None,
-             #                                    spot_shape=None, spot_size=None,
-             #                                    kmeans_clust_nb=2,
-             #                                    biomask=None, backmask=None,
-             #                                    color_space_dictionaries=None,
-             #                                    carefully=True)
-             self.cropping(is_first_image=True)
-             self.get_average_pixel_size()
-             self.delineate_each_arena()
-             self.get_background_to_subtract()
-             self.get_origins_and_backgrounds_lists()
-             self.get_last_image()
-             self.fast_image_segmentation(is_first_image=False)
-             self.find_if_lighter_background()
-             self.extract_exif()
-         self.update_output_list()
-         look_for_existing_videos = insensitive_glob('ind_' + '*' + '.npy')
-         there_already_are_videos = len(look_for_existing_videos) == len(self.vars['analyzed_individuals'])
-         logging.info(
-             f"{len(look_for_existing_videos)} .npy video files found for {len(self.vars['analyzed_individuals'])} arenas to analyze")
-         do_write_videos = not there_already_are_videos or (
-             there_already_are_videos and self.all['overwrite_unaltered_videos'])
-         if do_write_videos:
-             self.videos = OneVideoPerBlob(self.first_image, self.starting_blob_hsize_in_pixels, self.all['raw_images'])
-             self.videos.left = self.left
-             self.videos.right = self.right
-             self.videos.top = self.top
-             self.videos.bot = self.bot
-             self.videos.first_image.shape_number = self.sample_number
-             self.videos.write_videos_as_np_arrays(
-                 self.data_list, self.vars['min_ram_free'], not self.vars['already_greyscale'], self.reduce_image_dim)
-         self.instantiate_tables()
-
-         i = 1
-         show_seg = True
-
-         if os.path.isfile(f"coord_specimen{i + 1}_t720_y1475_x1477.npy"):
-             binary_coord = np.load(f"coord_specimen{i + 1}_t720_y1475_x1477.npy")
-             l = [i, i + 1, self.vars, False, False, show_seg, None]
-             sav = self
-             self = MotionAnalysis(l)
-             self.binary = np.zeros((720, 1475, 1477), dtype=np.uint8)
-             self.binary[binary_coord[0, :], binary_coord[1, :], binary_coord[2, :]] = 1
-         else:
-             l = [i, i + 1, self.vars, True, False, show_seg, None]
-             sav = self
-             self = MotionAnalysis(l)
-         self.get_descriptors_from_binary()
-         self.detect_growth_transitions()
-         # self.networks_detection(show_seg)
-         self.study_cytoscillations(show_seg)
-
-         # for i, arena in enumerate(self.vars['analyzed_individuals']):
-         #     l = [i, i + 1, self.vars, True, False, False, None]
-         #     analysis_i = MotionAnalysis(l)
-         #     self.add_analysis_visualization_to_first_and_last_images(i, analysis_i.efficiency_test_1,
-         #                                                              analysis_i.efficiency_test_2)
-         # self.save_tables()
-         #
-         # self = MotionAnalysis(l)
-         # l = [5, 6, self.vars, True, False, False, None]
-         # sav = self
-         # self.get_descriptors_from_binary()
-         # self.detect_growth_transitions()
-
-     def look_for_data(self):
-         # global_pathway = 'I:\Directory\Tracking_data\generalization_and_potentiation\drop_nak1'
-         os.chdir(Path(self.all['global_pathway']))
-         logging.info(f"Dir: {self.all['global_pathway']}")
-         self.data_list = insensitive_glob(
-             self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
-         self.all['folder_list'] = []
-         self.all['folder_number'] = 1
-         if len(self.data_list) > 0:
-             lengths = vectorized_len(self.data_list)
-             if np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
-                 logging.error("File names present strong variations and cannot be correctly sorted.")
-             self.data_list = natsort.natsorted(self.data_list)
-             self.sample_number = self.all['first_folder_sample_number']
-         else:
-             content = os.listdir()
-             for obj in content:
-                 if not os.path.isfile(obj):
-                     data_list = insensitive_glob(obj + "/" + self.all['radical'] + '*' + self.all['extension'])
-                     if len(data_list) > 0:
-                         self.all['folder_list'].append(obj)
-                         self.all['folder_number'] += 1
-             self.all['folder_list'] = np.sort(self.all['folder_list'])
-
-         if isinstance(self.all['sample_number_per_folder'], int) or len(self.all['sample_number_per_folder']) == 1:
-             self.all['sample_number_per_folder'] = np.repeat(self.all['sample_number_per_folder'],
-                                                              self.all['folder_number'])
-
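The guard in look_for_data flags file lists whose name lengths vary by more than log10 of the file count before handing them to natsort. A toy rerun of the same check, with made-up names and a plain list comprehension standing in for vectorized_len:

    import numpy as np
    from natsort import natsorted

    names = ['IMG_9.jpg', 'IMG_10.jpg', 'IMG_100.jpg']
    lengths = np.array([len(n) for n in names])
    if np.max(np.diff(lengths)) > np.log10(len(names)):
        print('File names present strong variations')
    print(natsorted(names))  # ['IMG_9.jpg', 'IMG_10.jpg', 'IMG_100.jpg']
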
-     def update_folder_id(self, sample_number, folder_name=""):
-         os.chdir(Path(self.all['global_pathway']) / folder_name)
-         self.data_list = insensitive_glob(
-             self.all['radical'] + '*' + self.all['extension'])  # Provides a list ordered by last modification date
-         # Sorting is necessary when some modifications (like rotation) changed the last modification date
-         lengths = vectorized_len(self.data_list)
-         if np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
-             logging.error("File names present strong variations and cannot be correctly sorted.")
-         self.data_list = natsort.natsorted(self.data_list)
-         if self.all['im_or_vid'] == 1:
-             self.sample_number = len(self.data_list)
-         else:
-             self.vars['img_number'] = len(self.data_list)
-             self.sample_number = sample_number
-         if len(self.vars['analyzed_individuals']) != sample_number:
-             self.vars['analyzed_individuals'] = np.arange(sample_number) + 1
-
-     def load_data_to_run_cellects_quickly(self):
-         current_global_pathway = self.all['global_pathway']
-         folder_number = self.all['folder_number']
-         if folder_number > 1:
-             folder_list = deepcopy(self.all['folder_list'])
-             sample_number_per_folder = deepcopy(self.all['sample_number_per_folder'])
-
-         if os.path.isfile('Data to run Cellects quickly.pkl'):
-             pickle_rick = PickleRick()
-             data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
-             if data_to_run_cellects_quickly is None:
-                 data_to_run_cellects_quickly = {}
-
-             # try:
-             #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
-             #         data_to_run_cellects_quickly = pickle.load(fileopen)
-             # except pickle.UnpicklingError:
-             #     logging.error("Pickle error")
-             #     data_to_run_cellects_quickly = {}
-             if ('validated_shapes' in data_to_run_cellects_quickly) and ('coordinates' in data_to_run_cellects_quickly) and ('all' in data_to_run_cellects_quickly):
-                 logging.info("Successfully loaded Data to run Cellects quickly.pkl from the user chosen directory")
-                 self.all = data_to_run_cellects_quickly['all']
-                 # If you want to add a new variable, first run an updated version of all_vars_dict,
-                 # then put a breakpoint here and run the following + self.save_data_to_run_cellects_quickly():
-                 # self.all['vars']['lose_accuracy_to_save_memory'] = False
-                 self.vars = self.all['vars']
-                 self.update_data()
-                 print(self.vars['convert_for_motion'])
-                 folder_changed = False
-                 if current_global_pathway != self.all['global_pathway']:
-                     folder_changed = True
-                     logging.info(
-                         "Although the folder is ready, it is not at the same place as it was during creation, updating")
-                     self.all['global_pathway'] = current_global_pathway
-                 if folder_number > 1:
-                     self.all['global_pathway'] = current_global_pathway
-                     self.all['folder_list'] = folder_list
-                     self.all['folder_number'] = folder_number
-                     self.all['sample_number_per_folder'] = sample_number_per_folder
-
-                 if len(self.data_list) == 0:
-                     self.look_for_data()
-                 if folder_changed and folder_number > 1 and len(self.all['folder_list']) > 0:
-                     self.update_folder_id(self.all['sample_number_per_folder'][0], self.all['folder_list'][0])
-                 self.get_first_image()
-                 self.get_last_image()
-                 (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = data_to_run_cellects_quickly[
-                     'coordinates']
-                 if self.all['automatically_crop']:
-                     self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
-                     logging.info("Crop first image")
-                     self.first_image.automatically_crop(self.first_image.crop_coord)
-                     logging.info("Crop last image")
-                     self.last_image.automatically_crop(self.first_image.crop_coord)
-                 else:
-                     self.first_image.crop_coord = None
-                 # self.cropping(True)
-                 # self.cropping(False)
-                 self.first_image.validated_shapes = data_to_run_cellects_quickly['validated_shapes']
-                 self.first_image.im_combinations = []
-                 self.current_combination_id = 0
-                 self.first_image.im_combinations.append({})
-                 self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
-                 self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
-                 self.first_image.im_combinations[self.current_combination_id]['shape_number'] = data_to_run_cellects_quickly['shape_number']
-
-                 self.first_exp_ready_to_run = True
-                 print(f"Overwrite is {self.all['overwrite_unaltered_videos']}")
-                 if self.vars['subtract_background'] and len(self.vars['background_list']) == 0:
-                     self.first_exp_ready_to_run = False
-             else:
-                 self.first_exp_ready_to_run = False
-         else:
-             self.first_exp_ready_to_run = False
-         if self.first_exp_ready_to_run:
-             logging.info("The current (or the first) folder is ready to run")
-         else:
-             logging.info("The current (or the first) folder is not ready to run")
-
-     def update_data(self):
-         dd = DefaultDicts()
-         all = len(dd.all) != len(self.all)
-         vars = len(dd.vars) != len(self.vars)
-         all_desc = len(dd.all['descriptors']) != len(self.all['descriptors'])
-         vars_desc = len(dd.vars['descriptors']) != len(self.vars['descriptors'])
-         if all:
-             for key, val in dd.all.items():
-                 if key not in self.all:
-                     self.all[key] = val
-         if vars:
-             for key, val in dd.vars.items():
-                 if key not in self.vars:
-                     self.vars[key] = val
-         if all_desc:
-             for key, val in dd.all['descriptors'].items():
-                 if key not in self.all['descriptors']:
-                     self.all['descriptors'][key] = val
-         if vars_desc:
-             for key, val in dd.vars['descriptors'].items():
-                 if key not in self.vars['descriptors']:
-                     self.vars['descriptors'][key] = val
-
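update_data backfills any key that a newer DefaultDicts defines but the loaded dictionaries lack, without overwriting user values. The same backfill in isolation, with hypothetical keys:

    defaults = {'cores': 1, 'grid_segmentation': False}
    loaded = {'cores': 4}                # an older saved dict, missing one key
    if len(defaults) != len(loaded):
        for key, val in defaults.items():
            if key not in loaded:
                loaded[key] = val
    print(loaded)  # {'cores': 4, 'grid_segmentation': False}
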
-     def save_data_to_run_cellects_quickly(self, new_one_if_does_not_exist=True):
-         data_to_run_cellects_quickly = None
-         if os.path.isfile('Data to run Cellects quickly.pkl'):
-             logging.info("Update -Data to run Cellects quickly.pkl- in the user chosen directory")
-             pickle_rick = PickleRick()
-             data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
-             if data_to_run_cellects_quickly is None:
-                 logging.error("Failed to load Data to run Cellects quickly.pkl before update. Abort saving.")
-
-             # try:
-             #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
-             #         data_to_run_cellects_quickly = pickle.load(fileopen)
-             # except pickle.UnpicklingError:
-             #     logging.error("Pickle error")
-             #     data_to_run_cellects_quickly = {}
-         else:
-             if new_one_if_does_not_exist:
-                 logging.info("Create Data to run Cellects quickly.pkl in the user chosen directory")
-                 data_to_run_cellects_quickly = {}
-         if data_to_run_cellects_quickly is not None:
-             if self.data_to_save['first_image']:
-                 data_to_run_cellects_quickly['validated_shapes'] = self.first_image.im_combinations[self.current_combination_id]['binary_image']
-                 data_to_run_cellects_quickly['shape_number'] = self.first_image.im_combinations[self.current_combination_id]['shape_number']
-                 # data_to_run_cellects_quickly['converted_image'] = self.first_image.im_combinations[self.current_combination_id]['converted_image']
-             if self.data_to_save['coordinates']:
-                 data_to_run_cellects_quickly['coordinates'] = self.list_coordinates()
-                 logging.info("When they exist, overwrite unaltered videos")
-                 self.all['overwrite_unaltered_videos'] = True
-             if self.data_to_save['exif']:
-                 self.vars['exif'] = self.extract_exif()
-                 # data_to_run_cellects_quickly['exif'] = self.extract_exif()
-             # if self.data_to_save['background_and_origin_list']:
-             #     logging.info(f"Origin shape is {self.vars['origin_list'][0].shape}")
-             #     data_to_run_cellects_quickly['background_and_origin_list'] = [self.vars['origin_list'], self.vars['background_list'], self.vars['background_list2']]
-             self.all['vars'] = self.vars
-             print(self.vars['convert_for_motion'])
-             data_to_run_cellects_quickly['all'] = self.all
-             # data_to_run_cellects_quickly['all']['vars']['origin_state'] = "fluctuating"
-             pickle_rick = PickleRick()
-             pickle_rick.write_file(data_to_run_cellects_quickly, 'Data to run Cellects quickly.pkl')
-
-     def list_coordinates(self):
-         if self.first_image.crop_coord is None:
-             self.first_image.crop_coord = [0, self.first_image.image.shape[0], 0,
-                                            self.first_image.image.shape[1]]
-         videos_coordinates = self.first_image.crop_coord + [self.left, self.right, self.top, self.bot]
-         return videos_coordinates
-
-     def extract_exif(self):
-         if self.all['im_or_vid'] == 1:
-             timings = np.arange(self.vars['dims'][0])
-         else:
-             if sys.platform.startswith('win'):
-                 pathway = os.getcwd() + '\\'
-             else:
-                 pathway = os.getcwd() + '/'
-             arbitrary_time_step: bool = True
-             if self.all['extract_time_interval']:
-                 self.vars['time_step'] = 1
-                 try:
-                     timings = extract_time(self.data_list, pathway, self.all['raw_images'])
-                     timings = timings - timings[0]
-                     timings = timings / 60
-                     time_step = np.mean(np.diff(timings))
-                     digit_nb = 0
-                     for i in str(time_step):
-                         if i in {'.'}:
-                             pass
-                         elif i in {'0'}:
-                             digit_nb += 1
-                         else:
-                             break
-                     self.vars['time_step'] = np.round(time_step, digit_nb + 1)
-                     arbitrary_time_step = False
-                 except:
-                     pass
-             if arbitrary_time_step:
-                 timings = np.arange(0, self.vars['dims'][0] * self.vars['time_step'], self.vars['time_step'])
-                 timings = timings - timings[0]
-                 timings = timings / 60
-         return timings
-
-         #
-         # if not os.path.isfile("timings.csv") or self.all['overwrite_cellects_data']:
-         #     if self.vars['time_step'] == 0:
-         #         if self.all['im_or_vid'] == 1:
-         #             savetxt("timings.csv", np.arange(self.vars['dims'][0]), fmt='%10d', delimiter=',')
-         #         else:
-         #             if sys.platform.startswith('win'):
-         #                 pathway = os.getcwd() + '\\'
-         #             else:
-         #                 pathway = os.getcwd() + '/'
-         #             timings = extract_time(self.data_list, pathway, self.all['raw_images'])
-         #             timings = timings - timings[0]
-         #             timings = timings / 60
-         #     else:
-         #         timings = np.arange(0, self.vars['dims'][0] * self.vars['time_step'], self.vars['time_step'])
-         #     savetxt("timings.csv", timings, fmt='%1.2f', delimiter=',')
-
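The digit-counting loop in extract_exif counts the leading zero characters of the printed mean time step and rounds one digit past them. Re-running that exact logic on hypothetical timings (in minutes):

    import numpy as np

    timings = np.array([0.0, 0.0503, 0.1001, 0.1504])
    time_step = np.mean(np.diff(timings))   # ~0.05013
    digit_nb = 0
    for ch in str(time_step):
        if ch == '.':
            continue
        elif ch == '0':
            digit_nb += 1
        else:
            break
    print(np.round(time_step, digit_nb + 1))  # 0.05
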
-     def get_first_image(self):
-         logging.info("Load first image")
-         just_read_image = self.first_im is not None
-         self.reduce_image_dim = False
-         # just_read_image = self.analysis_instance is not None
-         if self.all['im_or_vid'] == 1:
-             cap = cv2.VideoCapture(self.data_list[0])
-             counter = 0
-             if not just_read_image:
-                 self.sample_number = len(self.data_list)
-                 self.vars['img_number'] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-                 self.analysis_instance = np.zeros(
-                     [int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
-                      int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 3])
-                 while cap.isOpened() and counter < 1:
-                     ret, frame = cap.read()
-                     if counter == 0:
-                         self.first_im = frame
-                         self.analysis_instance[0, ...] = self.first_im
-                         break
-                 cap.release()
-             elif np.sum(self.analysis_instance[self.all['first_detection_frame'] - 1, ...] == 0):
-                 cap = cv2.VideoCapture(self.data_list[0])
-                 counter = 0
-                 while cap.isOpened() and (counter < self.all['first_detection_frame']):
-                     ret, frame = cap.read()
-                     # if self.reduce_image_dim:
-                     #     frame = frame[:, :, 0]
-                     self.analysis_instance[counter, ...] = frame
-                     counter += 1
-
-                 cap.release()
-                 self.first_im = self.analysis_instance[
-                     self.all['first_detection_frame'] - 1, ...]
-             self.vars['dims'] = self.analysis_instance.shape[:3]
-
-         else:
-             self.vars['img_number'] = len(self.data_list)
-             self.all['raw_images'] = is_raw_image(self.data_list[0])
-             self.first_im = readim(self.data_list[self.all['first_detection_frame'] - 1], self.all['raw_images'])
-             # if self.reduce_image_dim:
-             #     self.first_im = self.first_im[:, :, 0]
-             self.vars['dims'] = [self.vars['img_number'], self.first_im.shape[0], self.first_im.shape[1]]
-             # self.first_im = readim(self.data_list[0], self.all['raw_images'])
-         if len(self.first_im.shape) == 3:
-             if np.all(np.equal(self.first_im[:, :, 0], self.first_im[:, :, 1])) and np.all(
-                     np.equal(self.first_im[:, :, 1], self.first_im[:, :, 2])):
-                 self.reduce_image_dim = True
-             if self.reduce_image_dim:
-                 self.first_im = self.first_im[:, :, 0]
-                 if self.all['im_or_vid'] == 1:
-                     self.analysis_instance = self.analysis_instance[:, :, :, 0]
-         self.first_image = OneImageAnalysis(self.first_im)
-         self.vars['already_greyscale'] = self.first_image.already_greyscale
-         if self.vars['already_greyscale']:
-             self.vars["convert_for_origin"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
-             self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
-             if np.mean((np.mean(self.first_image.image[2, :, ...]), np.mean(self.first_image.image[-3, :, ...]), np.mean(self.first_image.image[:, 2, ...]), np.mean(self.first_image.image[:, -3, ...]))) > 127:
-                 self.vars['contour_color']: np.uint8 = 0
-             else:
-                 self.vars['contour_color']: np.uint8 = 255
-         if self.all['first_detection_frame'] > 1:
-             self.vars['origin_state'] = 'invisible'
-
-     def get_last_image(self):
-         logging.info("Load last image")
-         if self.all['im_or_vid'] == 1:
-             cap = cv2.VideoCapture(self.data_list[0])
-             counter = 0
-             while cap.isOpened() and counter < self.vars['img_number']:
-                 ret, frame = cap.read()
-                 if self.reduce_image_dim:
-                     frame = frame[:, :, 0]
-                 self.analysis_instance[-1, ...] = frame
-                 # if counter == self.vars['img_number'] - 1:
-                 #     if self.reduce_image_dim:
-                 #         frame = frame[:, :, 0]
-                 #     break
-                 counter += 1
-             self.last_im = frame
-             cap.release()
-         else:
-             # self.last_im = readim(self.data_list[-1], self.all['raw_images'])
-             is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
-             self.last_im = read_and_rotate(self.data_list[-1], self.first_im, self.all['raw_images'], is_landscape)
-             if self.reduce_image_dim:
-                 self.last_im = self.last_im[:, :, 0]
-         self.last_image = OneImageAnalysis(self.last_im)
-
-     # self.message_when_thread_finished.emit("")
-     def fast_image_segmentation(self, is_first_image, biomask=None, backmask=None, spot_size=None):
-         if is_first_image:
-             self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
-                                                  self.all["bio_mask"], self.all["back_mask"], subtract_background=None,
-                                                  subtract_background2=None, grid_segmentation=False,
-                                                  filter_spec=self.vars["filter_spec"])
-             if not self.first_image.drift_correction_already_adjusted:
-                 self.vars['drift_already_corrected'] = self.first_image.check_if_image_border_attest_drift_correction()
-                 if self.vars['drift_already_corrected']:
-                     logging.info("Cellects detected that the images have already been corrected for drift")
-                     self.first_image.adjust_to_drift_correction(self.vars['convert_for_origin']['logical'])
-             if self.vars["grid_segmentation"]:
-                 self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
-                                                      self.all["bio_mask"], self.all["back_mask"],
-                                                      subtract_background=None, subtract_background2=None,
-                                                      grid_segmentation=True,
-                                                      filter_spec=self.vars["filter_spec"])
-
-             self.first_image.set_spot_shapes_and_size_confint(self.all['starting_blob_shape'])
-             logging.info(self.sample_number)
-             process_i = ProcessFirstImage(
-                 [self.first_image, False, False, None, self.vars['several_blob_per_arena'],
-                  self.sample_number, spot_size, self.vars["color_number"], self.all["bio_mask"], self.all["back_mask"], None])
-             process_i.binary_image = self.first_image.binary_image
-             process_i.process_binary_image(use_bio_and_back_masks=True)
-
-             if self.all["back_mask"] is not None:
-                 if np.any(process_i.shapes[self.all["back_mask"]]):
-                     process_i.shapes[np.isin(process_i.shapes, np.unique(process_i.shapes[self.all["back_mask"]]))] = 0
-                 process_i.validated_shapes = (process_i.shapes > 0).astype(np.uint8)
-             if self.all["bio_mask"] is not None:
-                 process_i.validated_shapes[self.all["bio_mask"]] = 1
-             if self.all["back_mask"] is not None or self.all["bio_mask"] is not None:
-                 process_i.shape_number, process_i.shapes = cv2.connectedComponents(process_i.validated_shapes, connectivity=8)
-                 process_i.shape_number -= 1
-
-             self.first_image.validated_shapes = process_i.validated_shapes
-             self.first_image.shape_number = process_i.shape_number
-             if self.first_image.im_combinations is None:
-                 self.first_image.im_combinations = []
-                 self.first_image.im_combinations.append({})
-             self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
-             self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
-             self.first_image.im_combinations[self.current_combination_id]['converted_image'] = np.round(self.first_image.image).astype(np.uint8)
-             self.first_image.im_combinations[self.current_combination_id]['shape_number'] = process_i.shape_number
-             # self.first_image.generate_color_space_combination(self.vars['convert_for_origin'], subtract_background)
-         else:
-             # self.last_image.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'])
-             # if self.vars['drift_already_corrected']:
-             #     drift_correction, drift_correction2 = self.last_image.adjust_to_drift_correction()
-             #     self.last_image.segmentation(self.vars['convert_for_motion']['logical'], self.vars['color_number'])
-             self.cropping(is_first_image=False)
-             print(self.vars["filter_spec"])
-             self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
-                                                 biomask, backmask, self.first_image.subtract_background,
-                                                 self.first_image.subtract_background2,
-                                                 grid_segmentation=self.vars["grid_segmentation"],
-                                                 filter_spec=self.vars["filter_spec"])
-             if self.vars['drift_already_corrected'] and not self.last_image.drift_correction_already_adjusted and not self.vars["grid_segmentation"]:
-                 self.last_image.adjust_to_drift_correction(self.vars['convert_for_motion']['logical'])
-
-             if self.last_image.im_combinations is None:
-                 self.last_image.im_combinations = []
-                 self.last_image.im_combinations.append({})
-             self.last_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_motion']
-             self.last_image.im_combinations[self.current_combination_id]['binary_image'] = self.last_image.binary_image
-             self.last_image.im_combinations[self.current_combination_id]['converted_image'] = np.round(self.last_image.image).astype(np.uint8)
-
-             # self.last_image.generate_color_space_combination(self.vars['convert_for_motion'], subtract_background)
-             # if self.all["more_than_two_colors"]:
-             #     self.last_image.kmeans(self.vars["color_number"])
-             # else:
-             #     self.last_image.thresholding()
-             # if self.all['are_gravity_centers_moving'] != 1:
-             #     self.delineate_each_arena()
-
-     def cropping(self, is_first_image):
-         if not self.vars['drift_already_corrected']:
-             if is_first_image:
-                 if not self.first_image.cropped:
-                     if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
-                         pickle_rick = PickleRick()
-                         data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
-                         if data_to_run_cellects_quickly is not None:
-                             if 'coordinates' in data_to_run_cellects_quickly:
-                                 logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
-                                 (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
-                                     data_to_run_cellects_quickly['coordinates']
-                                 self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
-                             else:
-                                 self.first_image.get_crop_coordinates()
-                         else:
-                             self.first_image.get_crop_coordinates()
-
-                         # try:
-                         #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
-                         #         data_to_run_cellects_quickly = pickle.load(fileopen)
-                         #     if 'coordinates' in data_to_run_cellects_quickly:
-                         #         logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
-                         #         (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
-                         #             data_to_run_cellects_quickly['coordinates']
-                         #         self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
-                         #     else:
-                         #         self.first_image.get_crop_coordinates()
-                         # except pickle.UnpicklingError:
-                         #     logging.error("Pickle error")
-                         #     self.first_image.get_crop_coordinates()
-
-                         # if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('coordinates.pkl')):
-                         #     with open('coordinates.pkl', 'rb') as fileopen:
-                         #         (ccy1, ccy2, ccx1, ccx2, self.videos.left, self.videos.right, self.videos.top,
-                         #          self.videos.bot) = pickle.load(fileopen)
-                     else:
-                         self.first_image.get_crop_coordinates()
-                 if self.all['automatically_crop']:
-                     self.first_image.automatically_crop(self.first_image.crop_coord)
-                 else:
-                     self.first_image.crop_coord = None
-             else:
-                 if not self.last_image.cropped and self.all['automatically_crop']:
-                     self.last_image.automatically_crop(self.first_image.crop_coord)
-         # if self.all['automatically_crop'] and not self.vars['drift_already_corrected']:
-         #     if is_first_image:
-         #         self.first_image.get_crop_coordinates()
-         #         self.first_image.automatically_crop(self.first_image.crop_coord)
-         #     else:
-         #         self.last_image.automatically_crop(self.first_image.crop_coord)
-
-     def get_average_pixel_size(self):
-         logging.info("Get average pixel size")
-         (self.first_image.shape_number,
-          self.first_image.shapes,
-          self.first_image.stats,
-          centroids) = cv2.connectedComponentsWithStats(
-             self.first_image.validated_shapes,
-             connectivity=8)
-         self.first_image.shape_number -= 1
-         if self.all['scale_with_image_or_cells'] == 0:
-             self.vars['average_pixel_size'] = np.square(
-                 self.all['image_horizontal_size_in_mm'] /
-                 self.first_im.shape[1])
-         else:
-             self.vars['average_pixel_size'] = np.square(
-                 self.all['starting_blob_hsize_in_mm'] /
-                 np.mean(self.first_image.stats[1:, 2]))
-         if self.all['set_spot_size']:
-             self.starting_blob_hsize_in_pixels = (
-                 self.all['starting_blob_hsize_in_mm'] /
-                 np.sqrt(self.vars['average_pixel_size']))
-         else:
-             self.starting_blob_hsize_in_pixels = None
-
-         if self.all['automatic_size_thresholding']:
-             self.vars['first_move_threshold'] = 10
-         else:
-             # if self.vars['origin_state'] != "invisible":
-             self.vars['first_move_threshold'] = np.round(
-                 self.all['first_move_threshold_in_mm²'] /
-                 self.vars['average_pixel_size']).astype(np.uint8)
-         logging.info(f"The average pixel size is: {self.vars['average_pixel_size']} mm²")
-
-     def delineate_each_arena(self):
-         self.videos = OneVideoPerBlob(
-             self.first_image,
-             self.starting_blob_hsize_in_pixels,
-             self.all['raw_images'])
-         # self.delineation_number += 1
-         # if self.delineation_number > 1:
-         #     print('stophere')
-         analysis_status = {"continue": True, "message": ""}
-         if (self.sample_number > 1 and not self.vars['several_blob_per_arena']):
-             compute_get_bb: bool = True
-             if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
-
-                 pickle_rick = PickleRick()
-                 data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
-                 if data_to_run_cellects_quickly is not None:
-                     if 'coordinates' in data_to_run_cellects_quickly:
-                         (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
-                             data_to_run_cellects_quickly['coordinates']
-                         self.videos.left, self.videos.right, self.videos.top, self.videos.bot = self.left, self.right, self.top, self.bot
-                         self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
-                         if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
-                                 self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
-                             logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
-                             compute_get_bb = False
-
-                 # try:
-                 #     with open('Data to run Cellects quickly.pkl', 'rb') as fileopen:
-                 #         data_to_run_cellects_quickly = pickle.load(fileopen)
-                 # except pickle.UnpicklingError:
-                 #     logging.error("Pickle error")
-                 #     data_to_run_cellects_quickly = {}
-                 # if 'coordinates' in data_to_run_cellects_quickly:
-                 #     (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
-                 #         data_to_run_cellects_quickly['coordinates']
-                 #     self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
-                 #     if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
-                 #             self.first_image.image.shape[1] == (ccx2 - ccx1)):  # maybe useless now
-                 #         logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
-                 #         compute_get_bb = False
-
-                 # if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('coordinates.pkl')):
-                 #     with open('coordinates.pkl', 'rb') as fileopen:
-                 #         (ccy1, ccy2, ccx1, ccx2, self.videos.left, self.videos.right, self.videos.top, self.videos.bot) = pickle.load(fileopen)
-
-                 # if (not self.all['overwrite_unaltered_videos'] and
-                 #         os.path.isfile('coordinates.pkl')):
-                 #     with open('coordinates.pkl', 'rb') as fileopen:
-                 #         (vertical_shape, horizontal_shape, self.videos.left, self.videos.right, self.videos.top,
-                 #          self.videos.bot) = pickle.load(fileopen)
-
-             if compute_get_bb:
-                 if self.all['im_or_vid'] == 1:
-                     self.videos.get_bounding_boxes(
-                         are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
-                         img_list=self.analysis_instance,
-                         color_space_combination=self.vars['convert_for_origin'],  # self.vars['convert_for_motion']
-                         color_number=self.vars["color_number"],
-                         sample_size=5,
-                         all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'],
-                         filter_spec=self.vars['filter_spec'])
-                 else:
-                     self.videos.get_bounding_boxes(
-                         are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
-                         img_list=self.data_list,
-                         color_space_combination=self.vars['convert_for_origin'],
-                         color_number=self.vars["color_number"],
-                         sample_size=5,
-                         all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'],
-                         filter_spec=self.vars['filter_spec'])
-                 if np.any(self.videos.ordered_stats[:, 4] > 100 * np.median(self.videos.ordered_stats[:, 4])):
-                     analysis_status['message'] = "A specimen is at least 100 times larger than the median: (re)do the first image analysis."
-                     analysis_status['continue'] = False
-                 if np.any(self.videos.ordered_stats[:, 4] < 0.01 * np.median(self.videos.ordered_stats[:, 4])):
-                     analysis_status['message'] = "A specimen is at least 100 times smaller than the median: (re)do the first image analysis."
-                     analysis_status['continue'] = False
-                 logging.info(
-                     str(self.videos.not_analyzed_individuals) + " individuals are out of picture scope and cannot be analyzed")
-             self.left, self.right, self.top, self.bot = self.videos.left, self.videos.right, self.videos.top, self.videos.bot
-
-         else:
-             self.left, self.right, self.top, self.bot = np.array([1]), np.array([self.first_image.image.shape[1] - 2]), np.array([1]), np.array([self.first_image.image.shape[0] - 2])
-             self.videos.left, self.videos.right, self.videos.top, self.videos.bot = np.array([1]), np.array([self.first_image.image.shape[1] - 2]), np.array([1]), np.array([self.first_image.image.shape[0] - 2])
-
-         self.vars['analyzed_individuals'] = np.arange(self.sample_number) + 1
-         if self.videos.not_analyzed_individuals is not None:
-             self.vars['analyzed_individuals'] = np.delete(self.vars['analyzed_individuals'],
-                                                           self.videos.not_analyzed_individuals - 1)
-         # logging.info(self.top)
-         return analysis_status
-
-     def get_background_to_subtract(self):
-         """
-         Rethink when this happens and find out why it does not work.
-         """
-         # self.vars['subtract_background'] = False
-         if self.vars['subtract_background']:
-             self.first_image.generate_subtract_background(self.vars['convert_for_motion'])
-
-     def get_origins_and_backgrounds_lists(self):
-         logging.info("Create origins and background lists")
-         if self.top is None:
-             # self.top = [1]
-             # self.bot = [self.first_im.shape[0] - 2]
-             # self.left = [1]
-             # self.right = [self.first_im.shape[1] - 2]
-             self.top = np.array([1])
-             self.bot = np.array([self.first_im.shape[0] - 2])
-             self.left = np.array([1])
-             self.right = np.array([self.first_im.shape[1] - 2])
-
-         add_to_c = 1
-         first_im = self.first_image.validated_shapes
-         self.vars['origin_list'] = []
-         self.vars['background_list'] = []
-         self.vars['background_list2'] = []
-         for rep in np.arange(len(self.vars['analyzed_individuals'])):
-             self.vars['origin_list'].append(first_im[self.top[rep]:(self.bot[rep] + add_to_c),
-                                                      self.left[rep]:(self.right[rep] + add_to_c)])
-             if self.vars['subtract_background']:
-                 self.vars['background_list'].append(
-                     self.first_image.subtract_background[self.top[rep]:(self.bot[rep] + add_to_c),
-                                                          self.left[rep]:(self.right[rep] + add_to_c)])
-                 if self.vars['convert_for_motion']['logical'] != 'None':
-                     self.vars['background_list2'].append(
-                         self.first_image.subtract_background2[self.top[rep]:(self.bot[rep] + add_to_c),
-                                                               self.left[rep]:(self.right[rep] + add_to_c)])
-
-     def get_origins_and_backgrounds_one_by_one(self):
-         add_to_c = 1
-         self.vars['origin_list'] = []
-         self.vars['background_list'] = []
-         self.vars['background_list2'] = []
-
-         for arena in np.arange(len(self.vars['analyzed_individuals'])):
-             bgr_image = self.first_image.bgr[self.top[arena]:(self.bot[arena] + add_to_c),
-                                              self.left[arena]:(self.right[arena] + add_to_c), ...]
-             image = OneImageAnalysis(bgr_image)
-             if self.vars['subtract_background']:
-                 image.generate_subtract_background(self.vars['convert_for_motion'])
-                 self.vars['background_list'].append(image.image)
-                 if self.vars['convert_for_motion']['logical'] != 'None':
-                     self.vars['background_list2'].append(image.image2)
-
-             # self.vars['origins_list'].append(self.first_image.validated_shapes[self.top[arena]:(self.bot[arena]),
-             #                                                                    self.left[arena]:(self.right[arena])])
-             #
-             if self.vars['several_blob_per_arena']:
-                 image.validated_shapes = image.binary_image
-             else:
-                 image.get_largest_shape()
-
-             self.vars['origin_list'].append(image.validated_shapes)
-
-     def choose_color_space_combination(self):
-         # self = po
-         # 2) Represent the segmentation using a particular color space combination
-         if self.all['are_gravity_centers_moving'] != 1:
-             analysis_status = self.delineate_each_arena()
-             # self.fi.automatically_crop(self.first_image.crop_coord)
-         self.last_image = OneImageAnalysis(self.last_im)
-         self.last_image.automatically_crop(self.videos.first_image.crop_coord)
-         # csc = ColorSpaceCombination(self.last_image.image)
-
-         concomp_nb = [self.sample_number, self.sample_number * 50]
-         if self.all['are_zigzag'] == "columns":
-             inter_dist = np.mean(np.diff(np.nonzero(self.videos.first_image.y_boundaries)))
-         elif self.all['are_zigzag'] == "rows":
-             inter_dist = np.mean(np.diff(np.nonzero(self.videos.first_image.x_boundaries)))
-         else:
-             dist1 = np.mean(np.diff(np.nonzero(self.videos.first_image.y_boundaries)))
-             dist2 = np.mean(np.diff(np.nonzero(self.videos.first_image.x_boundaries)))
-             inter_dist = max(dist1, dist2)
-         if self.all['starting_blob_shape'] == "circle":
-             max_shape_size = np.pi * np.square(inter_dist)
-         else:
-             max_shape_size = np.square(2 * inter_dist)
-         total_surfarea = max_shape_size * self.sample_number
-         if self.all['are_gravity_centers_moving'] != 1:
-             out_of_arenas = np.ones_like(self.videos.first_image.validated_shapes)
-             for blob_i in np.arange(len(self.vars['analyzed_individuals'])):
-                 out_of_arenas[self.top[blob_i]: (self.bot[blob_i] + 1),
-                               self.left[blob_i]: (self.right[blob_i] + 1)] = 0
-         else:
-             out_of_arenas = None
-         ref_image = self.videos.first_image.validated_shapes
-         self.last_image.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image)
-         # csc.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image)
-         # csc.find_potential_channels(concomp_nb, total_surfarea, max_shape_size, out_of_arenas, ref_image, self.first_image.subtract_background)
-         self.vars['convert_for_motion'] = self.last_image.channel_combination
-
-         self.fast_image_segmentation(False)
-         # if self.vars['subtract_background']:
-         #     csc = ColorSpaceCombination(self.last_image.image)
-         #     csc.generate(self.vars['convert_for_motion'], self.first_image.subtract_background)
-         # if self.all["more_than_two_colors"]:
-         #     csc.kmeans(self.vars["color_number"])
-         # else:
-         #     csc.thresholding()
-         # self.last_image.image = csc.image
-         # self.last_image.binary_image = csc.binary_image
-
-     def untype_csc_dict(self):
-         new_convert_for_origin = {}
-         for k, v in self.vars['convert_for_origin'].items():
-             new_convert_for_origin[k] = v
-         if self.vars['logical_between_csc_for_origin'] is not None:
-             new_convert_for_origin['logical'] = self.vars['logical_between_csc_for_origin']
-         for k, v in self.vars['convert_for_origin2'].items():
-             new_convert_for_origin[k] = v
-         self.vars['convert_for_origin'] = new_convert_for_origin
-         self.vars['convert_for_origin2'] = {}
-
-         new_convert_for_motion = {}
-         for k, v in self.vars['convert_for_motion'].items():
-             new_convert_for_motion[k] = v
-         if self.vars['convert_for_motion']['logical'] != 'None':
-             new_convert_for_motion['logical'] = self.vars['convert_for_motion']['logical']
-         for k, v in self.vars['convert_for_motion2'].items():
-             new_convert_for_motion[k] = v
-         self.vars['convert_for_motion'] = new_convert_for_motion
-         self.vars['convert_for_motion2'] = {}
-
-     def type_csc_dict(self):
-         # self.vars['convert_for_motion']['logical'] = 'And'
-         # self.vars['convert_for_motion']['hsv'] = np.array((0, 0, 1))
-         # self.vars['convert_for_motion']['logical'] = 'And'
-         # self.vars['convert_for_motion']['lab2'] = np.array((0, 0, 1))
-
-         new_convert_for_origin = TDict()
-         self.vars['convert_for_origin2'] = TDict()
-         self.vars['logical_between_csc_for_origin'] = None
-         for k, v in self.vars['convert_for_origin'].items():
-             if k != 'logical' and v.sum() > 0:
-                 if k[-1] != '2':
-                     new_convert_for_origin[k] = v
-                 else:
-                     self.vars['convert_for_origin2'][k[:-1]] = v
-             else:
-                 self.vars['logical_between_csc_for_origin'] = v
-         self.vars['convert_for_origin'] = new_convert_for_origin
-
-         new_convert_for_motion = TDict()
-         self.vars['convert_for_motion2'] = TDict()
-         self.vars['convert_for_motion']['logical'] = None
-         for k, v in self.vars['convert_for_motion'].items():
-             if k != 'logical' and v.sum() > 0:
-                 if k[-1] != '2':
-                     new_convert_for_motion[k] = v
-                 else:
-                     self.vars['convert_for_motion2'][k[:-1]] = v
-             else:
-                 self.vars['convert_for_motion']['logical'] = v
-         self.vars['convert_for_motion'] = new_convert_for_motion
-
-         if self.vars['color_number'] > 2:
-             self.vars['bio_label'] = None  # self.first_image.bio_label
-             if self.vars['convert_for_motion']['logical'] != 'None':
-                 self.vars['bio_label2'] = None
-
-     def find_if_lighter_background(self):
-         logging.info("Find if the background is lighter or darker than the cells")
-         self.vars['lighter_background']: bool = True
-         self.vars['contour_color']: np.uint8 = 0
-         are_dicts_equal: bool = True
-         for key in self.vars['convert_for_origin'].keys():
-             are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_motion'] and self.vars['convert_for_origin'][key] == self.vars['convert_for_motion'][key])
-         for key in self.vars['convert_for_motion'].keys():
-             are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_origin'] and self.vars['convert_for_motion'][key] == self.vars['convert_for_origin'][key])
-
-         if are_dicts_equal:
-
-             if self.first_im is None:
-                 self.get_first_image()
-                 self.fast_image_segmentation(True)
-                 self.cropping(is_first_image=True)
-             among = np.nonzero(self.first_image.validated_shapes)
-             not_among = np.nonzero(1 - self.first_image.validated_shapes)
-             # Use the converted image to tell if the background is lighter, for analysis purposes
-             if self.first_image.image[among[0], among[1]].mean() > self.first_image.image[not_among[0], not_among[1]].mean():
-                 self.vars['lighter_background'] = False
-             # Use the original image to tell if the background is lighter, for display purposes
-             if self.first_image.bgr[among[0], among[1], ...].mean() > self.first_image.bgr[not_among[0], not_among[1], ...].mean():
-                 self.vars['contour_color'] = 255
-         else:
-             if self.last_im is None:
-                 self.get_last_image()
-                 # self.cropping(is_first_image=False)
-                 self.fast_image_segmentation(is_first_image=False)
-             if self.last_image.binary_image.sum() == 0:
-                 self.fast_image_segmentation(is_first_image=False)
-             among = np.nonzero(self.last_image.binary_image)
-             not_among = np.nonzero(1 - self.last_image.binary_image)
-             # Use the converted image to tell if the background is lighter, for analysis purposes
-             if self.last_image.image[among[0], among[1]].mean() > self.last_image.image[not_among[0], not_among[1]].mean():
-                 self.vars['lighter_background'] = False
-             # Use the original image to tell if the background is lighter, for display purposes
-             if self.last_image.bgr[among[0], among[1], ...].mean() > self.last_image.bgr[not_among[0], not_among[1], ...].mean():
-                 self.vars['contour_color'] = 255
-         if self.vars['origin_state'] == "invisible":
-             binary_image = deepcopy(self.first_image.binary_image)
-             self.first_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
-                                                  None, None, subtract_background=None,
-                                                  subtract_background2=None,
-                                                  grid_segmentation=self.vars["grid_segmentation"],
-                                                  filter_spec=self.vars["filter_spec"])
-             covered_values = self.first_image.image[np.nonzero(binary_image)]
-             if self.vars['lighter_background']:
-                 if np.max(covered_values) < 255:
-                     self.vars['luminosity_threshold'] = np.max(covered_values) + 1
-                 else:
-                     self.vars['luminosity_threshold'] = 127
-             else:
-                 if np.min(covered_values) > 0:
-                     self.vars['luminosity_threshold'] = np.min(covered_values) - 1
-                 else:
-                     self.vars['luminosity_threshold'] = 127
-
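find_if_lighter_background reduces to comparing mean intensity inside and outside the segmented mask. A toy version of that comparison on a 2x2 greyscale image:

    import numpy as np

    image = np.array([[200, 200], [50, 200]], dtype=np.uint8)
    mask = np.array([[0, 0], [1, 0]], dtype=np.uint8)   # 1 = specimen pixel
    among, not_among = np.nonzero(mask), np.nonzero(1 - mask)
    lighter_background = image[among].mean() < image[not_among].mean()
    print(lighter_background)  # True: the specimen (50) is darker than the background (200)
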
-     def load_one_arena(self, arena):
-         # self.delineate_each_arena()
-         # self.choose_color_space_combination()
-         add_to_c = 1
-         self.one_arena_done = True
-         i = np.nonzero(self.vars['analyzed_individuals'] == arena)[0][0]
-         self.converted_video = np.zeros(
-             (len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c),
-             dtype=float)
-         if not self.vars['already_greyscale']:
-             self.visu = np.zeros((len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c, 3), dtype=np.uint8)
-         if self.vars['convert_for_motion']['logical'] != 'None':
-             self.converted_video2 = np.zeros((len(self.data_list), self.bot[i] - self.top[i] + add_to_c, self.right[i] - self.left[i] + add_to_c), dtype=float)
-         first_dict = TDict()
-         second_dict = TDict()
-         c_spaces = []
-         for k, v in self.vars['convert_for_motion'].items():
-             if k != 'logical' and v.sum() > 0:
-                 if k[-1] != '2':
-                     first_dict[k] = v
-                     c_spaces.append(k)
-                 else:
-                     second_dict[k[:-1]] = v
-                     c_spaces.append(k[:-1])
-         prev_img = None
-         background = None
-         background2 = None
-         is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
-         for image_i, image_name in enumerate(self.data_list):
-             img = read_and_rotate(image_name, prev_img, self.all['raw_images'], is_landscape, self.first_image.crop_coord)
-             prev_img = deepcopy(img)
-             # if self.videos.first_image.crop_coord is not None:
-             #     img = img[self.videos.first_image.crop_coord[0]:self.videos.first_image.crop_coord[1],
-             #               self.videos.first_image.crop_coord[2]:self.videos.first_image.crop_coord[3], :]
-             img = img[self.top[arena - 1]: (self.bot[arena - 1] + add_to_c),
-                       self.left[arena - 1]: (self.right[arena - 1] + add_to_c), :]
-
-             if self.vars['already_greyscale']:
-                 if self.reduce_image_dim:
-                     self.converted_video[image_i, ...] = img[:, :, 0]
-                 else:
-                     self.converted_video[image_i, ...] = img
-             else:
-                 self.visu[image_i, ...] = img
-                 if self.vars['subtract_background']:
-                     background = self.vars['background_list'][i]
-                     if self.vars['convert_for_motion']['logical'] != 'None':
-                         background2 = self.vars['background_list2'][i]
-                 greyscale_image, greyscale_image2 = generate_color_space_combination(img, c_spaces, first_dict,
-                                                                                      second_dict, background, background2,
-                                                                                      self.vars['lose_accuracy_to_save_memory'])
-                 self.converted_video[image_i, ...] = greyscale_image
-                 if self.vars['convert_for_motion']['logical'] != 'None':
-                     self.converted_video2[image_i, ...] = greyscale_image2
-                 # csc = OneImageAnalysis(img)
-                 # else:
-                 #     csc.generate_color_space_combination(c_spaces, first_dict, second_dict, None, None)
-                 #     # self.converted_video[image_i, ...] = csc.image
-                 #     self.converted_video[image_i, ...] = csc.image
-                 #     if self.vars['convert_for_motion']['logical'] != 'None':
-                 #         self.converted_video2[image_i, ...] = csc.image2
-
-         # write_video(self.visu, f"ind_{arena}{self.vars['videos_extension']}", is_color=True, fps=1)
-
-     def update_output_list(self):
-         self.vars['descriptors'] = {}
-         # self.vars['descriptors']['final_area'] = True  # [False, True, False]
-         # if self.vars['first_move_threshold'] is not None:
-         #     self.vars['descriptors']['first_move'] = True  # [False, True, False]
-
-         # if self.vars['iso_digi_analysis']:
-         #     self.vars['descriptors']['is_growth_isotropic'] = True  # [False, True, False]
-         #     self.vars['descriptors']['iso_digi_transi'] = True  # [False, True, False]
-
-         # if self.vars['oscilacyto_analysis']:
-         #     self.vars['descriptors']['max_magnitude'] = True  # [False, True, False]
-         #     self.vars['descriptors']['frequency_of_max_magnitude'] = True  # [False, True, False]
-         for descriptor in self.all['descriptors'].keys():
-             if descriptor == 'standard_deviation_xy':
-                 self.vars['descriptors']['standard_deviation_x'] = self.all['descriptors'][descriptor]
-                 self.vars['descriptors']['standard_deviation_y'] = self.all['descriptors'][descriptor]
-             elif descriptor == 'skewness_xy':
-                 self.vars['descriptors']['skewness_x'] = self.all['descriptors'][descriptor]
-                 self.vars['descriptors']['skewness_y'] = self.all['descriptors'][descriptor]
-             elif descriptor == 'kurtosis_xy':
-                 self.vars['descriptors']['kurtosis_x'] = self.all['descriptors'][descriptor]
-                 self.vars['descriptors']['kurtosis_y'] = self.all['descriptors'][descriptor]
-             elif descriptor == 'major_axes_len_and_angle':
-                 self.vars['descriptors']['major_axis_len'] = self.all['descriptors'][descriptor]
-                 self.vars['descriptors']['minor_axis_len'] = self.all['descriptors'][descriptor]
-                 self.vars['descriptors']['axes_orientation'] = self.all['descriptors'][descriptor]
-             else:
-                 if np.isin(descriptor, list(from_shape_descriptors_class.keys())):
-                     self.vars['descriptors'][descriptor] = self.all['descriptors'][descriptor]
-         self.vars['descriptors']['cluster_number'] = self.vars['oscilacyto_analysis']
-         self.vars['descriptors']['mean_cluster_area'] = self.vars['oscilacyto_analysis']
-         self.vars['descriptors']['vertices_number'] = self.vars['network_analysis']
-         self.vars['descriptors']['edges_number'] = self.vars['network_analysis']
-         self.vars['descriptors']['newly_explored_area'] = self.vars['do_fading']
-         """ if self.vars['descriptors_means']:
-                 self.vars['output_list'] += [f'{descriptor}_mean']
-                 self.vars['output_list'] += [f'{descriptor}_std']
-             if self.vars['descriptors_regressions']:
-                 self.vars['output_list'] += [f"{descriptor}_reg_start"]
-                 self.vars['output_list'] += [f"{descriptor}_reg_end"]
-                 self.vars['output_list'] += [f'{descriptor}_slope']
-                 self.vars['output_list'] += [f'{descriptor}_intercept']
-         """
-
-     def update_available_core_nb(self, image_bit_number=256, video_bit_number=140):  # video_bit_number=176
-         if self.vars['lose_accuracy_to_save_memory']:
-             video_bit_number -= 56
-         if self.vars['convert_for_motion']['logical'] != 'None':
-             video_bit_number += 64
-             if self.vars['lose_accuracy_to_save_memory']:
-                 video_bit_number -= 56
-         if self.vars['already_greyscale']:
-             video_bit_number -= 64
-         if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
-             video_bit_number += 16
-             image_bit_number += 128
-         if self.vars['save_coord_network'] or self.vars['network_analysis']:
1163
- video_bit_number += 8
1164
- image_bit_number += 64
1165
-
1166
- if isinstance(self.bot, list):
1167
- one_image_memory = np.multiply((self.bot[0] - self.top[0] + 1),
1168
- (self.right[0] - self.left[0] + 1)).max().astype(np.uint64)
1169
- else:
1170
- one_image_memory = np.multiply((self.bot - self.top + 1).astype(np.uint64),
1171
- (self.right - self.left + 1).astype(np.uint64)).max()
1172
- one_video_memory = self.vars['img_number'] * one_image_memory
1173
- necessary_memory = (one_image_memory * image_bit_number + one_video_memory * video_bit_number) * 1.16415e-10
1174
- available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
1175
- max_repeat_in_memory = (available_memory // necessary_memory).astype(np.uint16)
1176
- if max_repeat_in_memory > 1:
1177
- max_repeat_in_memory = np.max(((available_memory // (2 * necessary_memory)).astype(np.uint16), 1))
1178
- # if sys.platform.startswith('win'):
1179
- # available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
1180
- # else:
1181
-
1182
- self.cores = np.min((self.all['cores'], max_repeat_in_memory))
1183
- if self.cores > self.sample_number:
1184
- self.cores = self.sample_number
1185
- return np.round(np.absolute(available_memory - necessary_memory), 3)
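# --- Editor's sketch (annotation, not part of the package diff) ---
# The removed memory heuristic above, in isolation: 1.16415e-10 is
# 1 / (8 * 1024**3), i.e. the factor converting bits to GiB. The function
# name, default bit costs and the 1 GiB reserve below are illustrative
# assumptions, not package API.
import numpy as np
from psutil import virtual_memory

def max_parallel_analyses(height, width, img_number, requested_cores,
                          image_bits=256, video_bits=140, min_ram_free_gib=1):
    pixels = np.uint64(height) * np.uint64(width)
    necessary_gib = float(pixels * image_bits + pixels * img_number * video_bits) / (8 * 1024**3)
    available_gib = (virtual_memory().available >> 30) - min_ram_free_gib
    return int(min(requested_cores, max(available_gib // necessary_gib, 1)))

# e.g. max_parallel_analyses(500, 500, img_number=200, requested_cores=8)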
1186
-
1187
-
1188
- def update_one_row_per_arena(self, i, table_to_add):
1189
- if not self.vars['several_blob_per_arena']:
1190
- if self.one_row_per_arena is None:
1191
- self.one_row_per_arena = pd.DataFrame(np.zeros((len(self.vars['analyzed_individuals']), len(table_to_add)), dtype=float),
1192
- columns=table_to_add.keys())
1193
- self.one_row_per_arena.iloc[i, :] = table_to_add.values()
1194
-
1195
-
1196
- def update_one_row_per_frame(self, i, j, table_to_add):
1197
- if not self.vars['several_blob_per_arena']:
1198
- if self.one_row_per_frame is None:
1199
- self.one_row_per_frame = pd.DataFrame(index=range(len(self.vars['analyzed_individuals']) *
1200
- self.vars['img_number']),
1201
- columns=table_to_add.keys())
1202
-
1203
- self.one_row_per_frame.iloc[i:j, :] = table_to_add
1204
-
1205
-
1206
- def instantiate_tables(self):
1207
- self.update_output_list()
1208
- logging.info("Instantiate results tables and validation images")
1209
- self.one_row_per_oscillating_cluster = None
1210
- self.fractal_box_sizes = None
1211
- # if self.vars['oscilacyto_analysis']:
1212
- # self.one_row_per_oscillating_cluster = pd.DataFrame(columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size',
1213
- # 'edge_distance'])
1214
- # if self.vars['fractal_analysis']:
1215
- # self.fractal_box_sizes = pd.DataFrame(columns=['arena', 'time', 'fractal_box_lengths', 'fractal_box_widths'])
1216
-
1217
- if self.vars['already_greyscale']:
1218
- if len(self.first_image.bgr.shape) == 2:
1219
- self.first_image.bgr = np.stack((self.first_image.bgr, self.first_image.bgr, self.first_image.bgr), axis=2).astype(np.uint8)
1220
- if len(self.last_image.bgr.shape) == 2:
1221
- self.last_image.bgr = np.stack((self.last_image.bgr, self.last_image.bgr, self.last_image.bgr), axis=2).astype(np.uint8)
1222
- self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
1223
-
1224
- def add_analysis_visualization_to_first_and_last_images(self, i, first_visualization, last_visualization):
1225
- cr = ((self.top[i], self.bot[i] + 1),
1226
- (self.left[i], self.right[i] + 1))
1227
- if self.vars['arena_shape'] == 'circle':
1228
- ellipse = Ellipse((cr[0][1] - cr[0][0], cr[1][1] - cr[1][0])).create()
1229
- ellipse = np.stack((ellipse, ellipse, ellipse), axis=2).astype(np.uint8)
1230
- first_visualization *= ellipse
1231
- self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
1232
- self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += first_visualization
1233
- last_visualization *= ellipse
1234
- self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] *= (1 - ellipse)
1235
- self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] += last_visualization
1236
- else:
1237
- self.first_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = first_visualization
1238
- self.last_image.bgr[cr[0][0]:cr[0][1], cr[1][0]:cr[1][1], ...] = last_visualization
1239
-
1240
-
1241
- def save_tables(self):
1242
- logging.info("Save results tables and validation images")
1243
- if not self.vars['several_blob_per_arena']:
1244
- try:
1245
- self.one_row_per_arena.to_csv("one_row_per_arena.csv", sep=";", index=False, lineterminator='\n')
1246
- del self.one_row_per_arena
1247
- except PermissionError:
1248
- logging.error("Never let one_row_per_arena.csv open when Cellects runs")
1249
- self.message_from_thread.emit(f"Never let one_row_per_arena.csv open when Cellects runs")
1250
- try:
1251
- self.one_row_per_frame.to_csv("one_row_per_frame.csv", sep=";", index=False, lineterminator='\n')
1252
- del self.one_row_per_frame
1253
- except PermissionError:
1254
- logging.error("Never let one_row_per_frame.csv open when Cellects runs")
1255
- self.message_from_thread.emit(f"Never let one_row_per_frame.csv open when Cellects runs")
1256
- if self.vars['oscilacyto_analysis']:
1257
- try:
1258
- if self.one_row_per_oscillating_cluster is None:
1259
- self.one_row_per_oscillating_cluster = pd.DataFrame(columns=['arena', 'mean_pixel_period', 'phase', 'cluster_size',
1260
- 'edge_distance'])
1261
- self.one_row_per_oscillating_cluster.to_csv("one_row_per_oscillating_cluster.csv", sep=";", index=False,
1262
- lineterminator='\n')
1263
- del self.one_row_per_oscillating_cluster
1264
- except PermissionError:
1265
- logging.error("Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
1266
- self.message_from_thread.emit(f"Never let one_row_per_oscillating_cluster.csv open when Cellects runs")
1267
-
1268
- if self.vars['fractal_analysis']:
1269
- if os.path.isfile(f"oscillating_clusters_temporal_dynamics.h5"):
1270
- array_names = get_h5_keys(f"oscillating_clusters_temporal_dynamics.h5")
1271
- arena_fractal_dynamics = read_h5_array(f"oscillating_clusters_temporal_dynamics.h5", key=array_names[0])
1272
- arena_fractal_dynamics = np.hstack((np.repeat(np.uint32(array_names[0][-1]), arena_fractal_dynamics.shape[0]), arena_fractal_dynamics))
1273
- for array_name in array_names[1:]:
1274
- fractal_dynamics = read_h5_array(f"oscillating_clusters_temporal_dynamics.h5", key=array_name)
1275
- fractal_dynamics = np.hstack((np.repeat(np.uint32(array_name[-1]), fractal_dynamics.shape[0]), fractal_dynamics))
1276
- arena_fractal_dynamics = np.vstack((arena_fractal_dynamics, fractal_dynamics))
1277
- arena_fractal_dynamics = pd.DataFrame(arena_fractal_dynamics, columns=["arena", "time", "cluster_id", "flow", "centroid_y", "centroid_x", "area", "inner_network_area", "box_count_dim", "inner_network_box_count_dim"])
1278
- arena_fractal_dynamics.to_csv(f"oscillating_clusters_temporal_dynamics.csv", sep=";", index=False,
1279
- lineterminator='\n')
1280
- del arena_fractal_dynamics
1281
- os.remove(f"oscillating_clusters_temporal_dynamics.h5")
1282
- if self.all['extension'] == '.JPG':
1283
- extension = '.PNG'
1284
- else:
1285
- extension = '.JPG'
1286
- cv2.imwrite(f"Analysis efficiency, last image{extension}", self.last_image.bgr)
1287
- cv2.imwrite(
1288
- f"Analysis efficiency, {np.ceil(self.vars['img_number'] / 10).astype(np.uint64)}th image{extension}",
1289
- self.first_image.bgr)
1290
- # self.save_analysis_parameters.to_csv("analysis_parameters.csv", sep=";")
1291
-
1292
- software_settings = deepcopy(self.vars)
1293
- for key in ['descriptors', 'analyzed_individuals', 'exif', 'dims', 'origin_list', 'background_list', 'background_list2', 'descriptors', 'folder_list', 'sample_number_per_folder']:
1294
- software_settings.pop(key, None)
1295
- global_settings = deepcopy(self.all)
1296
- for key in ['analyzed_individuals', 'night_mode', 'expert_mode', 'is_auto', 'arena', 'video_option', 'compute_all_options', 'vars', 'dims', 'origin_list', 'background_list', 'background_list2', 'descriptors', 'folder_list', 'sample_number_per_folder']:
1297
- global_settings.pop(key, None)
1298
- software_settings.update(global_settings)
1299
- software_settings = pd.DataFrame.from_dict(software_settings, columns=["Setting"], orient='index')
1300
- try:
1301
- software_settings.to_csv("software_settings.csv", sep=";")
1302
- except PermissionError:
1303
- logging.error("Never let software_settings.csv open when Cellects runs")
1304
- self.message_from_thread.emit(f"Never let software_settings.csv open when Cellects runs")
1305
-
1306
-
1307
-
1308
- # if __name__ == "__main__":
1309
- # po = ProgramOrganizer()
1310
- # os.chdir(Path("D:\Directory\Data\Example\Example\Example"))
1311
- # # po.all['global_pathway'] = Path("C:/Users/APELab/Documents/Aurèle/Cellects/install/Installer_and_example/Example")
1312
- # po.load_variable_dict()
1313
- # po.all['global_pathway']
1314
- # po.load_data_to_run_cellects_quickly()
1315
- # po.all['global_pathway']
1316
- # po.save_data_to_run_cellects_quickly()
1
+ #!/usr/bin/env python3
2
+ """This file contains the class constituting the link between the graphical interface and the computations.
3
+ First, Cellects analyzes one image in order to get a color space combination maximizing the contrast between the specimens
4
+ and the background.
5
+ Second, Cellects automatically delineates each arena.
6
+ Third, Cellects writes one video for each arena.
7
+ Fourth, Cellects segments the video and applies post-processing algorithms to improve the segmentation.
8
+ Fifth, Cellects extracts variables and stores them in .csv files.
9
+ """
10
+
11
+ import pickle
12
+ import sys
13
+ import os
14
+ import logging
15
+ from copy import deepcopy
16
+ import psutil
17
+ import cv2
18
+ from numba.typed import Dict as TDict
19
+ import pandas as pd
20
+ import numpy as np
21
+ from numpy.typing import NDArray
22
+ from psutil import virtual_memory
23
+ from pathlib import Path
24
+ import natsort
25
+ from cellects.utils.formulas import bracket_to_uint8_image_contrast
26
+ from cellects.utils.load_display_save import extract_time
27
+ from cellects.image_analysis.network_functions import detect_network_dynamics, extract_graph_dynamics
28
+ from cellects.utils.load_display_save import PickleRick, readim, is_raw_image, read_h5_array, get_h5_keys
29
+ from cellects.utils.utilitarian import insensitive_glob, vectorized_len
30
+ from cellects.core.cellects_paths import CELLECTS_DIR, ALL_VARS_PKL_FILE
31
+ from cellects.config.all_vars_dict import DefaultDicts
32
+ from cellects.image_analysis.shape_descriptors import from_shape_descriptors_class, compute_one_descriptor_per_frame, compute_one_descriptor_per_colony
33
+ from cellects.image_analysis.morphological_operations import create_ellipse, rank_from_top_to_bottom_from_left_to_right, \
34
+ get_quick_bounding_boxes, get_bb_with_moving_centers, get_contours, keep_one_connected_component, box_counting_dimension, prepare_box_counting
35
+ from cellects.image_analysis.progressively_add_distant_shapes import ProgressivelyAddDistantShapes
36
+ from cellects.core.one_image_analysis import OneImageAnalysis
37
+ from cellects.utils.load_display_save import read_and_rotate, video2numpy
38
+ from cellects.image_analysis.morphological_operations import shape_selection, draw_img_with_mask
39
+
40
+
41
+ class ProgramOrganizer:
42
+ """
43
+ Organizes and manages variables, configuration settings, and processing workflows for motion analysis in a Cellects project.
44
+
45
+ This class maintains global state and analysis-specific data structures, handles file operations,
46
+ processes image/video inputs, and generates output tables. It provides methods to load/save configurations,
47
+ segment images, track objects across frames, and export results with metadata.
48
+
49
+ Attributes
50
+ ----------
51
+ one_arena_done : bool
52
+ Flag indicating whether a single arena has been processed.
53
+ reduce_image_dim : bool
54
+ Whether image dimensions should be reduced (e.g., from color to grayscale).
55
+ first_exp_ready_to_run : bool
56
+ Indicates if the initial experiment setup is complete and ready for execution.
57
+ data_to_save : dict of {str: bool}
58
+ Specifies which data types (first image, coordinates, EXIF, vars) require saving.
59
+ videos : OneVideoPerBlob or None
60
+ Video processing container instance.
61
+ motion : MotionAnalysis or None
62
+ Motion tracking and analysis module.
63
+ all : dict
64
+ Global configuration parameters for the entire workflow.
65
+ vars : dict
66
+ Analysis-specific variables used by `MotionAnalysis`.
67
+ first_im, last_im : np.ndarray or None
68
+ First and last images of the dataset for preprocessing.
69
+ data_list : list of str
70
+ List of video/image file paths in the working directory.
71
+ computed_video_options : np.ndarray of bool
72
+ Flags indicating which video processing options have been applied.
73
+ one_row_per_arena, one_row_per_frame : pd.DataFrame or None
74
+ Result tables for different levels of analysis (one row per arena and one row per frame).
75
+
76
+ Methods
77
+ --------
78
+ save_variable_dict() : Save configuration dictionaries to file.
79
+ load_variable_dict() : Load saved configuration or initialize defaults.
80
+ look_for_data() : Discover video/image files in the working directory.
81
+ update_folder_id(...) : Update folder-specific metadata based on file structure.
82
+ ...
83
+
84
+ """
85
+ def __init__(self):
86
+ """
87
+ This class stores all variables required for analysis as well as
88
+ methods to process them.
89
+ Global variables (i.e. those that do not concern the MotionAnalysis)
90
+ are directly stored in self.
91
+ Variables used in the MotionAnalysis class are stored in a dict
92
+ called self.vars
93
+ """
94
+ if os.path.isfile('PickleRick.pkl'):
95
+ os.remove('PickleRick.pkl')
96
+ if os.path.isfile('PickleRick0.pkl'):
97
+ os.remove('PickleRick0.pkl')
98
+ self.one_arena_done: bool = False
99
+ self.reduce_image_dim: bool = False
100
+ self.first_exp_ready_to_run: bool = False
101
+ self.data_to_save = {'first_image': False, 'coordinates': False, 'exif': False, 'vars': False}
102
+ self.sample_number = None
103
+ self.top = None
104
+ self.motion = None
105
+ self.analysis_instance = None
106
+ self.computed_video_options = np.zeros(5, bool)
107
+ self.vars = {}
108
+ self.all = {}
109
+ self.all['folder_list'] = []
110
+ self.vars['first_detection_frame'] = 0
111
+ self.first_im = None
112
+ self.last_im = None
113
+ self.vars['background_list'] = []
114
+ self.starting_blob_hsize_in_pixels = None
115
+ self.vars['first_move_threshold'] = None
116
+ self.vars['convert_for_origin'] = None
117
+ self.vars['convert_for_motion'] = None
118
+ self.current_combination_id = 0
119
+ self.data_list = []
120
+ self.one_row_per_arena = None
121
+ self.one_row_per_frame = None
122
+ self.not_analyzed_individuals = None
123
+ self.visualize: bool = True
124
+
125
+ def update_variable_dict(self):
126
+ """
127
+
128
+ Update the `all` and `vars` dictionaries with new data from `DefaultDicts`.
129
+
130
+ This method updates the `all` and `vars` dictionaries of the current object with
131
+ data from a new instance of `DefaultDicts`. It checks if any keys or descriptors
132
+ are missing and adds them accordingly.
133
+
134
+ Examples
135
+ --------
136
+ >>> organizer = ProgramOrganizer()
137
+ >>> organizer.update_variable_dict()
138
+ """
139
+ dd = DefaultDicts()
140
+ all_changed = len(dd.all) != len(self.all)
141
+ vars_changed = len(dd.vars) != len(self.vars)
142
+ all_desc = 'descriptors' not in self.all or len(dd.all['descriptors']) != len(self.all['descriptors'])
143
+ vars_desc = 'descriptors' not in self.vars or len(dd.vars['descriptors']) != len(self.vars['descriptors'])
144
+ if all_changed:
145
+ for key, val in dd.all.items():
146
+ if key not in self.all:
147
+ self.all[key] = val
148
+ if vars_changed:
149
+ for key, val in dd.vars.items():
150
+ if key not in self.vars:
151
+ self.vars[key] = val
152
+ if all_desc:
153
+ for key, val in dd.all['descriptors'].items():
154
+ if key not in self.all['descriptors']:
155
+ self.all['descriptors'][key] = val
156
+ if vars_desc:
157
+ for key, val in dd.vars['descriptors'].items():
158
+ if key not in self.vars['descriptors']:
159
+ self.vars['descriptors'][key] = val
160
+ self._set_analyzed_individuals()
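# --- Editor's sketch (annotation, not part of the package diff) ---
# The merge pattern used above, reduced to one helper: copy in any default
# key the saved settings lack, so pickles written by older versions keep
# working after new options are added. `merge_missing` is a hypothetical name.
def merge_missing(defaults: dict, current: dict) -> dict:
    for key, val in defaults.items():
        current.setdefault(key, val)
    return current

# e.g. organizer.vars = merge_missing(DefaultDicts().vars, organizer.vars)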
161
+
162
+ def save_variable_dict(self):
163
+ """
164
+ Saves the configuration dictionaries (`self.all` and `self.vars`) to a pickle file.
165
+
166
+ If bio_mask or back_mask are not required for all folders, they are excluded from the saved data.
167
+
168
+ Notes
169
+ -----
170
+ This method is used to preserve state between Cellects sessions or restart scenarios.
171
+ """
172
+ logging.info("Save the parameters dictionaries in the Cellects folder")
173
+ self.all['vars'] = self.vars
174
+ all_vars = deepcopy(self.all)
175
+ if not self.all['keep_cell_and_back_for_all_folders']:
176
+ all_vars['bio_mask'] = None
177
+ all_vars['back_mask'] = None
178
+ pickle_rick = PickleRick(0)
179
+ pickle_rick.write_file(all_vars, ALL_VARS_PKL_FILE)
180
+
181
+ def load_variable_dict(self):
182
+ """
183
+ Loads configuration dictionaries from a pickle file if available, otherwise initializes defaults.
184
+
185
+ Tries to load saved parameters. If the file doesn't exist or loading fails due to corruption,
186
+ default values are used instead (logging relevant warnings).
187
+
188
193
+ Notes
194
+ -----
195
+ This method ensures robust operation by handling missing or corrupted configuration files gracefully.
196
+ """
197
+ if os.path.isfile(ALL_VARS_PKL_FILE):
198
+ logging.info("Load the parameters from all_vars.pkl in the config of the Cellects folder")
199
+ try:
200
+ with open(ALL_VARS_PKL_FILE, 'rb') as fileopen:
201
+ self.all = pickle.load(fileopen)
202
+ self.vars = self.all['vars']
203
+ self.update_variable_dict()
204
+ logging.info("Success to load the parameters dictionaries from the Cellects folder")
205
+ except Exception as exc:
206
+ logging.error(f"Initialize default parameters because error: {exc}")
207
+ default_dicts = DefaultDicts()
208
+ self.all = default_dicts.all
209
+ self.vars = default_dicts.vars
210
+ else:
211
+ logging.info("Initialize default parameters")
212
+ default_dicts = DefaultDicts()
213
+ self.all = default_dicts.all
214
+ self.vars = default_dicts.vars
215
+ if self.all['cores'] == 1:
216
+ self.all['cores'] = os.cpu_count() - 1
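# --- Editor's sketch (annotation, not part of the package diff) ---
# The load-or-default pattern above, stripped of Cellects specifics. The
# path and the default factory are placeholders supplied by the caller.
import logging
import os
import pickle

def load_settings(path, default_factory):
    if os.path.isfile(path):
        try:
            with open(path, "rb") as fileopen:
                return pickle.load(fileopen)
        except Exception as exc:
            logging.error(f"Initialize default parameters because error: {exc}")
    return default_factory()

# e.g. settings = load_settings("all_vars.pkl", dict)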
217
+
218
+ def look_for_data(self):
219
+ """
220
+ Discovers all relevant video/image data in the working directory.
221
+
222
+ Uses natural sorting to handle filenames with numeric suffixes. Validates file consistency and logs warnings
223
+ if filename patterns are inconsistent across folders.
224
+
225
+ Raises
226
+ ------
227
+ ValueError
228
+ If no files match the specified naming convention.
229
+
230
+ Notes
231
+ -----
232
+ This method assumes all data files follow a predictable pattern with numeric extensions. Use caution in
233
+ unpredictable directory structures where this may fail silently or produce incorrect results.
234
+
235
+ Examples
236
+ --------
237
+ >>> organizer.look_for_data()
238
+ >>> print(organizer.data_list)
239
+ ['/path/to/video1.avi', '/path/to/video2.avi']
240
+ """
241
+ os.chdir(Path(self.all['global_pathway']))
242
+ logging.info(f"Dir: {self.all['global_pathway']}")
243
+ self.data_list = insensitive_glob(self.all['radical'] + '*' + self.all['extension']) # Provides a list ordered by last modification date
244
+ self.all['folder_list'] = []
245
+ self.all['folder_number'] = 1
246
+ if len(self.data_list) > 0:
247
+ self._sort_data_list()
248
+ self.sample_number = self.all['first_folder_sample_number']
249
+ else:
250
+ content = os.listdir()
251
+ for obj in content:
252
+ if not os.path.isfile(obj):
253
+ data_list = insensitive_glob(obj + "/" + self.all['radical'] + '*' + self.all['extension'])
254
+ if len(data_list) > 0:
255
+ self.all['folder_list'].append(obj)
256
+ self.all['folder_number'] += 1
257
+ self.all['folder_list'] = np.sort(self.all['folder_list'])
258
+
259
+ if isinstance(self.all['sample_number_per_folder'], int) or len(self.all['sample_number_per_folder']) == 1:
260
+ self.all['sample_number_per_folder'] = np.repeat(self.all['sample_number_per_folder'],
261
+ self.all['folder_number'])
262
+
263
+ def _sort_data_list(self):
264
+ """
265
+ Sorts the data list using natural sorting.
266
+
267
+ Extended Description
268
+ --------------------
269
+ This function sorts the `data_list` attribute of an instance using the natsort library,
270
+ which is useful when filenames have a mixture of numbers and letters.
271
+ """
272
+ if len(self.data_list) > 0:
273
+ lengths = vectorized_len(self.data_list)
274
+ if len(lengths) > 1 and np.max(np.diff(lengths)) > np.log10(len(self.data_list)):
275
+ logging.error(f"File names present strong variations and cannot be correctly sorted.")
276
+ wrong_images = np.nonzero(np.char.startswith(self.data_list, "Analysis efficiency, ", ))[0]
277
+ for w_im in wrong_images[::-1]:
278
+ self.data_list.pop(w_im)
279
+ self.data_list = natsort.natsorted(self.data_list)
280
+ if self.all['im_or_vid'] == 1:
281
+ self.vars['video_list'] = self.data_list
282
+ else:
283
+ self.vars['video_list'] = None
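# --- Editor's sketch (annotation, not part of the package diff) ---
# Why natsort is used above: lexicographic sort puts 'img10' before 'img2'.
# The second half shows a variant of the length-jump guard, which flags
# lists whose name lengths differ by more than log10(number of files).
import numpy as np
import natsort

names = ["img2.jpg", "img10.jpg", "img1.jpg"]
assert sorted(names) == ["img1.jpg", "img10.jpg", "img2.jpg"]
assert natsort.natsorted(names) == ["img1.jpg", "img2.jpg", "img10.jpg"]

lengths = np.array([len(n) for n in names])
hard_to_sort = len(lengths) > 1 and np.max(np.diff(lengths)) > np.log10(len(names))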
284
+
285
+ def update_folder_id(self, sample_number: int, folder_name: str=""):
286
+ """
287
+ Update the current working directory and data list based on the given sample number
288
+ and optional folder name.
289
+
290
+ Parameters
291
+ ----------
292
+ sample_number : int
293
+ The number of samples to analyze.
294
+ folder_name : str, optional
295
+ The name of the folder to change to. Default is an empty string.
296
+
297
+ Notes
298
+ -----
299
+ This function changes the current working directory to the specified folder name
300
+ and updates the data list based on the file names in that directory. It also performs
301
+ sorting of the data list and checks for strong variations in file names.
302
+
303
+ """
304
+ os.chdir(Path(self.all['global_pathway']) / folder_name)
305
+ self.data_list = insensitive_glob(
306
+ self.all['radical'] + '*' + self.all['extension']) # Provides a list ordered by last modification date
307
+ # Sorting is necessary when some modifications (like rotation) modified the last modification date
308
+ self._sort_data_list()
309
+ if self.all['im_or_vid'] == 1:
310
+ self.sample_number = sample_number
311
+ else:
312
+ self.vars['img_number'] = len(self.data_list)
313
+ self.sample_number = sample_number
314
+ if not 'analyzed_individuals' in self.vars:
315
+ self._set_analyzed_individuals()
316
+
317
+ def _set_analyzed_individuals(self):
318
+ """
319
+ Set the analyzed individuals variable in the dataset.
320
+ """
321
+ if self.sample_number is not None:
322
+ self.vars['analyzed_individuals'] = np.arange(self.sample_number) + 1
323
+ if self.not_analyzed_individuals is not None:
324
+ self.vars['analyzed_individuals'] = np.delete(self.vars['analyzed_individuals'],
325
+ self.not_analyzed_individuals - 1)
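# --- Editor's sketch (annotation, not part of the package diff) ---
# How the 1-based arena ids above are generated and pruned. np.delete takes
# 0-based positions, hence the `- 1` applied to the excluded 1-based ids.
import numpy as np

sample_number = 6
not_analyzed = np.array([2, 5])            # 1-based arena ids to drop
analyzed = np.arange(sample_number) + 1    # [1 2 3 4 5 6]
analyzed = np.delete(analyzed, not_analyzed - 1)
assert list(analyzed) == [1, 3, 4, 6]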
326
+
327
+ def load_data_to_run_cellects_quickly(self):
328
+ """
329
+ Load data from a pickle file and update the current state of the object.
330
+
331
+ Summarizes, loads, and validates data needed to run Cellects,
332
+ updating the object's state accordingly. If the necessary data
333
+ are not present or valid, it ensures the experiment is marked as
334
+ not ready to run.
335
+
336
+ Parameters
337
+ ----------
338
+ self : CellectsObject
339
+ The instance of the class (assumed to be a subclass of
340
+ CellectsObject) that this method belongs to.
341
+
342
+ Returns
343
+ -------
344
+ None
345
+
346
+ Notes
347
+ -----
348
+ This function relies on the presence of a pickle file 'Data to run Cellects quickly.pkl'.
349
+ It updates the state of various attributes based on the loaded data
350
+ and logs appropriate messages.
351
+ """
352
+ self.first_im = None
353
+ current_global_pathway = self.all['global_pathway']
354
+ folder_number = self.all['folder_number']
355
+ if folder_number > 1:
356
+ folder_list = deepcopy(self.all['folder_list'])
357
+ sample_number_per_folder = deepcopy(self.all['sample_number_per_folder'])
358
+
359
+ if os.path.isfile('Data to run Cellects quickly.pkl'):
360
+ pickle_rick = PickleRick()
361
+ data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
362
+ if data_to_run_cellects_quickly is None:
363
+ data_to_run_cellects_quickly = {}
364
+
365
+ if ('validated_shapes' in data_to_run_cellects_quickly) and ('coordinates' in data_to_run_cellects_quickly) and ('all' in data_to_run_cellects_quickly):
366
+ logging.info("Success to load Data to run Cellects quickly.pkl from the user chosen directory")
367
+ self.all = data_to_run_cellects_quickly['all']
368
+ # If you want to add a new variable, first run an updated version of all_vars_dict,
369
+ # then put a breakpoint here and run the following + self.save_data_to_run_cellects_quickly() :
370
+ self.vars = self.all['vars']
371
+ self.update_variable_dict()
372
+ folder_changed = False
373
+ if current_global_pathway != self.all['global_pathway']:
374
+ folder_changed = True
375
+ logging.info(
376
+ "Although the folder is ready, it is not at the same place as it was during creation, updating")
377
+ self.all['global_pathway'] = current_global_pathway
378
+ if folder_number > 1:
379
+ self.all['global_pathway'] = current_global_pathway
380
+ self.all['folder_list'] = folder_list
381
+ self.all['folder_number'] = folder_number
382
+ self.all['sample_number_per_folder'] = sample_number_per_folder
383
+
384
+ if len(self.data_list) == 0:
385
+ self.look_for_data()
386
+ if folder_changed and folder_number > 1 and len(self.all['folder_list']) > 0:
387
+ self.update_folder_id(self.all['sample_number_per_folder'][0], self.all['folder_list'][0])
388
+ self.get_first_image()
389
+ self.get_last_image()
390
+ (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = data_to_run_cellects_quickly[
391
+ 'coordinates']
392
+ if self.all['automatically_crop']:
393
+ self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
394
+ logging.info("Crop first image")
395
+ self.first_image.automatically_crop(self.first_image.crop_coord)
396
+ logging.info("Crop last image")
397
+ self.last_image.automatically_crop(self.first_image.crop_coord)
398
+ else:
399
+ self.first_image.crop_coord = None
400
+ self.first_image.validated_shapes = data_to_run_cellects_quickly['validated_shapes']
401
+ self.first_image.im_combinations = []
402
+ self.current_combination_id = 0
403
+ self.first_image.im_combinations.append({})
404
+ self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
405
+ self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
406
+ self.first_image.im_combinations[self.current_combination_id]['shape_number'] = data_to_run_cellects_quickly['shape_number']
407
+
408
+ self.first_exp_ready_to_run = True
409
+ if self.vars['subtract_background'] and len(self.vars['background_list']) == 0:
410
+ self.first_exp_ready_to_run = False
411
+ else:
412
+ self.first_exp_ready_to_run = False
413
+ else:
414
+ self.first_exp_ready_to_run = False
415
+ if self.first_exp_ready_to_run:
416
+ logging.info("The current (or the first) folder is ready to run")
417
+ else:
418
+ logging.info("The current (or the first) folder is not ready to run")
419
+
420
+ def save_data_to_run_cellects_quickly(self, new_one_if_does_not_exist: bool=True):
421
+ """
422
+ Save data to a pickled file if it does not exist or update existing data.
423
+
424
+ Parameters
425
+ ----------
426
+ new_one_if_does_not_exist : bool, optional
427
+ Whether to create a new data file if it does not already exist.
428
+ Default is True.
429
+
430
+ Notes
431
+ -----
432
+ This method logs various information about its operations and handles the writing of data to a pickled file.
433
+ """
434
+ data_to_run_cellects_quickly = None
435
+ if os.path.isfile('Data to run Cellects quickly.pkl'):
436
+ logging.info("Update -Data to run Cellects quickly.pkl- in the user chosen directory")
437
+ pickle_rick = PickleRick()
438
+ data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
439
+ if data_to_run_cellects_quickly is None:
440
+ os.remove('Data to run Cellects quickly.pkl')
441
+ logging.error("Failed to load Data to run Cellects quickly.pkl before update. Remove pre existing.")
442
+ else:
443
+ if new_one_if_does_not_exist:
444
+ logging.info("Create Data to run Cellects quickly.pkl in the user chosen directory")
445
+ data_to_run_cellects_quickly = {}
446
+ if data_to_run_cellects_quickly is not None:
447
+ if self.data_to_save['first_image']:
448
+ data_to_run_cellects_quickly['validated_shapes'] = self.first_image.im_combinations[self.current_combination_id]['binary_image']
449
+ data_to_run_cellects_quickly['shape_number'] = self.first_image.im_combinations[self.current_combination_id]['shape_number']
450
+ # data_to_run_cellects_quickly['converted_image'] = self.first_image.im_combinations[self.current_combination_id]['converted_image']
451
+ if self.data_to_save['coordinates']:
452
+ data_to_run_cellects_quickly['coordinates'] = self._list_coordinates()
453
+ logging.info("When they exist, do overwrite unaltered video")
454
+ self.all['overwrite_unaltered_videos'] = True
455
+ if self.data_to_save['exif']:
456
+ self.vars['exif'] = self.extract_exif()
457
+ self.all['vars'] = self.vars
458
+ data_to_run_cellects_quickly['all'] = self.all
459
+ pickle_rick = PickleRick()
460
+ pickle_rick.write_file(data_to_run_cellects_quickly, 'Data to run Cellects quickly.pkl')
461
+
462
+ def _list_coordinates(self):
463
+ """
464
+ Assemble the crop and per-arena coordinates into a single list.
465
+
466
+ Combine the crop coordinates from the first image with additional
467
+ coordinates for left, right, top, and bottom boundaries to form a list of
468
+ video coordinates. If the crop coordinates are not already set, initialize
469
+ them to cover the entire image.
470
+
471
+ Returns
472
+ -------
473
+ list
475
+ The crop window [ccy1, ccy2, ccx1, ccx2] followed by the per-arena left, right, top, and bottom boundaries.
475
+
476
+ """
477
+ if self.first_image.crop_coord is None:
478
+ self.first_image.crop_coord = [0, self.first_image.image.shape[0], 0,
479
+ self.first_image.image.shape[1]]
480
+ videos_coordinates = self.first_image.crop_coord + [self.left, self.right, self.top, self.bot]
481
+ return videos_coordinates
482
+
483
+ def get_first_image(self, first_im: NDArray=None, sample_number: int=None):
484
+ """
485
+ Load and process the first image or frame from a video.
486
+
487
+ This method handles loading the first image or the first frame of a video
488
+ depending on whether the data is an image or a video. It performs necessary
489
+ preprocessing and initializes relevant attributes for subsequent analysis.
490
+ """
491
+ self.reduce_image_dim = False
492
+ if first_im is not None:
493
+ self.first_im = first_im
494
+ if sample_number is not None:
495
+ self.sample_number = sample_number
496
+ else:
497
+ logging.info("Load first image")
498
+ just_read_image = self.first_im is not None
499
+ # just_read_image = self.analysis_instance is not None
500
+ if self.all['im_or_vid'] == 1:
501
+ if not just_read_image:
502
+ self.analysis_instance = video2numpy(self.data_list[0])
503
+ self.sample_number = len(self.data_list)
504
+ self.vars['img_number'] = self.analysis_instance.shape[0]
505
+ self.first_im = self.analysis_instance[0, ...]
506
+ else:
507
+ self.first_im = self.analysis_instance[self.vars['first_detection_frame'], ...]
508
+ self.vars['dims'] = self.analysis_instance.shape[:3]
509
+
510
+ else:
511
+ self.vars['img_number'] = len(self.data_list)
512
+ self.all['raw_images'] = is_raw_image(self.data_list[0])
513
+ self.first_im = readim(self.data_list[self.vars['first_detection_frame']], self.all['raw_images'])
514
+ self.vars['dims'] = [self.vars['img_number'], self.first_im.shape[0], self.first_im.shape[1]]
515
+
516
+ if len(self.first_im.shape) == 3:
517
+ if np.all(np.equal(self.first_im[:, :, 0], self.first_im[:, :, 1])) and np.all(
518
+ np.equal(self.first_im[:, :, 1], self.first_im[:, :, 2])):
519
+ self.reduce_image_dim = True
520
+ if self.reduce_image_dim:
521
+ self.first_im = self.first_im[:, :, 0]
522
+ self.first_image = OneImageAnalysis(self.first_im)
523
+ self.vars['already_greyscale'] = self.first_image.already_greyscale
524
+ if self.vars['already_greyscale']:
525
+ self.vars["convert_for_origin"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
526
+ self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
527
+ if np.mean((np.mean(self.first_image.image[2, :, ...]), np.mean(self.first_image.image[-3, :, ...]), np.mean(self.first_image.image[:, 2, ...]), np.mean(self.first_image.image[:, -3, ...]))) > 127:
528
+ self.vars['contour_color']: np.uint8 = 0
529
+ else:
530
+ self.vars['contour_color']: np.uint8 = 255
531
+ if self.vars['first_detection_frame'] > 0:
532
+ self.vars['origin_state'] = 'invisible'
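# --- Editor's sketch (annotation, not part of the package diff) ---
# The greyscale check above: a 3-channel image whose channels are identical
# is collapsed to a single channel before analysis.
import numpy as np

img = np.random.randint(0, 256, (4, 4, 1), dtype=np.uint8).repeat(3, axis=2)
is_grey = (img.ndim == 3
           and np.array_equal(img[:, :, 0], img[:, :, 1])
           and np.array_equal(img[:, :, 1], img[:, :, 2]))
if is_grey:
    img = img[:, :, 0]          # drop the redundant channels
assert img.shape == (4, 4)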
533
+
534
+ def get_last_image(self, last_im: NDArray=None):
535
+ """
536
+
537
+ Load the last image from a video or image list and process it based on given parameters.
538
+
539
+ Parameters
540
+ ----------
541
+ last_im : NDArray, optional
542
+ The last image to be loaded. If not provided, the last image will be loaded from the data list.
543
+ """
544
+ logging.info("Load last image")
545
+ if last_im is not None:
546
+ self.last_im = last_im
547
+ else:
548
+ if self.all['im_or_vid'] == 1:
549
+ cap = cv2.VideoCapture(self.data_list[0])
550
+ counter = 0
551
+ while cap.isOpened() and counter < self.vars['img_number']:
552
+ ret, frame = cap.read()
553
+ if self.reduce_image_dim:
554
+ frame = frame[:, :, 0]
555
+ self.analysis_instance[-1, ...] = frame
556
+ counter += 1
557
+ self.last_im = frame
558
+ cap.release()
559
+ else:
560
+ is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
561
+ self.last_im = read_and_rotate(self.data_list[-1], self.first_im, self.all['raw_images'], is_landscape)
562
+ if self.reduce_image_dim:
563
+ self.last_im = self.last_im[:, :, 0]
564
+ self.last_image = OneImageAnalysis(self.last_im)
565
+
566
+ def extract_exif(self):
567
+ """
568
+ Extract EXIF data from image or video files.
569
+
570
+ Notes
571
+ -----
572
+ If `extract_time_interval` is True and unsuccessful, arbitrary time steps will be used.
573
+ Timings are normalized to minutes for consistency across different files.
574
+ """
575
+ self.vars['time_step_is_arbitrary'] = True
576
+ if self.all['im_or_vid'] == 1:
577
+ self.vars['dims'] = self.analysis_instance.shape
578
+ timings = np.arange(self.vars['dims'][0])
579
+ else:
580
+ timings = np.arange(len(self.data_list))
581
+ if sys.platform.startswith('win'):
582
+ pathway = os.getcwd() + '\\'
583
+ else:
584
+ pathway = os.getcwd() + '/'
585
+ if not 'extract_time_interval' in self.all:
586
+ self.all['extract_time_interval'] = True
587
+ if self.all['extract_time_interval']:
588
+ self.vars['time_step'] = 1
589
+ try:
590
+ timings = extract_time(self.data_list, pathway, self.all['raw_images'])
591
+ timings = timings - timings[0]
592
+ timings = timings / 60
593
+ time_step = np.diff(timings)
594
+ if len(time_step) > 0:
595
+ time_step = np.mean(time_step)
596
+ digit_nb = 0
597
+ for i in str(time_step):
598
+ if i in {'.'}:
599
+ pass
600
+ elif i in {'0'}:
601
+ digit_nb += 1
602
+ else:
603
+ break
604
+ self.vars['time_step'] = np.round(time_step, digit_nb + 1)
605
+ self.vars['time_step_is_arbitrary'] = False
606
+ except Exception:
607
+ pass
608
+ else:
609
+ timings = np.arange(0, len(self.data_list) * self.vars['time_step'], self.vars['time_step'])
610
+ self.vars['time_step_is_arbitrary'] = False
611
+ return timings
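# --- Editor's sketch (annotation, not part of the package diff) ---
# The rounding rule above keeps one digit beyond the leading zeros of the
# mean time step, so ~0.00417 min becomes 0.0042 while 12.5 min stays 12.5.
import numpy as np

def round_time_step(timings_min):
    step = float(np.mean(np.diff(timings_min)))
    leading_zeros = 0
    for char in str(step):
        if char == '.':
            continue
        if char == '0':
            leading_zeros += 1
        else:
            break
    return np.round(step, leading_zeros + 1)

print(round_time_step(np.array([0.0, 0.00417, 0.00834])))  # 0.0042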
612
+
613
+ def fast_first_image_segmentation(self):
614
+ """
615
+ Segment the first or subsequent image in a series for biological and background masks.
616
+
617
+ Notes
618
+ -----
619
+ This function processes the first or subsequent image in a sequence, applying biological and background masks,
620
+ segmenting the image, and updating internal data structures accordingly. The function is specific to handling
621
+ image sequences for biological analysis.
622
+
623
+ """
624
+ if not "color_number" in self.vars:
625
+ self.update_variable_dict()
626
+ if self.vars['convert_for_origin'] is None:
627
+ self.vars['convert_for_origin'] = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}
628
+ self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
629
+ self.all["bio_mask"], self.all["back_mask"], subtract_background=None,
630
+ subtract_background2=None,
631
+ rolling_window_segmentation=self.vars["rolling_window_segmentation"],
632
+ filter_spec=self.vars["filter_spec"])
633
+ if not self.first_image.drift_correction_already_adjusted:
634
+ self.vars['drift_already_corrected'] = self.first_image.check_if_image_border_attest_drift_correction()
635
+ if self.vars['drift_already_corrected']:
636
+ logging.info("Cellects detected that the images have already been corrected for drift")
637
+ self.first_image.convert_and_segment(self.vars['convert_for_origin'], self.vars["color_number"],
638
+ self.all["bio_mask"], self.all["back_mask"],
639
+ subtract_background=None, subtract_background2=None,
640
+ rolling_window_segmentation=self.vars["rolling_window_segmentation"],
641
+ filter_spec=self.vars["filter_spec"],
642
+ allowed_window=self.first_image.drift_mask_coord)
643
+
644
+ shapes_features = shape_selection(self.first_image.binary_image, true_shape_number=self.sample_number,
645
+ horizontal_size=self.starting_blob_hsize_in_pixels,
646
+ spot_shape=self.all['starting_blob_shape'],
647
+ several_blob_per_arena=self.vars['several_blob_per_arena'],
648
+ bio_mask=self.all["bio_mask"], back_mask=self.all["back_mask"])
649
+ self.first_image.validated_shapes, shape_number, stats, centroids = shapes_features
650
+ self.first_image.shape_number = shape_number
651
+ if self.first_image.im_combinations is None:
652
+ self.first_image.im_combinations = []
653
+ if len(self.first_image.im_combinations) == 0:
654
+ self.first_image.im_combinations.append({})
655
+ self.current_combination_id = np.min((self.current_combination_id, len(self.first_image.im_combinations) - 1))
656
+ self.first_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_origin']
657
+ self.first_image.im_combinations[self.current_combination_id]['binary_image'] = self.first_image.validated_shapes
658
+ if self.first_image.greyscale is not None:
659
+ greyscale = self.first_image.greyscale
660
+ else:
661
+ greyscale = self.first_image.image
662
+ self.first_image.im_combinations[self.current_combination_id]['converted_image'] = bracket_to_uint8_image_contrast(greyscale)
663
+ self.first_image.im_combinations[self.current_combination_id]['shape_number'] = shape_number
664
+
665
+ def fast_last_image_segmentation(self, biomask: NDArray[np.uint8] = None, backmask: NDArray[np.uint8] = None):
666
+ """
667
+ Segment the last image in the series using biological and background masks.
668
+
669
+ Parameters
670
+ ----------
671
+ biomask : NDArray[np.uint8], optional
672
+ The biological mask to be applied to the image.
673
+ backmask : NDArray[np.uint8], optional
674
+ The background mask to be applied to the image.
675
+
676
+ Returns
677
+ -------
678
+ None
679
+
680
+ Notes
681
+ -----
682
+ This function processes the first or subsequent image in a sequence, applying biological and background masks,
683
+ segmenting the image, and updating internal data structures accordingly. The function is specific to handling
684
+ image sequences for biological analysis
685
+
686
+ """
687
+ if self.vars['convert_for_motion'] is None:
688
+ self.vars['convert_for_motion'] = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}
689
+ self.cropping(is_first_image=False)
690
+ self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
691
+ biomask, backmask, self.first_image.subtract_background,
692
+ self.first_image.subtract_background2,
693
+ rolling_window_segmentation=self.vars["rolling_window_segmentation"],
694
+ filter_spec=self.vars["filter_spec"])
695
+ if self.vars['drift_already_corrected'] and not self.last_image.drift_correction_already_adjusted and not self.vars["rolling_window_segmentation"]['do']:
696
+ self.last_image.check_if_image_border_attest_drift_correction()
697
+ self.last_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
698
+ biomask, backmask, self.first_image.subtract_background,
699
+ self.first_image.subtract_background2,
700
+ allowed_window=self.last_image.drift_mask_coord,
701
+ filter_spec=self.vars["filter_spec"])
702
+
703
+ if self.last_image.im_combinations is None:
704
+ self.last_image.im_combinations = []
705
+ if len(self.last_image.im_combinations) == 0:
706
+ self.last_image.im_combinations.append({})
707
+ self.current_combination_id = np.min((self.current_combination_id, len(self.last_image.im_combinations) - 1))
708
+ self.last_image.im_combinations[self.current_combination_id]['csc'] = self.vars['convert_for_motion']
709
+ self.last_image.im_combinations[self.current_combination_id]['binary_image'] = self.last_image.binary_image
710
+ if self.last_image.greyscale is not None:
711
+ greyscale = self.last_image.greyscale
712
+ else:
713
+ greyscale = self.last_image.image
714
+ self.last_image.im_combinations[self.current_combination_id]['converted_image'] = bracket_to_uint8_image_contrast(greyscale)
715
+
716
+ def cropping(self, is_first_image: bool):
717
+ """
718
+ Crops the image based on specified conditions and settings.
719
+
720
+ This method checks if drift correction has already been applied.
721
+ If the image is the first one and hasn't been cropped yet, it will attempt
722
+ to use pre-stored coordinates or compute new crop coordinates. If automatic
723
+ cropping is enabled, it will apply the cropping process.
724
+
725
+ Parameters
726
+ ----------
727
+ is_first_image : bool
728
+ Indicates whether the image being processed is the first one in the sequence.
729
+ """
730
+ if not self.vars['drift_already_corrected']:
731
+ if is_first_image:
732
+ if not self.first_image.cropped:
733
+ if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
734
+ pickle_rick = PickleRick()
735
+ data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
736
+ if data_to_run_cellects_quickly is not None:
737
+ if 'coordinates' in data_to_run_cellects_quickly:
738
+ logging.info("Get crop coordinates from Data to run Cellects quickly.pkl")
739
+ (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
740
+ data_to_run_cellects_quickly['coordinates']
741
+ self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
742
+ else:
743
+ self.first_image.get_crop_coordinates()
744
+ else:
745
+ self.first_image.get_crop_coordinates()
746
+
747
+ else:
748
+ self.first_image.get_crop_coordinates()
749
+ if self.all['automatically_crop']:
750
+ self.first_image.automatically_crop(self.first_image.crop_coord)
751
+ else:
752
+ self.first_image.crop_coord = None
753
+ else:
754
+ if not self.last_image.cropped and self.all['automatically_crop']:
755
+ self.last_image.automatically_crop(self.first_image.crop_coord)
756
+
757
+ def get_average_pixel_size(self):
758
+ """
759
+ Calculate the average pixel size and related variables.
760
+
761
+ Logs information about calculation steps, computes the average
762
+ pixel size based on image or cell scaling settings,
763
+ and sets initial thresholds for object detection.
764
+
765
+ Notes
766
+ -----
767
+ - The average pixel size is determined by either image dimensions or blob sizes.
768
+ - Thresholds for automatic detection are set based on configuration settings.
769
+
770
+ """
771
+ logging.info("Getting average pixel size")
772
+ (self.first_image.shape_number,
773
+ self.first_image.shapes,
774
+ self.first_image.stats,
775
+ centroids) = cv2.connectedComponentsWithStats(
776
+ self.first_image.validated_shapes,
777
+ connectivity=8)
778
+ self.first_image.shape_number -= 1
779
+ if self.all['scale_with_image_or_cells'] == 0:
780
+ self.vars['average_pixel_size'] = np.square(
781
+ self.all['image_horizontal_size_in_mm'] /
782
+ self.first_im.shape[1])
783
+ else:
784
+ self.vars['average_pixel_size'] = np.square(
785
+ self.all['starting_blob_hsize_in_mm'] /
786
+ np.mean(self.first_image.stats[1:, 2]))
787
+ if self.all['set_spot_size']:
788
+ self.starting_blob_hsize_in_pixels = (
789
+ self.all['starting_blob_hsize_in_mm'] /
790
+ np.sqrt(self.vars['average_pixel_size']))
791
+ else:
792
+ self.starting_blob_hsize_in_pixels = None
793
+
794
+ if self.all['automatic_size_thresholding']:
795
+ self.vars['first_move_threshold'] = 10
796
+ else:
797
+ self.vars['first_move_threshold'] = np.round(
798
+ self.all['first_move_threshold_in_mm²'] /
799
+ self.vars['average_pixel_size']).astype(np.uint8)
800
+ logging.info(f"The average pixel size is: {self.vars['average_pixel_size']} mm²")
801
+
802
+ def get_background_to_subtract(self):
803
+ """
804
+ Determine if background subtraction should be applied to the image.
805
+
806
+ Extended Description
807
+ --------------------
808
+ This function checks whether background subtraction should be applied.
809
+ It utilizes the 'subtract_background' flag and potentially converts
810
+ the image for motion estimation.
811
+
812
+ Parameters
813
+ ----------
814
+ self : object
815
+ The instance of the class containing this method.
816
+ Must have attributes `vars` and `first_image`.
817
+ """
818
+ if self.vars['subtract_background']:
819
+ self.first_image.generate_subtract_background(self.vars['convert_for_motion'], self.vars['drift_already_corrected'])
820
+
821
+ def find_if_lighter_background(self):
822
+ """
823
+ Determines whether the background is lighter or darker than the cells.
824
+
825
+ This function analyzes images to determine if their backgrounds are lighter
826
+ or darker relative to the cells, updating attributes accordingly for analysis and display purposes.
827
+
828
+
829
+ Notes
830
+ -----
831
+ This function modifies instance variables and does not return any value.
832
+ The analysis involves comparing mean pixel values in specific areas of the image.
833
+ """
834
+ logging.info("Find if the background is lighter or darker than the cells")
835
+ self.vars['lighter_background']: bool = True
836
+ self.vars['contour_color']: np.uint8 = 0
837
+ are_dicts_equal: bool = True
838
+ if self.vars['convert_for_origin'] is not None and self.vars['convert_for_motion'] is not None:
839
+ for key in self.vars['convert_for_origin'].keys():
840
+ are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_motion'] and self.vars['convert_for_origin'][key] == self.vars['convert_for_motion'][key])
841
+
842
+ for key in self.vars['convert_for_motion'].keys():
843
+ are_dicts_equal = are_dicts_equal and np.all(key in self.vars['convert_for_origin'] and self.vars['convert_for_motion'][key] == self.vars['convert_for_origin'][key])
844
+ else:
845
+ self.vars['convert_for_origin'] = {"logical": 'None', "PCA": np.ones(3, dtype=np.uint8)}
846
+ are_dicts_equal = True
847
+ if are_dicts_equal:
848
+ if self.first_im is None:
849
+ self.get_first_image()
850
+ self.fast_first_image_segmentation()
851
+ self.cropping(is_first_image=True)
852
+ among = np.nonzero(self.first_image.validated_shapes)
853
+ not_among = np.nonzero(1 - self.first_image.validated_shapes)
854
+ # Use the converted image to tell if the background is lighter, for analysis purposes
855
+ if self.first_image.image[among[0], among[1]].mean() > self.first_image.image[not_among[0], not_among[1]].mean():
856
+ self.vars['lighter_background'] = False
857
+ # Use the original image to tell if the background is lighter, for display purposes
858
+ if self.first_image.bgr[among[0], among[1], ...].mean() > self.first_image.bgr[not_among[0], not_among[1], ...].mean():
859
+ self.vars['contour_color'] = 255
860
+ else:
861
+ if self.last_im is None:
862
+ self.get_last_image()
863
+ # self.cropping(is_first_image=False)
864
+ self.fast_last_image_segmentation()
865
+ if self.last_image.binary_image.sum() == 0:
866
+ self.fast_last_image_segmentation()
867
+ among = np.nonzero(self.last_image.binary_image)
868
+ not_among = np.nonzero(1 - self.last_image.binary_image)
869
+ # Use the converted image to tell if the background is lighter, for analysis purposes
870
+ if self.last_image.image[among[0], among[1]].mean() > self.last_image.image[not_among[0], not_among[1]].mean():
871
+ self.vars['lighter_background'] = False
872
+ # Use the original image to tell if the background is lighter, for display purposes
873
+ if self.last_image.bgr[among[0], among[1], ...].mean() > self.last_image.bgr[not_among[0], not_among[1], ...].mean():
874
+ self.vars['contour_color'] = 255
875
+ if self.vars['origin_state'] == "invisible":
876
+ binary_image = deepcopy(self.first_image.binary_image)
877
+ self.first_image.convert_and_segment(self.vars['convert_for_motion'], self.vars["color_number"],
878
+ None, None, subtract_background=None,
879
+ subtract_background2=None,
880
+ rolling_window_segmentation=self.vars['rolling_window_segmentation'],
881
+ filter_spec=self.vars["filter_spec"])
882
+ covered_values = self.first_image.image[np.nonzero(binary_image)]
883
+ self.vars['luminosity_threshold'] = 127
884
+ if len(covered_values) > 0:
885
+ if self.vars['lighter_background']:
886
+ if np.max(covered_values) < 255:
887
+ self.vars['luminosity_threshold'] = np.max(covered_values) + 1
888
+ else:
889
+ if np.min(covered_values) > 0:
890
+ self.vars['luminosity_threshold'] = np.min(covered_values) - 1
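# --- Editor's sketch (annotation, not part of the package diff) ---
# The two decisions made above, on a toy greyscale image: compare the mean
# intensity under the specimen mask against the rest to set
# lighter_background, then derive luminosity_threshold from covered values.
import numpy as np

image = np.array([[200, 200], [40, 210]], dtype=np.uint8)
mask = np.array([[0, 0], [1, 0]], dtype=np.uint8)      # 1 = specimen
among, not_among = np.nonzero(mask), np.nonzero(1 - mask)
lighter_background = image[among].mean() < image[not_among].mean()

covered = image[np.nonzero(mask)]
if lighter_background:
    luminosity_threshold = covered.max() + 1 if covered.max() < 255 else 127
else:
    luminosity_threshold = covered.min() - 1 if covered.min() > 0 else 127
assert lighter_background and luminosity_threshold == 41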
891
+
892
+ def delineate_each_arena(self):
893
+ """
894
+ Determine the coordinates of each arena for video analysis.
895
+
896
+ The function processes video frames to identify bounding boxes around
897
+ specimens and determines valid arenas for analysis. In case of existing data,
898
+ it uses previously computed coordinates if available and valid.
899
+
900
+ Returns
901
+ -------
902
+ analysis_status : dict
903
+ A dictionary containing flags and messages indicating the status of
904
+ the analysis.
905
+ - 'continue' (bool): Whether to continue processing.
906
+ - 'message' (str): Informational or error message.
907
+
908
+ Raises
909
+ ------
910
+ None
911
+
912
+ Notes
913
+ -----
914
+ This function relies on the existence of certain attributes and variables
915
+ defined in the class instance.
916
+
917
+ Examples
918
+ --------
919
+ >>> self.delineate_each_arena()
920
+ {'continue': True, 'message': ''}
921
+ """
922
+ analysis_status = {"continue": True, "message": ""}
923
+ if not self.vars['several_blob_per_arena'] and (self.sample_number > 1):
924
+ compute_get_bb: bool = True
925
+ if (not self.all['overwrite_unaltered_videos'] and os.path.isfile('Data to run Cellects quickly.pkl')):
926
+
927
+ pickle_rick = PickleRick()
928
+ data_to_run_cellects_quickly = pickle_rick.read_file('Data to run Cellects quickly.pkl')
929
+ if data_to_run_cellects_quickly is not None:
930
+ if 'coordinates' in data_to_run_cellects_quickly:
931
+ (ccy1, ccy2, ccx1, ccx2, self.left, self.right, self.top, self.bot) = \
932
+ data_to_run_cellects_quickly['coordinates']
933
+ self.first_image.crop_coord = [ccy1, ccy2, ccx1, ccx2]
934
+ if (self.first_image.image.shape[0] == (ccy2 - ccy1)) and (
935
+ self.first_image.image.shape[1] == (ccx2 - ccx1)): # maybe useless now
936
+ logging.info("Get the coordinates of all arenas from Data to run Cellects quickly.pkl")
937
+ compute_get_bb = False
938
+
939
+ if compute_get_bb:
940
+ motion_list = None
941
+ if self.all['are_gravity_centers_moving']:
942
+ motion_list = self._segment_blob_motion(sample_size=5)
943
+ # if self.all['im_or_vid'] == 1:
944
+ self.get_bounding_boxes(are_gravity_centers_moving=self.all['are_gravity_centers_moving'] == 1,
945
+ motion_list=motion_list, all_specimens_have_same_direction=self.all['all_specimens_have_same_direction'])
946
+
947
+ if np.any(self.ordered_stats[:, 4] > 100 * np.median(self.ordered_stats[:, 4])):
948
+ analysis_status['message'] = "A specimen is at least 100 times larger: click previous and retry by specifying 'back' areas."
949
+ analysis_status['continue'] = False
950
+ if np.any(self.ordered_stats[:, 4] < 0.01 * np.median(self.ordered_stats[:, 4])):
951
+ analysis_status['message'] = "A specimen is at least 100 times smaller: click previous and retry by specifying 'back' areas."
952
+ analysis_status['continue'] = False
953
+ del self.ordered_stats
954
+ if self.not_analyzed_individuals is not None:
954
+ logging.info(
955
+ str(self.not_analyzed_individuals) + " individuals are out of picture scope and cannot be analyzed")
956
+
957
+ else:
958
+ self._whole_image_bounding_boxes()
959
+ self.sample_number = 1
960
+ self._set_analyzed_individuals()
961
+ return analysis_status
962
+
963
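+    # --- Illustrative sketch (editor addition, hypothetical names): the
+    # median-based outlier test used above to reject mis-detected specimens.
+    # `areas` is a 1D array of component areas, one per detected specimen.
+    @staticmethod
+    def _area_outlier_sketch(areas, ratio=100):
+        median = np.median(areas)
+        too_large = np.any(areas > ratio * median)
+        too_small = np.any(areas < median / ratio)
+        return too_large, too_small
+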
+    def _segment_blob_motion(self, sample_size: int) -> list:
+        """
+        Segment specimen motion on an evenly spaced subsample of frames.
+
+        Parameters
+        ----------
+        sample_size : int
+            Number of frames to sample from the data list.
+
+        Returns
+        -------
+        list
+            Segmented binary images, one per sampled frame. The first entry
+            is the validated segmentation of the first image.
+
+        Notes
+        -----
+        Relies on `self.data_list`, `self.first_image` and `self.vars`.
+
+        Examples
+        --------
+        >>> motion_samples = self._segment_blob_motion(10)
+        >>> len(motion_samples)
+        10
+        """
+        motion_list = list()
+        if isinstance(self.data_list, list):
+            frame_number = len(self.data_list)
+        else:
+            frame_number = self.data_list.shape[0]
+        sample_numbers = np.floor(np.linspace(0, frame_number, sample_size)).astype(int)
+        if 'lighter_background' not in self.vars:
+            self.find_if_lighter_background()
+        for frame_idx in np.arange(sample_size):
+            if frame_idx == 0:
+                motion_list.insert(frame_idx, self.first_image.validated_shapes)
+            else:
+                image = self.data_list[sample_numbers[frame_idx] - 1]
+                if isinstance(image, str):
+                    is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
+                    image = read_and_rotate(image, self.first_image.bgr, self.all['raw_images'],
+                                            is_landscape, self.first_image.crop_coord)
+                In = OneImageAnalysis(image)
+                if self.vars['drift_already_corrected']:
+                    In.check_if_image_border_attest_drift_correction()
+                In.convert_and_segment(self.vars['convert_for_motion'], self.vars['color_number'], None, None,
+                                       self.first_image.subtract_background, self.first_image.subtract_background2,
+                                       self.vars['rolling_window_segmentation'], self.vars['lighter_background'],
+                                       allowed_window=In.drift_mask_coord, filter_spec=self.vars['filter_spec'])
+                motion_list.insert(frame_idx, In.binary_image)
+        return motion_list
+
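+    # --- Illustrative sketch (editor addition, hypothetical names): picking
+    # evenly spaced frame indices across a video, as _segment_blob_motion
+    # does above. np.linspace includes both endpoints, hence the -1 shift and
+    # the clip to valid 0-based indices.
+    @staticmethod
+    def _evenly_sampled_indices_sketch(frame_number, sample_size):
+        samples = np.floor(np.linspace(0, frame_number, sample_size)).astype(int)
+        return np.clip(samples - 1, 0, frame_number - 1)
+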
+    def get_bounding_boxes(self, are_gravity_centers_moving: bool, motion_list: list=(), all_specimens_have_same_direction: bool=True, original_shape_hsize: int=None):
+        """Get the coordinates of each arena using bounding boxes.
+
+        Parameters
+        ----------
+        are_gravity_centers_moving : bool
+            Whether the specimens' gravity centers move during the experiment.
+        motion_list : list, optional
+            List of segmented binary images sampled across the video, used
+            when gravity centers move. Empty by default.
+        all_specimens_have_same_direction : bool, optional
+            Whether all specimens move in the same direction, by default True.
+        original_shape_hsize : int, optional
+            Forwarded to `get_bb_with_moving_centers`, by default None.
+
+        Notes
+        -----
+        This method uses several internal methods and attributes to determine
+        the bounding boxes.
+        """
+        # 7) Create the required empty arrays, especially the bounding box coordinates of each video
+        self.ordered_first_image = None
+        self.shapes_to_remove = None
+        if self.first_image.crop_coord is None:
+            self.first_image.get_crop_coordinates()
+
+        logging.info("Get the coordinates of all arenas using the get_bounding_boxes method")
+        if self.first_image.validated_shapes.any() and self.first_image.shape_number > 0:
+            self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                self.first_image.validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+            self.unchanged_ordered_fimg = deepcopy(self.ordered_first_image)
+            self.modif_validated_shapes = deepcopy(self.first_image.validated_shapes)
+            self.standard = -1
+            counter = 0
+            # Repeat until every standardized box fits in the picture (at most 20 tries)
+            while np.any(np.less(self.standard, 0)) and counter < 20:
+                counter += 1
+                self.left = np.zeros(self.first_image.shape_number, dtype=np.int64)
+                self.right = np.repeat(self.modif_validated_shapes.shape[1], self.first_image.shape_number)
+                self.top = np.zeros(self.first_image.shape_number, dtype=np.int64)
+                self.bot = np.repeat(self.modif_validated_shapes.shape[0], self.first_image.shape_number)
+                if are_gravity_centers_moving:
+                    self.top, self.bot, self.left, self.right, self.ordered_first_image = get_bb_with_moving_centers(
+                        motion_list, all_specimens_have_same_direction, original_shape_hsize,
+                        self.first_image.validated_shapes, self.first_image.y_boundaries)
+                    new_ordered_first_image = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                    for i in np.arange(1, self.first_image.shape_number + 1):
+                        previous_shape = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                        previous_shape[np.nonzero(self.unchanged_ordered_fimg == i)] = 1
+                        new_potentials = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                        new_potentials[np.nonzero(self.ordered_first_image == i)] = 1
+                        new_potentials[np.nonzero(self.unchanged_ordered_fimg == i)] = 0
+                        pads = ProgressivelyAddDistantShapes(new_potentials, previous_shape, max_distance=2)
+                        pads.consider_shapes_sizes(min_shape_size=10)
+                        pads.connect_shapes(only_keep_connected_shapes=True, rank_connecting_pixels=False)
+                        new_ordered_first_image[np.nonzero(pads.expanded_shape)] = i
+                    self.ordered_first_image = new_ordered_first_image
+                    self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+                    self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
+                    self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                        self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+                    self.top, self.bot, self.left, self.right = get_quick_bounding_boxes(
+                        self.modif_validated_shapes, self.ordered_first_image, self.ordered_stats)
+                else:
+                    self.top, self.bot, self.left, self.right = get_quick_bounding_boxes(
+                        self.modif_validated_shapes, self.ordered_first_image, self.ordered_stats)
+                self._standardize_video_sizes()
+            if counter == 20:
+                # Clamp any remaining out-of-picture coordinate
+                self.top[self.top < 0] = 1
+                self.bot[self.bot >= self.ordered_first_image.shape[0] - 1] = self.ordered_first_image.shape[0] - 2
+                self.left[self.left < 0] = 1
+                self.right[self.right >= self.ordered_first_image.shape[1] - 1] = self.ordered_first_image.shape[1] - 2
+            del self.ordered_first_image
+            del self.unchanged_ordered_fimg
+            del self.modif_validated_shapes
+            del self.standard
+            del self.shapes_to_remove
+            self.bot += 1
+            self.right += 1
+        else:
+            self._whole_image_bounding_boxes()
+
+    def _whole_image_bounding_boxes(self):
+        """Use the whole image as a single arena."""
+        self.top, self.bot, self.left, self.right = np.array([0]), np.array([self.first_image.image.shape[0]]), np.array([0]), np.array([self.first_image.image.shape[1]])
+
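+    # --- Illustrative sketch (editor addition, hypothetical names): how a
+    # per-label bounding box can be read from a labelled image such as the
+    # `ordered_first_image` used above.
+    @staticmethod
+    def _bounding_box_sketch(labelled_image, label):
+        ys, xs = np.nonzero(labelled_image == label)
+        return ys.min(), ys.max(), xs.min(), xs.max()  # top, bot, left, right
+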
+    def _standardize_video_sizes(self):
+        """
+        Standardize video sizes by adjusting bounding boxes.
+
+        Pads every bounding box so that all boxes share the dimensions of the
+        largest one, keeps them within the frame boundaries, and makes both
+        dimensions even to avoid issues with odd sizes during video writing.
+
+        Attributes modified
+        -------------------
+        standard : numpy.ndarray
+            Standardized bounding boxes (top, bottom, left, right per row).
+        shapes_to_remove : numpy.ndarray
+            Indices of the shapes to remove from the image.
+        modif_validated_shapes : numpy.ndarray
+            Validated shapes after removing out-of-picture arenas.
+        ordered_stats, ordered_first_image
+            Updated statistics and labelled image of the remaining shapes.
+        first_image.shape_number : int
+            Updated number of shapes in the first image.
+        not_analyzed_individuals : numpy.ndarray
+            Indices of the individuals that will not be analyzed.
+        """
+        distance_threshold_to_consider_an_arena_out_of_the_picture = None  # in pixels; -50 worked nicely
+
+        # The corrections that skip arenas lying out of view do not work for moving centers
+        y_diffs = self.bot - self.top
+        x_diffs = self.right - self.left
+        add_to_y = (np.max(y_diffs) - y_diffs) / 2
+        add_to_x = (np.max(x_diffs) - x_diffs) / 2
+        self.standard = np.zeros((len(self.top), 4), dtype=np.int64)
+        self.standard[:, 0] = self.top - np.uint8(np.floor(add_to_y))
+        self.standard[:, 1] = self.bot + np.uint8(np.ceil(add_to_y))
+        self.standard[:, 2] = self.left - np.uint8(np.floor(add_to_x))
+        self.standard[:, 3] = self.right + np.uint8(np.ceil(add_to_x))
+
+        # Monitor whether a bounding box gets out of the picture
+        out_of_pic = deepcopy(self.standard)
+        out_of_pic[:, 1] = self.ordered_first_image.shape[0] - out_of_pic[:, 1] - 1
+        out_of_pic[:, 3] = self.ordered_first_image.shape[1] - out_of_pic[:, 3] - 1
+
+        if distance_threshold_to_consider_an_arena_out_of_the_picture is None:
+            distance_threshold_to_consider_an_arena_out_of_the_picture = np.min(out_of_pic) - 1
+
+        # If at least one box overflows, apply a correction; otherwise proceed and write videos.
+        # If the overflow is strong, remove the corresponding individuals and redo the bounding box search.
+        if np.any(np.less(out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)):
+            # Remove shapes
+            self.standard = -1
+            self.shapes_to_remove = np.nonzero(np.less(out_of_pic, -20))[0]
+            for shape_i in self.shapes_to_remove:
+                self.ordered_first_image[self.ordered_first_image == (shape_i + 1)] = 0
+            self.modif_validated_shapes = np.zeros(self.ordered_first_image.shape, dtype=np.uint8)
+            self.modif_validated_shapes[np.nonzero(self.ordered_first_image)] = 1
+            self.ordered_stats, ordered_centroids, self.ordered_first_image = rank_from_top_to_bottom_from_left_to_right(
+                self.modif_validated_shapes, self.first_image.y_boundaries, get_ordered_image=True)
+            self.first_image.shape_number = self.first_image.shape_number - len(self.shapes_to_remove)
+            self.not_analyzed_individuals = np.unique(self.unchanged_ordered_fimg -
+                                                      (self.unchanged_ordered_fimg * self.modif_validated_shapes))[1:]
+        else:
+            # Reduce all box sizes if necessary and proceed
+            if np.any(np.less(out_of_pic, 0)):
+                # When the overflow is weak, redo the standardization with lower "add_to_y" and "add_to_x"
+                overflow = np.nonzero(np.logical_and(np.less(out_of_pic, 0), np.greater_equal(
+                    out_of_pic, distance_threshold_to_consider_an_arena_out_of_the_picture)))[0]
+                # Check whether the overflow occurs on the y axis
+                if np.any(np.less(out_of_pic[overflow, :2], 0)):
+                    add_to_top_and_bot = np.min(out_of_pic[overflow, :2])
+                    self.standard[:, 0] = self.standard[:, 0] - add_to_top_and_bot
+                    self.standard[:, 1] = self.standard[:, 1] + add_to_top_and_bot
+                # Check whether the overflow occurs on the x axis
+                if np.any(np.less(out_of_pic[overflow, 2:], 0)):
+                    add_to_left_and_right = np.min(out_of_pic[overflow, 2:])
+                    self.standard[:, 2] = self.standard[:, 2] - add_to_left_and_right
+                    self.standard[:, 3] = self.standard[:, 3] + add_to_left_and_right
+            # If the x or y sizes are odd, make them even:
+            # OpenCV removes one pixel from odd dimensions when writing videos.
+            if (self.standard[0, 1] - self.standard[0, 0]) % 2 != 0:
+                self.standard[:, 1] -= 1
+            if (self.standard[0, 3] - self.standard[0, 2]) % 2 != 0:
+                self.standard[:, 3] -= 1
+            self.top = self.standard[:, 0]
+            self.bot = self.standard[:, 1]
+            self.left = self.standard[:, 2]
+            self.right = self.standard[:, 3]
+
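+    # --- Illustrative sketch (editor addition, hypothetical names): padding
+    # all boxes to a common, even size, the idea behind
+    # _standardize_video_sizes above. Clamping to the image bounds is omitted.
+    @staticmethod
+    def _pad_boxes_sketch(top, bot, left, right):
+        h = np.max(bot - top)
+        w = np.max(right - left)
+        h += h % 2  # video writers behave better with even dimensions
+        w += w % 2
+        new_top = top - (h - (bot - top)) // 2
+        new_left = left - (w - (right - left)) // 2
+        return new_top, new_top + h, new_left, new_left + w
+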
+    def get_origins_and_backgrounds_lists(self):
+        """
+        Create the per-arena origin and background lists.
+
+        Slices the first image and its background-subtraction arrays
+        according to the previously computed arena boundaries. If the top,
+        bottom, left and right boundaries are not yet initialized, the whole
+        image is used as a single arena.
+
+        Notes
+        -----
+        This method populates `self.vars` with lists of arrays sliced from
+        the first image and its backgrounds.
+        """
+        logging.info("Create origins and background lists")
+        if self.top is None:
+            self._whole_image_bounding_boxes()
+        first_im = self.first_image.validated_shapes
+        self.vars['origin_list'] = []
+        self.vars['background_list'] = []
+        self.vars['background_list2'] = []
+        for rep in np.arange(len(self.vars['analyzed_individuals'])):
+            self.vars['origin_list'].append(first_im[self.top[rep]:self.bot[rep], self.left[rep]:self.right[rep]])
+        if self.vars['subtract_background']:
+            for rep in np.arange(len(self.vars['analyzed_individuals'])):
+                self.vars['background_list'].append(
+                    self.first_image.subtract_background[self.top[rep]:self.bot[rep], self.left[rep]:self.right[rep]])
+                if self.vars['convert_for_motion']['logical'] != 'None':
+                    self.vars['background_list2'].append(self.first_image.subtract_background2[
+                        self.top[rep]:self.bot[rep], self.left[rep]:self.right[rep]])
+
+    def complete_image_analysis(self):
+        """Run the whole analysis on a single image and save the result tables."""
+        if not self.visualize and len(self.last_image.im_combinations) > 0:
+            self.last_image.binary_image = self.last_image.im_combinations[self.current_combination_id]['binary_image']
+            self.last_image.image = self.last_image.im_combinations[self.current_combination_id]['converted_image']
+        self.instantiate_tables()
+        if len(self.vars['exif']) > 1:
+            self.vars['exif'] = self.vars['exif'][0]
+        if len(self.last_image.all_c_spaces) == 0:
+            self.last_image.all_c_spaces['bgr'] = self.last_image.bgr.copy()
+        if self.all['bio_mask'] is not None:
+            self.last_image.binary_image[self.all['bio_mask']] = 1
+        if self.all['back_mask'] is not None:
+            self.last_image.binary_image[self.all['back_mask']] = 0
+        for i, arena in enumerate(self.vars['analyzed_individuals']):
+            binary = self.last_image.binary_image[self.top[i]:self.bot[i], self.left[i]:self.right[i]]
+            efficiency_test = self.last_image.all_c_spaces['bgr'][self.top[i]:self.bot[i], self.left[i]:self.right[i], :]
+            if not self.vars['several_blob_per_arena']:
+                binary = keep_one_connected_component(binary)
+                one_row_per_frame = compute_one_descriptor_per_frame(binary[None, :, :],
+                                                                     arena,
+                                                                     self.vars['exif'],
+                                                                     self.vars['descriptors'],
+                                                                     self.vars['output_in_mm'],
+                                                                     self.vars['average_pixel_size'],
+                                                                     self.vars['do_fading'],
+                                                                     self.vars['save_coord_specimen'])
+                coord_network = None
+                coord_pseudopods = None
+                if self.vars['save_graph']:
+                    if coord_network is None:
+                        coord_network = np.array(np.nonzero(binary))
+                    extract_graph_dynamics(self.last_image.image[None, :, :], coord_network, arena,
+                                           0, None, coord_pseudopods)
+            else:
+                one_row_per_frame = compute_one_descriptor_per_colony(binary[None, :, :],
+                                                                      arena,
+                                                                      self.vars['exif'],
+                                                                      self.vars['descriptors'],
+                                                                      self.vars['output_in_mm'],
+                                                                      self.vars['average_pixel_size'],
+                                                                      self.vars['do_fading'],
+                                                                      self.vars['first_move_threshold'],
+                                                                      self.vars['save_coord_specimen'])
+            if self.vars['fractal_analysis']:
+                zoomed_binary, side_lengths = prepare_box_counting(binary,
+                                                                   min_mesh_side=self.vars[
+                                                                       'fractal_box_side_threshold'],
+                                                                   zoom_step=self.vars['fractal_zoom_step'],
+                                                                   contours=True)
+                box_counting_dimensions = box_counting_dimension(zoomed_binary, side_lengths)
+                one_row_per_frame["fractal_dimension"] = box_counting_dimensions[0]
+                one_row_per_frame["fractal_box_nb"] = box_counting_dimensions[1]
+                one_row_per_frame["fractal_r_value"] = box_counting_dimensions[2]
+
+            one_descriptor_per_arena = {}
+            one_descriptor_per_arena["arena"] = arena
+            one_descriptor_per_arena["first_move"] = pd.NA
+            one_descriptor_per_arena["final_area"] = binary.sum()
+            one_descriptor_per_arena["iso_digi_transi"] = pd.NA
+            one_descriptor_per_arena["is_growth_isotropic"] = pd.NA
+            self.update_one_row_per_arena(i, one_descriptor_per_arena)
+            # A single frame per arena here, hence the one-row range
+            self.update_one_row_per_frame(i, i + 1, one_row_per_frame)
+            contours = np.nonzero(get_contours(binary))
+            efficiency_test[contours[0], contours[1], :] = np.array((94, 0, 213), dtype=np.uint8)
+            self.add_analysis_visualization_to_first_and_last_images(i, efficiency_test, None)
+        self.save_tables(with_last_image=False)
+
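+    # --- Illustrative sketch (editor addition, hypothetical names): the
+    # box-counting estimate of a fractal dimension, the principle behind the
+    # fractal analysis above. `counts` is the number of occupied boxes for
+    # each box side length in `sides`; the dimension is the slope of
+    # log(count) against log(1/side).
+    @staticmethod
+    def _box_counting_sketch(sides, counts):
+        slope, intercept = np.polyfit(np.log(1 / np.asarray(sides, dtype=float)),
+                                      np.log(np.asarray(counts, dtype=float)), 1)
+        return slope  # estimated fractal dimension
+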
+    def prepare_video_writing(self, img_list: list, min_ram_free: float, in_colors: bool=False, pathway: str=""):
+        """
+        Prepare the raw video (.npy) writing process for Cellects.
+
+        Parameters
+        ----------
+        img_list : list
+            List of images to process.
+        min_ram_free : float
+            Minimum amount of RAM (in GB) that should remain free.
+        in_colors : bool, optional
+            Whether the images are in color. Default is False.
+        pathway : str, optional
+            Path in which to save the video files. Default is an empty string.
+
+        Returns
+        -------
+        tuple
+            A tuple containing:
+            - bunch_nb: int, number of bunches needed for video writing.
+            - video_nb_per_bunch: int, number of videos per bunch.
+            - sizes: ndarray, dimensions of each video.
+            - video_bunch: list or ndarray, initialized video arrays.
+            - vid_names: list, names of the video files.
+            - rom_memory_required: None or float, required disk space in GB.
+            - analysis_status: dict, status and message of the analysis process.
+            - remaining: int, number of videos that do not fit in a complete bunch.
+            - use_list_of_vid: bool, whether the videos have heterogeneous sizes.
+            - is_landscape: bool, whether the first image is wider than tall.
+
+        Notes
+        -----
+        - The function estimates the necessary memory and checks the available
+          RAM to decide in how many bunches the videos must be written.
+        - If the images are in color, the memory requirement is tripled.
+        """
+        # 1) Create a list of video names, skipping the individuals that will not be analyzed
+        if self.not_analyzed_individuals is not None:
+            number_to_add = len(self.not_analyzed_individuals)
+        else:
+            number_to_add = 0
+        vid_names = list()
+        ind_i = 0
+        counter = 0
+        while ind_i < (self.first_image.shape_number + number_to_add):
+            ind_i += 1
+            while np.any(np.isin(self.not_analyzed_individuals, ind_i)):
+                ind_i += 1
+            vid_names.append(pathway + "ind_" + str(ind_i) + ".npy")
+            counter += 1
+        img_nb = len(img_list)
+
+        # 2) Create a table of the dimensions of each video and estimate, in GiB,
+        # the memory needed to hold them all.
+        # x8: bytes to bits; x1.16415e-10 (= 2 ** -33): bits to GiB.
+        necessary_memory = img_nb * np.multiply((self.bot - self.top).astype(np.uint64),
+                                                (self.right - self.left).astype(np.uint64)).sum() * 8 * 1.16415e-10
+        if in_colors:
+            sizes = np.column_stack(
+                (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top, self.right - self.left,
+                 np.repeat(3, self.first_image.shape_number)))
+            necessary_memory *= 3
+        else:
+            sizes = np.column_stack(
+                (np.repeat(img_nb, self.first_image.shape_number), self.bot - self.top, self.right - self.left))
+        use_list_of_vid = True
+        if np.all(sizes[0, :] == sizes):
+            use_list_of_vid = False
+        analysis_status = {"continue": True, "message": ""}
+        available_memory = (virtual_memory().available >> 30) - min_ram_free
+        if available_memory <= 0:
+            analysis_status = {"continue": False, "message": "There is not enough RAM available"}
+            bunch_nb = 1
+        else:
+            bunch_nb = int(np.ceil(necessary_memory / available_memory))
+            if bunch_nb > 1:
+                # The program needs twice the memory while creating the second bunch.
+                bunch_nb = int(np.ceil(2 * necessary_memory / available_memory))
+
+        video_nb_per_bunch = np.floor(self.first_image.shape_number / bunch_nb).astype(np.uint8)
+        video_bunch = None
+        try:
+            if use_list_of_vid:
+                video_bunch = [np.zeros(sizes[i, :], dtype=np.uint8) for i in range(video_nb_per_bunch)]
+            else:
+                video_bunch = np.zeros(np.append(sizes[0, :], video_nb_per_bunch), dtype=np.uint8)
+        except ValueError as v_err:
+            analysis_status = {"continue": False,
+                               "message": "Probably failed to detect the right cell(s) number; do the first image analysis manually."}
+            logging.error(f"{analysis_status['message']} The error is: {v_err}")
+        # Check the available disk space
+        if (psutil.disk_usage('/')[2] >> 30) < (necessary_memory + 2):
+            rom_memory_required = necessary_memory + 2
+        else:
+            rom_memory_required = None
+        remaining = self.first_image.shape_number % bunch_nb
+        if remaining > 0:
+            bunch_nb += 1
+        is_landscape = self.first_image.image.shape[0] < self.first_image.image.shape[1]
+        logging.info(f"Cellects will start writing {self.first_image.shape_number} videos. Given the available memory, it will do it in {bunch_nb} bunch(es)")
+        return bunch_nb, video_nb_per_bunch, sizes, video_bunch, vid_names, rom_memory_required, analysis_status, remaining, use_list_of_vid, is_landscape
+
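+    # --- Illustrative sketch (editor addition, hypothetical names): splitting
+    # a workload into bunches according to a memory budget, as done above.
+    @staticmethod
+    def _bunch_count_sketch(necessary_gib, available_gib):
+        if available_gib <= 0:
+            return None  # cannot proceed at all
+        bunches = int(np.ceil(necessary_gib / available_gib))
+        if bunches > 1:
+            # keep room for building the next bunch while the previous one lives
+            bunches = int(np.ceil(2 * necessary_gib / available_gib))
+        return bunches
+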
+    def update_output_list(self):
+        """
+        Update the descriptor output list from the user selection.
+
+        Copies the selected descriptors from `self.all['descriptors']` into
+        `self.vars['descriptors']`, expanding compound entries ('xy'
+        descriptors and axis descriptors) into one entry per component.
+        """
+        self.vars['descriptors'] = {}
+        for descriptor in self.all['descriptors'].keys():
+            if descriptor == 'standard_deviation_xy':
+                self.vars['descriptors']['standard_deviation_x'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['standard_deviation_y'] = self.all['descriptors'][descriptor]
+            elif descriptor == 'skewness_xy':
+                self.vars['descriptors']['skewness_x'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['skewness_y'] = self.all['descriptors'][descriptor]
+            elif descriptor == 'kurtosis_xy':
+                self.vars['descriptors']['kurtosis_x'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['kurtosis_y'] = self.all['descriptors'][descriptor]
+            elif descriptor == 'major_axes_len_and_angle':
+                self.vars['descriptors']['major_axis_len'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['minor_axis_len'] = self.all['descriptors'][descriptor]
+                self.vars['descriptors']['axes_orientation'] = self.all['descriptors'][descriptor]
+            elif descriptor in from_shape_descriptors_class:
+                self.vars['descriptors'][descriptor] = self.all['descriptors'][descriptor]
+        self.vars['descriptors']['newly_explored_area'] = self.vars['do_fading']
+
+    def update_available_core_nb(self, image_bit_number=256, video_bit_number=140):
+        """
+        Update the number of usable cores based on memory constraints.
+
+        Parameters
+        ----------
+        image_bit_number : int, optional
+            Number of bits per image pixel (default is 256).
+        video_bit_number : int, optional
+            Number of bits per video frame pixel (default is 140).
+
+        Returns
+        -------
+        float
+            Absolute difference between available and necessary memory, in GB,
+            rounded to three decimals.
+
+        Notes
+        -----
+        The per-pixel bit counts are adjusted according to the following
+        `self.vars` entries before estimating memory usage:
+        `lose_accuracy_to_save_memory`, `convert_for_motion`,
+        `already_greyscale`, `save_coord_thickening_slimming`,
+        `oscilacyto_analysis` and `save_coord_network`.
+        """
+        if self.vars['lose_accuracy_to_save_memory']:
+            video_bit_number -= 56
+        if self.vars['convert_for_motion']['logical'] != 'None':
+            video_bit_number += 64
+            if self.vars['lose_accuracy_to_save_memory']:
+                video_bit_number -= 56
+        if self.vars['already_greyscale']:
+            video_bit_number -= 64
+        if self.vars['save_coord_thickening_slimming'] or self.vars['oscilacyto_analysis']:
+            video_bit_number += 16
+            image_bit_number += 128
+        if self.vars['save_coord_network']:
+            video_bit_number += 8
+            image_bit_number += 64
+
+        if isinstance(self.bot, list):
+            one_image_memory = np.multiply((self.bot[0] - self.top[0]),
+                                           (self.right[0] - self.left[0])).max().astype(np.uint64)
+        else:
+            one_image_memory = np.multiply((self.bot - self.top).astype(np.uint64),
+                                           (self.right - self.left).astype(np.uint64)).max()
+        one_video_memory = self.vars['img_number'] * one_image_memory
+        # x1.16415e-10 (= 2 ** -33): bits to GiB
+        necessary_memory = (one_image_memory * image_bit_number + one_video_memory * video_bit_number) * 1.16415e-10
+        available_memory = (virtual_memory().available >> 30) - self.vars['min_ram_free']
+        max_repeat_in_memory = (available_memory // necessary_memory).astype(np.uint16)
+        if max_repeat_in_memory > 1:
+            # Keep a safety margin of a factor of two when several analyses fit in memory
+            max_repeat_in_memory = np.max(((available_memory // (2 * necessary_memory)).astype(np.uint16), 1))
+
+        self.cores = np.min((self.all['cores'], max_repeat_in_memory))
+        if self.cores > self.sample_number:
+            self.cores = self.sample_number
+        return np.round(np.absolute(available_memory - necessary_memory), 3)
+
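+    # --- Illustrative sketch (editor addition, hypothetical names): the
+    # bits-per-pixel memory accounting used above, converting a pixel count
+    # and a per-pixel bit cost into GiB.
+    @staticmethod
+    def _memory_gib_sketch(pixel_count, bits_per_pixel):
+        return pixel_count * bits_per_pixel / 8 / 2 ** 30  # bits -> bytes -> GiB
+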
+    def update_one_row_per_arena(self, i: int, table_to_add):
+        """
+        Update one row of the per-arena results table.
+
+        Writes the values of `table_to_add` into row `i` of the per-arena
+        DataFrame. If the DataFrame does not exist yet, it is initialized
+        with zeros.
+
+        Parameters
+        ----------
+        i : int
+            Index of the arena to update.
+        table_to_add : dict
+            Dictionary of values to add; keys are column names.
+        """
+        if not self.vars['several_blob_per_arena']:
+            if self.one_row_per_arena is None:
+                self.one_row_per_arena = pd.DataFrame(
+                    np.zeros((len(self.vars['analyzed_individuals']), len(table_to_add)), dtype=float),
+                    columns=table_to_add.keys())
+            self.one_row_per_arena.iloc[i, :] = list(table_to_add.values())
+
+    def update_one_row_per_frame(self, i: int, j: int, table_to_add):
+        """
+        Update rows i to j (exclusive) of the per-frame results table with
+        the values from `table_to_add`.
+
+        Parameters
+        ----------
+        i : int
+            First row index to update in `self.one_row_per_frame`.
+        j : int
+            Row index (exclusive) at which to stop updating.
+        table_to_add : dict
+            Dictionary mapping column labels to lists or arrays of data.
+
+        Notes
+        -----
+        Only applies when each arena holds a single specimen. If
+        `self.one_row_per_frame` is None, it is first initialized with one
+        row per analyzed individual and per frame.
+        """
+        if not self.vars['several_blob_per_arena']:
+            if self.one_row_per_frame is None:
+                self.one_row_per_frame = pd.DataFrame(index=range(len(self.vars['analyzed_individuals']) *
+                                                                  self.vars['img_number']),
+                                                      columns=table_to_add.keys())
+            self.one_row_per_frame.iloc[i:j, :] = table_to_add
+
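+    # --- Illustrative sketch (editor addition, hypothetical names):
+    # preallocating a results table and filling it one row range at a time,
+    # as the two update methods above do. `columns` must be non-empty.
+    @staticmethod
+    def _preallocated_table_sketch(arena_nb, frame_nb, columns):
+        table = pd.DataFrame(index=range(arena_nb * frame_nb), columns=columns)
+        for k in range(arena_nb):
+            # rows of arena k occupy the half-open range [k * frame_nb, (k + 1) * frame_nb)
+            table.iloc[k * frame_nb:(k + 1) * frame_nb, 0] = k
+        return table
+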
+    def instantiate_tables(self):
+        """
+        Update the output list and prepare the results tables and validation images.
+
+        Resets the per-arena and per-frame tables, makes sure both validation
+        images are 3-channel BGR arrays, and neutralizes the color-space
+        conversion when the images are already greyscale.
+        """
+        self.update_output_list()
+        logging.info("Instantiate results tables and validation images")
+        self.fractal_box_sizes = None
+        self.one_row_per_arena = None
+        self.one_row_per_frame = None
+        if self.vars['already_greyscale']:
+            if len(self.first_image.bgr.shape) == 2:
+                self.first_image.bgr = np.stack((self.first_image.bgr, self.first_image.bgr, self.first_image.bgr),
+                                                axis=2).astype(np.uint8)
+            if len(self.last_image.bgr.shape) == 2:
+                self.last_image.bgr = np.stack((self.last_image.bgr, self.last_image.bgr, self.last_image.bgr),
+                                               axis=2).astype(np.uint8)
+            self.vars["convert_for_motion"] = {"bgr": np.array((1, 1, 1), dtype=np.uint8), "logical": "None"}
+
+    def add_analysis_visualization_to_first_and_last_images(self, i: int, first_visualization: NDArray, last_visualization: NDArray=None):
+        """
+        Add analysis visualizations to the first and last images of a sequence.
+
+        Parameters
+        ----------
+        i : int
+            Index of the arena in the sequence.
+        first_visualization : NDArray[np.uint8]
+            The visualization to draw onto the first image.
+        last_visualization : NDArray[np.uint8], optional
+            The visualization to draw onto the last image, by default None.
+
+        Notes
+        -----
+        If `self.vars['arena_shape']` is 'circle', the visualization is
+        masked by an ellipse before being drawn.
+        """
+        minmax = (self.top[i], self.bot[i], self.left[i], self.right[i])
+        self.first_image.bgr = draw_img_with_mask(self.first_image.bgr, self.first_image.bgr.shape[:2], minmax,
+                                                  self.vars['arena_shape'], first_visualization)
+        if last_visualization is not None:
+            self.last_image.bgr = draw_img_with_mask(self.last_image.bgr, self.last_image.bgr.shape[:2], minmax,
+                                                     self.vars['arena_shape'], last_visualization)
+
+    def save_tables(self, with_last_image: bool=True):
+        """
+        Export analysis results to CSV files and save visualization outputs.
+
+        Generates the following output:
+        - one_row_per_arena.csv, one_row_per_frame.csv : tracking data per arena/frame.
+        - software_settings.csv : full configuration settings, for reproducibility.
+
+        Notes
+        -----
+        Ensure that none of the exported CSV files is open in another program
+        while this method runs: files that cannot be overwritten are skipped
+        and the PermissionError is logged.
+        """
+        logging.info("Save results tables and validation images")
+        if not self.vars['several_blob_per_arena']:
+            try:
+                self.one_row_per_arena.to_csv("one_row_per_arena.csv", sep=";", index=False, lineterminator='\n')
+                del self.one_row_per_arena
+            except PermissionError:
+                logging.error("Never let one_row_per_arena.csv open when Cellects runs")
+                self.message_from_thread.emit("Never let one_row_per_arena.csv open when Cellects runs")
+            try:
+                self.one_row_per_frame.to_csv("one_row_per_frame.csv", sep=";", index=False, lineterminator='\n')
+                del self.one_row_per_frame
+            except PermissionError:
+                logging.error("Never let one_row_per_frame.csv open when Cellects runs")
+                self.message_from_thread.emit("Never let one_row_per_frame.csv open when Cellects runs")
+        # Save the validation images with an extension differing from the source
+        # images, so that they cannot be mistaken for data.
+        if self.all['extension'] == '.JPG':
+            extension = '.PNG'
+        else:
+            extension = '.JPG'
+        if with_last_image:
+            cv2.imwrite(f"Analysis efficiency, last image{extension}", self.last_image.bgr)
+        cv2.imwrite(
+            f"Analysis efficiency, {np.ceil(self.vars['img_number'] / 10).astype(np.uint64)}th image{extension}",
+            self.first_image.bgr)
+        software_settings = deepcopy(self.vars)
+        for key in ['descriptors', 'analyzed_individuals', 'exif', 'dims', 'origin_list', 'background_list',
+                    'background_list2', 'folder_list', 'sample_number_per_folder']:
+            software_settings.pop(key, None)
+        global_settings = deepcopy(self.all)
+        for key in ['analyzed_individuals', 'night_mode', 'expert_mode', 'is_auto', 'arena', 'video_option',
+                    'compute_all_options', 'vars', 'dims', 'origin_list', 'background_list', 'background_list2',
+                    'descriptors', 'folder_list', 'sample_number_per_folder']:
+            global_settings.pop(key, None)
+        software_settings.update(global_settings)
+        software_settings = pd.DataFrame.from_dict(software_settings, columns=["Setting"], orient='index')
+        try:
+            software_settings.to_csv("software_settings.csv", sep=";")
+        except PermissionError:
+            logging.error("Never let software_settings.csv open when Cellects runs")
+            self.message_from_thread.emit("Never let software_settings.csv open when Cellects runs")
+