celldetective 1.4.0__py3-none-any.whl → 1.4.1.post1__py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their respective public registries.
Files changed (78)
  1. celldetective/_version.py +1 -1
  2. celldetective/exceptions.py +11 -0
  3. celldetective/filters.py +7 -1
  4. celldetective/gui/InitWindow.py +4 -1
  5. celldetective/gui/__init__.py +2 -9
  6. celldetective/gui/about.py +2 -2
  7. celldetective/gui/base_annotator.py +786 -0
  8. celldetective/gui/classifier_widget.py +18 -13
  9. celldetective/gui/configure_new_exp.py +51 -30
  10. celldetective/gui/control_panel.py +10 -7
  11. celldetective/gui/{signal_annotator.py → event_annotator.py} +473 -1437
  12. celldetective/gui/generic_signal_plot.py +2 -1
  13. celldetective/gui/gui_utils.py +5 -2
  14. celldetective/gui/help/neighborhood.json +2 -2
  15. celldetective/gui/layouts.py +21 -11
  16. celldetective/gui/{signal_annotator2.py → pair_event_annotator.py} +3 -1
  17. celldetective/gui/process_block.py +129 -91
  18. celldetective/gui/processes/downloader.py +37 -34
  19. celldetective/gui/processes/measure_cells.py +14 -8
  20. celldetective/gui/processes/segment_cells.py +438 -314
  21. celldetective/gui/processes/track_cells.py +12 -13
  22. celldetective/gui/settings/__init__.py +7 -0
  23. celldetective/gui/settings/_settings_base.py +70 -0
  24. celldetective/gui/{retrain_signal_model_options.py → settings/_settings_event_model_training.py} +35 -91
  25. celldetective/gui/{measurement_options.py → settings/_settings_measurements.py} +28 -81
  26. celldetective/gui/{neighborhood_options.py → settings/_settings_neighborhood.py} +1 -1
  27. celldetective/gui/settings/_settings_segmentation.py +49 -0
  28. celldetective/gui/{retrain_segmentation_model_options.py → settings/_settings_segmentation_model_training.py} +33 -79
  29. celldetective/gui/{signal_annotator_options.py → settings/_settings_signal_annotator.py} +73 -95
  30. celldetective/gui/{btrack_options.py → settings/_settings_tracking.py} +64 -87
  31. celldetective/gui/styles.py +2 -1
  32. celldetective/gui/survival_ui.py +1 -1
  33. celldetective/gui/tableUI.py +25 -0
  34. celldetective/gui/table_ops/__init__.py +0 -0
  35. celldetective/gui/table_ops/merge_groups.py +118 -0
  36. celldetective/gui/viewers.py +3 -5
  37. celldetective/gui/workers.py +0 -2
  38. celldetective/io.py +98 -55
  39. celldetective/links/zenodo.json +145 -144
  40. celldetective/measure.py +31 -26
  41. celldetective/preprocessing.py +34 -21
  42. celldetective/regionprops/_regionprops.py +16 -5
  43. celldetective/scripts/measure_cells.py +5 -5
  44. celldetective/scripts/measure_relative.py +16 -11
  45. celldetective/scripts/segment_cells.py +4 -4
  46. celldetective/scripts/segment_cells_thresholds.py +3 -3
  47. celldetective/scripts/track_cells.py +7 -7
  48. celldetective/scripts/train_segmentation_model.py +10 -1
  49. celldetective/tracking.py +10 -4
  50. celldetective/utils.py +59 -58
  51. {celldetective-1.4.0.dist-info → celldetective-1.4.1.post1.dist-info}/METADATA +1 -1
  52. celldetective-1.4.1.post1.dist-info/RECORD +123 -0
  53. tests/gui/__init__.py +0 -0
  54. tests/gui/test_new_project.py +228 -0
  55. tests/{test_qt.py → gui/test_project.py} +22 -26
  56. tests/test_preprocessing.py +2 -2
  57. celldetective/models/segmentation_effectors/ricm_bf_all_last/config_input.json +0 -79
  58. celldetective/models/segmentation_effectors/ricm_bf_all_last/ricm_bf_all_last +0 -0
  59. celldetective/models/segmentation_effectors/ricm_bf_all_last/training_instructions.json +0 -37
  60. celldetective/models/segmentation_effectors/test-transfer/config_input.json +0 -39
  61. celldetective/models/segmentation_effectors/test-transfer/test-transfer +0 -0
  62. celldetective/models/signal_detection/NucCond/classification_loss.png +0 -0
  63. celldetective/models/signal_detection/NucCond/classifier.h5 +0 -0
  64. celldetective/models/signal_detection/NucCond/config_input.json +0 -1
  65. celldetective/models/signal_detection/NucCond/log_classifier.csv +0 -126
  66. celldetective/models/signal_detection/NucCond/log_regressor.csv +0 -282
  67. celldetective/models/signal_detection/NucCond/regression_loss.png +0 -0
  68. celldetective/models/signal_detection/NucCond/regressor.h5 +0 -0
  69. celldetective/models/signal_detection/NucCond/scores.npy +0 -0
  70. celldetective/models/signal_detection/NucCond/test_confusion_matrix.png +0 -0
  71. celldetective/models/signal_detection/NucCond/test_regression.png +0 -0
  72. celldetective/models/signal_detection/NucCond/validation_confusion_matrix.png +0 -0
  73. celldetective/models/signal_detection/NucCond/validation_regression.png +0 -0
  74. celldetective-1.4.0.dist-info/RECORD +0 -131
  75. {celldetective-1.4.0.dist-info → celldetective-1.4.1.post1.dist-info}/WHEEL +0 -0
  76. {celldetective-1.4.0.dist-info → celldetective-1.4.1.post1.dist-info}/entry_points.txt +0 -0
  77. {celldetective-1.4.0.dist-info → celldetective-1.4.1.post1.dist-info}/licenses/LICENSE +0 -0
  78. {celldetective-1.4.0.dist-info → celldetective-1.4.1.post1.dist-info}/top_level.txt +0 -0
@@ -4,8 +4,28 @@ import datetime
  import os
  import json
  import numpy as np
- from celldetective.io import extract_position_name, locate_segmentation_model, auto_load_number_of_frames, load_frames, _check_label_dims, _load_frames_to_segment
- from celldetective.utils import _rescale_labels, _segment_image_with_stardist_model, _segment_image_with_cellpose_model, _prep_stardist_model, _prep_cellpose_model, _get_normalize_kwargs_from_config, extract_experiment_channels, _estimate_scale_factor, _extract_channel_indices_from_config, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel
+ from celldetective.io import (
+     extract_position_name,
+     locate_segmentation_model,
+     auto_load_number_of_frames,
+     load_frames,
+     _check_label_dims,
+     _load_frames_to_segment,
+ )
+ from celldetective.utils import (
+     _rescale_labels,
+     _segment_image_with_stardist_model,
+     _segment_image_with_cellpose_model,
+     _prep_stardist_model,
+     _prep_cellpose_model,
+     _get_normalize_kwargs_from_config,
+     extract_experiment_channels,
+     _estimate_scale_factor,
+     _extract_channel_indices_from_config,
+     config_section_to_dict,
+     _extract_nbr_channels_from_config,
+     _get_img_num_per_channel,
+ )

  from pathlib import Path, PurePath
  from glob import glob
@@ -13,363 +33,467 @@ from shutil import rmtree
  from tqdm import tqdm
  import numpy as np
  from csbdeep.io import save_tiff_imagej_compatible
- from celldetective.segmentation import segment_frame_from_thresholds, merge_instance_segmentation
+ from celldetective.segmentation import (
+     segment_frame_from_thresholds,
+     merge_instance_segmentation,
+ )
  import gc
  from art import tprint

  import concurrent.futures

+
  class BaseSegmentProcess(Process):

-     def __init__(self, queue=None, process_args=None, *args, **kwargs):
-
-         super().__init__(*args, **kwargs)
-
-         self.queue = queue
+     def __init__(self, queue=None, process_args=None, *args, **kwargs):
+
+         super().__init__(*args, **kwargs)
+
+         self.queue = queue
+
+         if process_args is not None:
+             for key, value in process_args.items():
+                 setattr(self, key, value)

-         if process_args is not None:
-             for key, value in process_args.items():
-                 setattr(self, key, value)
+         tprint("Segment")

-         tprint("Segment")
+         # Experiment
+         self.locate_experiment_config()

-         # Experiment
-         self.locate_experiment_config()
+         print(f"Position: {extract_position_name(self.pos)}...")
+         print("Configuration file: ", self.config)
+         print(f"Population: {self.mode}...")
+         self.instruction_file = os.sep.join(
+             ["configs", f"segmentation_instructions_{self.mode}.json"]
+         )

-         print(f"Position: {extract_position_name(self.pos)}...")
-         print("Configuration file: ",self.config)
-         print(f"Population: {self.mode}...")
+         self.read_instructions()
+         self.extract_experiment_parameters()
+         self.detect_movie_length()
+         self.write_folders()

-         self.extract_experiment_parameters()
-         self.detect_movie_length()
-         self.write_folders()
+     def read_instructions(self):
+         print("Looking for instruction file...")
+         instr_path = PurePath(self.exp_dir, Path(f"{self.instruction_file}"))
+         if os.path.exists(instr_path):
+             with open(instr_path, "r") as f:
+                 _instructions = json.load(f)
+             print(f"Measurement instruction file successfully loaded...")
+             print(f"Instructions: {_instructions}...")
+             self.flip = _instructions.get("flip", False)
+         else:
+             self.flip = False

-     def write_folders(self):
+     def write_folders(self):

-         self.mode = self.mode.lower()
-         self.label_folder = f"labels_{self.mode}"
+         self.mode = self.mode.lower()
+         self.label_folder = f"labels_{self.mode}"

-         if os.path.exists(self.pos+self.label_folder):
-             print('Erasing the previous labels folder...')
-             rmtree(self.pos+self.label_folder)
-         os.mkdir(self.pos+self.label_folder)
-         print(f'Labels folder successfully generated...')
+         if os.path.exists(self.pos + self.label_folder):
+             print("Erasing the previous labels folder...")
+             rmtree(self.pos + self.label_folder)
+         os.mkdir(self.pos + self.label_folder)
+         print(f"Labels folder successfully generated...")

+     def extract_experiment_parameters(self):

-     def extract_experiment_parameters(self):
+         self.spatial_calibration = float(
+             config_section_to_dict(self.config, "MovieSettings")["pxtoum"]
+         )
+         self.len_movie = float(
+             config_section_to_dict(self.config, "MovieSettings")["len_movie"]
+         )
+         self.movie_prefix = config_section_to_dict(self.config, "MovieSettings")[
+             "movie_prefix"
+         ]
+         self.nbr_channels = _extract_nbr_channels_from_config(self.config)
+         self.channel_names, self.channel_indices = extract_experiment_channels(
+             self.exp_dir
+         )

-         self.spatial_calibration = float(ConfigSectionMap(self.config,"MovieSettings")["pxtoum"])
-         self.len_movie = float(ConfigSectionMap(self.config,"MovieSettings")["len_movie"])
-         self.movie_prefix = ConfigSectionMap(self.config,"MovieSettings")["movie_prefix"]
-         self.nbr_channels = _extract_nbr_channels_from_config(self.config)
-         self.channel_names, self.channel_indices = extract_experiment_channels(self.exp_dir)
+     def locate_experiment_config(self):

-     def locate_experiment_config(self):
+         parent1 = Path(self.pos).parent
+         self.exp_dir = parent1.parent
+         self.config = PurePath(self.exp_dir, Path("config.ini"))

-         parent1 = Path(self.pos).parent
-         self.exp_dir = parent1.parent
-         self.config = PurePath(self.exp_dir,Path("config.ini"))
+         if not os.path.exists(self.config):
+             print(
+                 "The configuration file for the experiment could not be located. Abort."
+             )
+             self.abort_process()

-         if not os.path.exists(self.config):
-             print('The configuration file for the experiment could not be located. Abort.')
-             self.abort_process()
+     def detect_movie_length(self):

-     def detect_movie_length(self):
+         try:
+             self.file = glob(self.pos + f"movie/{self.movie_prefix}*.tif")[0]
+         except Exception as e:
+             print(f"Error {e}.\nMovie could not be found. Check the prefix.")
+             self.abort_process()

-         try:
-             self.file = glob(self.pos+f"movie/{self.movie_prefix}*.tif")[0]
-         except Exception as e:
-             print(f'Error {e}.\nMovie could not be found. Check the prefix.')
-             self.abort_process()
+         len_movie_auto = auto_load_number_of_frames(self.file)
+         if len_movie_auto is not None:
+             self.len_movie = len_movie_auto

-         len_movie_auto = auto_load_number_of_frames(self.file)
-         if len_movie_auto is not None:
-             self.len_movie = len_movie_auto
+     def end_process(self):

-     def end_process(self):
+         self.terminate()
+         self.queue.put("finished")

-         self.terminate()
-         self.queue.put("finished")
+     def abort_process(self):

-     def abort_process(self):
-
-         self.terminate()
-         self.queue.put("error")
+         self.terminate()
+         self.queue.put("error")


  class SegmentCellDLProcess(BaseSegmentProcess):
-
-     def __init__(self, *args, **kwargs):
-
-         super().__init__(*args, **kwargs)

-         self.check_gpu()
+     def __init__(self, *args, **kwargs):
+
+         super().__init__(*args, **kwargs)
+
+         self.check_gpu()
+
+         # Model
+         self.locate_model_path()
+         self.extract_model_input_parameters()
+         self.detect_channels()
+         self.detect_rescaling()

-         # Model
-         self.locate_model_path()
-         self.extract_model_input_parameters()
-         self.detect_channels()
-         self.detect_rescaling()
+         self.write_log()

-         self.write_log()
+         self.sum_done = 0
+         self.t0 = time.time()

-         self.sum_done = 0
-         self.t0 = time.time()
+     def extract_model_input_parameters(self):

-     def extract_model_input_parameters(self):
-
-         self.required_channels = self.input_config["channels"]
-         if 'selected_channels' in self.input_config:
-             self.required_channels = self.input_config['selected_channels']
-
-         self.target_cell_size = None
-         if 'target_cell_size_um' in self.input_config and 'cell_size_um' in self.input_config:
-             self.target_cell_size = self.input_config['target_cell_size_um']
-             self.cell_size = self.input_config['cell_size_um']
-
-         self.normalize_kwargs = _get_normalize_kwargs_from_config(self.input_config)
-
-         self.model_type = self.input_config['model_type']
-         self.required_spatial_calibration = self.input_config['spatial_calibration']
-         print(f'Spatial calibration expected by the model: {self.required_spatial_calibration}...')
-
-         if self.model_type=='cellpose':
-             self.diameter = self.input_config['diameter']
-             self.cellprob_threshold = self.input_config['cellprob_threshold']
-             self.flow_threshold = self.input_config['flow_threshold']
-
-     def write_log(self):
-
-         log=f'segmentation model: {self.model_name}\n'
-         with open(self.pos+f'log_{self.mode}.txt', 'a') as f:
-             f.write(f'{datetime.datetime.now()} SEGMENT \n')
-             f.write(log)
-
-     def detect_channels(self):
-
-         self.channel_indices = _extract_channel_indices_from_config(self.config, self.required_channels)
-         print(f'Required channels: {self.required_channels} located at channel indices {self.channel_indices}.')
-         self.img_num_channels = _get_img_num_per_channel(self.channel_indices, int(self.len_movie), self.nbr_channels)
-
-     def detect_rescaling(self):
-
-         self.scale = _estimate_scale_factor(self.spatial_calibration, self.required_spatial_calibration)
-         print(f"Scale: {self.scale}...")
-
-         if self.target_cell_size is not None and self.scale is not None:
-             self.scale *= self.cell_size / self.target_cell_size
-         elif self.target_cell_size is not None:
-             if self.target_cell_size != self.cell_size:
-                 self.scale = self.cell_size / self.target_cell_size
-
-         print(f"Scale accounting for expected cell size: {self.scale}...")
-
-     def locate_model_path(self):
-
-         self.model_complete_path = locate_segmentation_model(self.model_name)
-         if self.model_complete_path is None:
-             print('Model could not be found. Abort.')
-             self.abort_process()
-         else:
-             print(f'Model path: {self.model_complete_path}...')
-
-         if not os.path.exists(self.model_complete_path+"config_input.json"):
-             print('The configuration for the inputs to the model could not be located. Abort.')
-             self.abort_process()
-
-         with open(self.model_complete_path+"config_input.json") as config_file:
-             self.input_config = json.load(config_file)
-
-     def check_gpu(self):
-
-         if not self.use_gpu:
-             os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-
-     def run(self):
-
-         try:
-
-             if self.model_type=='stardist':
-                 model, scale_model = _prep_stardist_model(self.model_name, Path(self.model_complete_path).parent, use_gpu=self.use_gpu, scale=self.scale)
-
-             elif self.model_type=='cellpose':
-                 model, scale_model = _prep_cellpose_model(self.model_name, self.model_complete_path, use_gpu=self.use_gpu, n_channels=len(self.required_channels), scale=self.scale)
-
-             list_indices = range(self.len_movie)
-             if self.flip:
-                 list_indices = reversed(list_indices)
-
-             for t in tqdm(list_indices,desc="frame"):
-
-                 f = _load_frames_to_segment(self.file, self.img_num_channels[:,t], scale_model=scale_model, normalize_kwargs=self.normalize_kwargs)
-
-                 if self.model_type=="stardist":
-                     Y_pred = _segment_image_with_stardist_model(f, model=model, return_details=False)
-
-                 elif self.model_type=="cellpose":
-                     Y_pred = _segment_image_with_cellpose_model(f, model=model, diameter=self.diameter, cellprob_threshold=self.cellprob_threshold, flow_threshold=self.flow_threshold)
-
-                 if self.scale is not None:
-                     Y_pred = _rescale_labels(Y_pred, scale_model=scale_model)
-
-                 Y_pred = _check_label_dims(Y_pred, file=self.file)
-
-                 save_tiff_imagej_compatible(self.pos+os.sep.join([self.label_folder,f"{str(t).zfill(4)}.tif"]), Y_pred, axes='YX')
-
-                 del f;
-                 del Y_pred;
-                 gc.collect()
-
-                 # Send signal for progress bar
-                 self.sum_done+=1/self.len_movie*100
-                 mean_exec_per_step = (time.time() - self.t0) / (t+1)
-                 pred_time = (self.len_movie - (t+1)) * mean_exec_per_step
-                 self.queue.put([self.sum_done, pred_time])
-
-         except Exception as e:
-             print(e)
-
-         try:
-             del model
-         except:
-             pass
-
-         gc.collect()
-         print("Done.")
+         self.required_channels = self.input_config["channels"]
+         if "selected_channels" in self.input_config:
+             self.required_channels = self.input_config["selected_channels"]

-         # Send end signal
-         self.queue.put("finished")
-         self.queue.close()
+         self.target_cell_size = None
+         if (
+             "target_cell_size_um" in self.input_config
+             and "cell_size_um" in self.input_config
+         ):
+             self.target_cell_size = self.input_config["target_cell_size_um"]
+             self.cell_size = self.input_config["cell_size_um"]
+
+         self.normalize_kwargs = _get_normalize_kwargs_from_config(self.input_config)
+
+         self.model_type = self.input_config["model_type"]
+         self.required_spatial_calibration = self.input_config["spatial_calibration"]
+         print(
+             f"Spatial calibration expected by the model: {self.required_spatial_calibration}..."
+         )
+
+         if self.model_type == "cellpose":
+             self.diameter = self.input_config["diameter"]
+             self.cellprob_threshold = self.input_config["cellprob_threshold"]
+             self.flow_threshold = self.input_config["flow_threshold"]
+
+     def write_log(self):
+
+         log = f"segmentation model: {self.model_name}\n"
+         with open(self.pos + f"log_{self.mode}.txt", "a") as f:
+             f.write(f"{datetime.datetime.now()} SEGMENT \n")
+             f.write(log)
+
+     def detect_channels(self):
+
+         self.channel_indices = _extract_channel_indices_from_config(
+             self.config, self.required_channels
+         )
+         print(
+             f"Required channels: {self.required_channels} located at channel indices {self.channel_indices}."
+         )
+         self.img_num_channels = _get_img_num_per_channel(
+             self.channel_indices, int(self.len_movie), self.nbr_channels
+         )
+
+     def detect_rescaling(self):
+
+         self.scale = _estimate_scale_factor(
+             self.spatial_calibration, self.required_spatial_calibration
+         )
+         print(f"Scale: {self.scale}...")
+
+         if self.target_cell_size is not None and self.scale is not None:
+             self.scale *= self.cell_size / self.target_cell_size
+         elif self.target_cell_size is not None:
+             if self.target_cell_size != self.cell_size:
+                 self.scale = self.cell_size / self.target_cell_size
+
+         print(f"Scale accounting for expected cell size: {self.scale}...")
+
+     def locate_model_path(self):
+
+         self.model_complete_path = locate_segmentation_model(self.model_name)
+         if self.model_complete_path is None:
+             print("Model could not be found. Abort.")
+             self.abort_process()
+         else:
+             print(f"Model path: {self.model_complete_path}...")
+
+         if not os.path.exists(self.model_complete_path + "config_input.json"):
+             print(
+                 "The configuration for the inputs to the model could not be located. Abort."
+             )
+             self.abort_process()
+
+         with open(self.model_complete_path + "config_input.json") as config_file:
+             self.input_config = json.load(config_file)
+
+     def check_gpu(self):
+
+         if not self.use_gpu:
+             os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+     def run(self):
+
+         try:
+
+             if self.model_type == "stardist":
+                 model, scale_model = _prep_stardist_model(
+                     self.model_name,
+                     Path(self.model_complete_path).parent,
+                     use_gpu=self.use_gpu,
+                     scale=self.scale,
+                 )
+
+             elif self.model_type == "cellpose":
+                 model, scale_model = _prep_cellpose_model(
+                     self.model_name,
+                     self.model_complete_path,
+                     use_gpu=self.use_gpu,
+                     n_channels=len(self.required_channels),
+                     scale=self.scale,
+                 )
+
+             list_indices = range(self.len_movie)
+             if self.flip:
+                 list_indices = reversed(list_indices)
+
+             for i, t in enumerate(tqdm(list_indices, desc="frame")):
+
+                 f = _load_frames_to_segment(
+                     self.file,
+                     self.img_num_channels[:, t],
+                     scale_model=scale_model,
+                     normalize_kwargs=self.normalize_kwargs,
+                 )
+
+                 if self.model_type == "stardist":
+                     Y_pred = _segment_image_with_stardist_model(
+                         f, model=model, return_details=False
+                     )
+
+                 elif self.model_type == "cellpose":
+                     Y_pred = _segment_image_with_cellpose_model(
+                         f,
+                         model=model,
+                         diameter=self.diameter,
+                         cellprob_threshold=self.cellprob_threshold,
+                         flow_threshold=self.flow_threshold,
+                     )
+
+                 if self.scale is not None:
+                     Y_pred = _rescale_labels(Y_pred, scale_model=scale_model)
+
+                 Y_pred = _check_label_dims(Y_pred, file=self.file)
+
+                 save_tiff_imagej_compatible(
+                     self.pos
+                     + os.sep.join([self.label_folder, f"{str(t).zfill(4)}.tif"]),
+                     Y_pred,
+                     axes="YX",
+                 )
+
+                 del f
+                 del Y_pred
+                 gc.collect()
+
+                 # Send signal for progress bar
+                 self.sum_done += 1 / self.len_movie * 100
+                 mean_exec_per_step = (time.time() - self.t0) / (i + 1)
+                 pred_time = (self.len_movie - (i + 1)) * mean_exec_per_step
+                 self.queue.put([self.sum_done, pred_time])
+
+         except Exception as e:
+             print(e)
+
+         try:
+             del model
+         except:
+             pass
+
+         gc.collect()
+         print("Done.")
+
+         # Send end signal
+         self.queue.put("finished")
+         self.queue.close()


  class SegmentCellThresholdProcess(BaseSegmentProcess):
247
- def __init__(self, *args, **kwargs):
248
-
249
- super().__init__(*args, **kwargs)
250
339
 
251
- self.equalize = False
340
+ def __init__(self, *args, **kwargs):
341
+
342
+ super().__init__(*args, **kwargs)
343
+
344
+ self.equalize = False
345
+
346
+ # Model
347
+
348
+ self.load_threshold_config()
349
+ self.extract_threshold_parameters()
350
+ self.detect_channels()
351
+ self.prepare_equalize()
252
352
 
253
- # Model
353
+ self.write_log()
254
354
 
255
- self.load_threshold_config()
256
- self.extract_threshold_parameters()
257
- self.detect_channels()
258
- self.prepare_equalize()
355
+ self.sum_done = 0
356
+ self.t0 = time.time()
259
357
 
260
- self.write_log()
358
+ def prepare_equalize(self):
261
359
 
262
- self.sum_done = 0
263
- self.t0 = time.time()
360
+ for i in range(len(self.instructions)):
264
361
 
265
- def prepare_equalize(self):
362
+ if self.equalize[i]:
363
+ f_reference = load_frames(
364
+ self.img_num_channels[:, self.equalize_time[i]],
365
+ self.file,
366
+ scale=None,
367
+ normalize_input=False,
368
+ )
369
+ f_reference = f_reference[:, :, self.instructions[i]["target_channel"]]
370
+ else:
371
+ f_reference = None
372
+
373
+ self.instructions[i].update({"equalize_reference": f_reference})
374
+
375
+ def load_threshold_config(self):
266
376
 
267
- for i in range(len(self.instructions)):
377
+ self.instructions = []
378
+ for inst in self.threshold_instructions:
379
+ if os.path.exists(inst):
380
+ with open(inst, "r") as f:
381
+ self.instructions.append(json.load(f))
382
+ else:
383
+ print("The configuration path is not valid. Abort.")
384
+ self.abort_process()
268
385
 
269
- if self.equalize[i]:
270
- f_reference = load_frames(self.img_num_channels[:,self.equalize_time[i]], self.file, scale=None, normalize_input=False)
271
- f_reference = f_reference[:,:,self.instructions[i]['target_channel']]
272
- else:
273
- f_reference = None
274
-
275
- self.instructions[i].update({'equalize_reference': f_reference})
276
-
277
- def load_threshold_config(self):
278
-
279
- self.instructions = []
280
- for inst in self.threshold_instructions:
281
- if os.path.exists(inst):
282
- with open(inst, 'r') as f:
283
- self.instructions.append(json.load(f))
284
- else:
285
- print('The configuration path is not valid. Abort.')
286
- self.abort_process()
287
-
288
- def extract_threshold_parameters(self):
289
-
290
- self.required_channels = []
291
- self.equalize = []
292
- self.equalize_time = []
293
-
294
- for i in range(len(self.instructions)):
295
- ch = [self.instructions[i]['target_channel']]
296
- self.required_channels.append(ch)
297
-
298
- if 'equalize_reference' in self.instructions[i]:
299
- equalize, equalize_time = self.instructions[i]['equalize_reference']
300
- self.equalize.append(equalize)
301
- self.equalize_time.append(equalize_time)
302
-
303
- def write_log(self):
304
-
305
- log=f'Threshold segmentation: {self.threshold_instructions}\n'
306
- with open(self.pos+f'log_{self.mode}.txt', 'a') as f:
307
- f.write(f'{datetime.datetime.now()} SEGMENT \n')
308
- f.write(log)
309
-
310
- def detect_channels(self):
311
-
312
- for i in range(len(self.instructions)):
313
-
314
- self.channel_indices = _extract_channel_indices_from_config(self.config, self.required_channels[i])
315
- print(f'Required channels: {self.required_channels[i]} located at channel indices {self.channel_indices}.')
316
- self.instructions[i].update({'target_channel': self.channel_indices[0]})
317
- self.instructions[i].update({'channel_names': self.channel_names})
318
-
319
- self.img_num_channels = _get_img_num_per_channel(np.arange(self.nbr_channels), self.len_movie, self.nbr_channels)
320
-
321
- def parallel_job(self, indices):
322
-
323
- try:
324
-
325
- for t in tqdm(indices,desc="frame"): #for t in tqdm(range(self.len_movie),desc="frame"):
326
-
327
- # Load channels at time t
328
- masks = []
329
- for i in range(len(self.instructions)):
330
- f = load_frames(self.img_num_channels[:,t], self.file, scale=None, normalize_input=False)
331
- mask = segment_frame_from_thresholds(f, **self.instructions[i])
332
- #print(f'Frame {t}; segment with {self.instructions[i]=}...')
333
- masks.append(mask)
334
-
335
- if len(self.instructions)>1:
336
- mask = merge_instance_segmentation(masks, mode='OR')
337
-
338
- save_tiff_imagej_compatible(os.sep.join([self.pos, self.label_folder, f"{str(t).zfill(4)}.tif"]), mask.astype(np.uint16), axes='YX')
339
-
340
- del f;
341
- del mask;
342
- gc.collect()
343
-
344
- # Send signal for progress bar
345
- self.sum_done+=1/self.len_movie*100
346
- mean_exec_per_step = (time.time() - self.t0) / (self.sum_done*self.len_movie / 100 + 1)
347
- pred_time = (self.len_movie - (self.sum_done*self.len_movie / 100 + 1)) * mean_exec_per_step
348
- self.queue.put([self.sum_done, pred_time])
386
+ def extract_threshold_parameters(self):
349
387
 
350
- except Exception as e:
351
- print(e)
352
-
353
- return
354
-
355
-
356
- def run(self):
357
-
358
- self.indices = list(range(self.img_num_channels.shape[1]))
359
- if self.flip:
360
- self.indices = reversed(self.indices)
361
-
362
- chunks = np.array_split(self.indices, self.n_threads)
363
-
364
- with concurrent.futures.ThreadPoolExecutor(max_workers=self.n_threads) as executor:
365
- results = results = executor.map(self.parallel_job, chunks) #list(map(lambda x: executor.submit(self.parallel_job, x), chunks))
366
- try:
367
- for i,return_value in enumerate(results):
368
- print(f"Thread {i} output check: ",return_value)
369
- except Exception as e:
370
- print("Exception: ", e)
371
-
372
- print('Done.')
373
- # Send end signal
374
- self.queue.put("finished")
375
- self.queue.close()
388
+ self.required_channels = []
389
+ self.equalize = []
390
+ self.equalize_time = []
391
+
392
+ for i in range(len(self.instructions)):
393
+ ch = [self.instructions[i]["target_channel"]]
394
+ self.required_channels.append(ch)
395
+
396
+ if "equalize_reference" in self.instructions[i]:
397
+ equalize, equalize_time = self.instructions[i]["equalize_reference"]
398
+ self.equalize.append(equalize)
399
+ self.equalize_time.append(equalize_time)
400
+
401
+ def write_log(self):
402
+
403
+ log = f"Threshold segmentation: {self.threshold_instructions}\n"
404
+ with open(self.pos + f"log_{self.mode}.txt", "a") as f:
405
+ f.write(f"{datetime.datetime.now()} SEGMENT \n")
406
+ f.write(log)
407
+
408
+ def detect_channels(self):
409
+
410
+ for i in range(len(self.instructions)):
411
+
412
+ self.channel_indices = _extract_channel_indices_from_config(
413
+ self.config, self.required_channels[i]
414
+ )
415
+ print(
416
+ f"Required channels: {self.required_channels[i]} located at channel indices {self.channel_indices}."
417
+ )
418
+ self.instructions[i].update({"target_channel": self.channel_indices[0]})
419
+ self.instructions[i].update({"channel_names": self.channel_names})
420
+
421
+ self.img_num_channels = _get_img_num_per_channel(
422
+ np.arange(self.nbr_channels), self.len_movie, self.nbr_channels
423
+ )
424
+
425
+ def parallel_job(self, indices):
426
+
427
+ try:
428
+
429
+ for t in tqdm(
430
+ indices, desc="frame"
431
+ ): # for t in tqdm(range(self.len_movie),desc="frame"):
432
+
433
+ # Load channels at time t
434
+ masks = []
435
+ for i in range(len(self.instructions)):
436
+ f = load_frames(
437
+ self.img_num_channels[:, t],
438
+ self.file,
439
+ scale=None,
440
+ normalize_input=False,
441
+ )
442
+ mask = segment_frame_from_thresholds(f, **self.instructions[i])
443
+ # print(f'Frame {t}; segment with {self.instructions[i]=}...')
444
+ masks.append(mask)
445
+
446
+ if len(self.instructions) > 1:
447
+ mask = merge_instance_segmentation(masks, mode="OR")
448
+
449
+ save_tiff_imagej_compatible(
450
+ os.sep.join(
451
+ [self.pos, self.label_folder, f"{str(t).zfill(4)}.tif"]
452
+ ),
453
+ mask.astype(np.uint16),
454
+ axes="YX",
455
+ )
456
+
457
+ del f
458
+ del mask
459
+ gc.collect()
460
+
461
+ # Send signal for progress bar
462
+ self.sum_done += 1 / self.len_movie * 100
463
+ mean_exec_per_step = (time.time() - self.t0) / (
464
+ self.sum_done * self.len_movie / 100 + 1
465
+ )
466
+ pred_time = (
467
+ self.len_movie - (self.sum_done * self.len_movie / 100 + 1)
468
+ ) * mean_exec_per_step
469
+ self.queue.put([self.sum_done, pred_time])
470
+
471
+ except Exception as e:
472
+ print(e)
473
+
474
+ return
475
+
476
+ def run(self):
477
+
478
+ self.indices = list(range(self.img_num_channels.shape[1]))
479
+ if self.flip:
480
+ self.indices = np.array(list(reversed(self.indices)))
481
+
482
+ chunks = np.array_split(self.indices, self.n_threads)
483
+
484
+ with concurrent.futures.ThreadPoolExecutor(
485
+ max_workers=self.n_threads
486
+ ) as executor:
487
+ results = results = executor.map(
488
+ self.parallel_job, chunks
489
+ ) # list(map(lambda x: executor.submit(self.parallel_job, x), chunks))
490
+ try:
491
+ for i, return_value in enumerate(results):
492
+ print(f"Thread {i} output check: ", return_value)
493
+ except Exception as e:
494
+ print("Exception: ", e)
495
+
496
+ print("Done.")
497
+ # Send end signal
498
+ self.queue.put("finished")
499
+ self.queue.close()
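
Note on the refactor above: the new code reads config.ini sections through config_section_to_dict (imported from celldetective.utils) instead of the old ConfigSectionMap helper. The snippet below is only a hypothetical, minimal sketch of what such a configparser-backed helper could look like at those call sites; it is not the actual celldetective implementation, which may handle missing files, sections, and type conversion differently.

    # Hypothetical sketch only; the real config_section_to_dict lives in celldetective/utils.py.
    from configparser import ConfigParser

    def config_section_to_dict(config_path, section):
        # Parse the experiment's .ini file and return one section as a plain dict
        # (configparser lower-cases keys by default, e.g. "pxtoum", "len_movie").
        parser = ConfigParser()
        parser.read(config_path)
        return dict(parser.items(section))

    # Usage mirroring extract_experiment_parameters() in the diff:
    # spatial_calibration = float(config_section_to_dict("config.ini", "MovieSettings")["pxtoum"])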