celldetective 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. celldetective/__init__.py +2 -0
  2. celldetective/__main__.py +432 -0
  3. celldetective/datasets/segmentation_annotations/blank +0 -0
  4. celldetective/datasets/signal_annotations/blank +0 -0
  5. celldetective/events.py +149 -0
  6. celldetective/extra_properties.py +100 -0
  7. celldetective/filters.py +89 -0
  8. celldetective/gui/__init__.py +20 -0
  9. celldetective/gui/about.py +44 -0
  10. celldetective/gui/analyze_block.py +563 -0
  11. celldetective/gui/btrack_options.py +898 -0
  12. celldetective/gui/classifier_widget.py +386 -0
  13. celldetective/gui/configure_new_exp.py +532 -0
  14. celldetective/gui/control_panel.py +438 -0
  15. celldetective/gui/gui_utils.py +495 -0
  16. celldetective/gui/json_readers.py +113 -0
  17. celldetective/gui/measurement_options.py +1425 -0
  18. celldetective/gui/neighborhood_options.py +452 -0
  19. celldetective/gui/plot_signals_ui.py +1042 -0
  20. celldetective/gui/process_block.py +1055 -0
  21. celldetective/gui/retrain_segmentation_model_options.py +706 -0
  22. celldetective/gui/retrain_signal_model_options.py +643 -0
  23. celldetective/gui/seg_model_loader.py +460 -0
  24. celldetective/gui/signal_annotator.py +2388 -0
  25. celldetective/gui/signal_annotator_options.py +340 -0
  26. celldetective/gui/styles.py +217 -0
  27. celldetective/gui/survival_ui.py +903 -0
  28. celldetective/gui/tableUI.py +608 -0
  29. celldetective/gui/thresholds_gui.py +1300 -0
  30. celldetective/icons/logo-large.png +0 -0
  31. celldetective/icons/logo.png +0 -0
  32. celldetective/icons/signals_icon.png +0 -0
  33. celldetective/icons/splash-test.png +0 -0
  34. celldetective/icons/splash.png +0 -0
  35. celldetective/icons/splash0.png +0 -0
  36. celldetective/icons/survival2.png +0 -0
  37. celldetective/icons/vignette_signals2.png +0 -0
  38. celldetective/icons/vignette_signals2.svg +114 -0
  39. celldetective/io.py +2050 -0
  40. celldetective/links/zenodo.json +561 -0
  41. celldetective/measure.py +1258 -0
  42. celldetective/models/segmentation_effectors/blank +0 -0
  43. celldetective/models/segmentation_generic/blank +0 -0
  44. celldetective/models/segmentation_targets/blank +0 -0
  45. celldetective/models/signal_detection/blank +0 -0
  46. celldetective/models/tracking_configs/mcf7.json +68 -0
  47. celldetective/models/tracking_configs/ricm.json +203 -0
  48. celldetective/models/tracking_configs/ricm2.json +203 -0
  49. celldetective/neighborhood.py +717 -0
  50. celldetective/scripts/analyze_signals.py +51 -0
  51. celldetective/scripts/measure_cells.py +275 -0
  52. celldetective/scripts/segment_cells.py +212 -0
  53. celldetective/scripts/segment_cells_thresholds.py +140 -0
  54. celldetective/scripts/track_cells.py +206 -0
  55. celldetective/scripts/train_segmentation_model.py +246 -0
  56. celldetective/scripts/train_signal_model.py +49 -0
  57. celldetective/segmentation.py +712 -0
  58. celldetective/signals.py +2826 -0
  59. celldetective/tracking.py +974 -0
  60. celldetective/utils.py +1681 -0
  61. celldetective-1.0.2.dist-info/LICENSE +674 -0
  62. celldetective-1.0.2.dist-info/METADATA +192 -0
  63. celldetective-1.0.2.dist-info/RECORD +66 -0
  64. celldetective-1.0.2.dist-info/WHEEL +5 -0
  65. celldetective-1.0.2.dist-info/entry_points.txt +2 -0
  66. celldetective-1.0.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,51 @@
"""
Copyright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.

Command-line script: classify and regress the single-cell signals of one
position with a trained signal-detection model, then write the annotated
trajectory table back to disk, sorted per track and frame.
"""

import argparse
import os
import gc

import pandas as pd
from art import tprint

from celldetective.signals import analyze_signals

tprint("Signals")

parser = argparse.ArgumentParser(description="Classify and regress the signals based on the provided model.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', "--position", required=True, help="Path to the position")
parser.add_argument('-m', "--model", required=True, help="Path to the model")
parser.add_argument("--mode", default="target", choices=["target", "effector", "targets", "effectors"], help="Cell population of interest")
parser.add_argument("--use_gpu", default="True", choices=["True", "False"], help="use GPU")

args = parser.parse_args()
process_arguments = vars(args)
pos = str(process_arguments['position'])
model = str(process_arguments['model'])
mode = str(process_arguments['mode'])
# argparse delivers the flag as a string; normalise it to a real boolean.
use_gpu = str(process_arguments['use_gpu']).lower() in ('true', '1')

column_labels = {'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}

# Resolve the population-specific trajectory table name.
if mode.lower() in ("target", "targets"):
    table_name = "trajectories_targets.csv"
elif mode.lower() in ("effector", "effectors"):
    table_name = "trajectories_effectors.csv"

# Load trajectories; abort cleanly if the tracking step was never run.
# NOTE(review): `pos` is assumed to end with a path separator — confirm with callers.
table_path = pos + os.sep.join(['output', 'tables', table_name])
if os.path.exists(table_path):
    trajectories = pd.read_csv(table_path)
else:
    print('The trajectories table could not be found. Abort.')
    os.abort()

# Annotate the signals; use os.sep for the output dir instead of a
# hard-coded '/' so the path stays consistent on every platform.
trajectories = analyze_signals(trajectories.copy(), model, interpolate_na=True,
                               selected_signals=None, column_labels=column_labels,
                               plot_outcome=True, output_dir=pos + 'output' + os.sep)
trajectories = trajectories.sort_values(by=[column_labels['track'], column_labels['time']])
trajectories.to_csv(table_path, index=False)
@@ -0,0 +1,275 @@
"""
Copyright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.

Command-line script: measure morphological features and channel intensities
for one position of a multichannel timeseries, using the segmentation masks
and (when available) the trajectory table produced by the previous steps.
"""

import argparse
import os
import json
from celldetective.io import auto_load_number_of_frames, load_frames
from celldetective.utils import extract_experiment_channels, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel
from celldetective.utils import remove_redundant_features, remove_trajectory_measurements
from celldetective.measure import drop_tonal_features, measure_features, measure_isotropic_intensity
from pathlib import Path, PurePath
from glob import glob
from shutil import rmtree
from tqdm import tqdm
import numpy as np
import pandas as pd
import gc
from natsort import natsorted
from art import tprint
from tifffile import imread
import threading

tprint("Measure")

parser = argparse.ArgumentParser(description="Measure features and intensities in a multichannel timeseries.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', "--position", required=True, help="Path to the position")
parser.add_argument("--mode", default="target", choices=["target", "effector", "targets", "effectors"], help="Cell population of interest")
parser.add_argument("--threads", default="1", help="Number of parallel threads")

args = parser.parse_args()
process_arguments = vars(args)
pos = str(process_arguments['position'])
mode = str(process_arguments['mode'])
n_threads = int(process_arguments['threads'])

column_labels = {'track': "TRACK_ID", 'time': 'FRAME', 'x': 'POSITION_X', 'y': 'POSITION_Y'}

# Population-specific folder, table and instruction-file names.
if mode.lower() in ("target", "targets"):
    label_folder = "labels_targets"
    table_name = "trajectories_targets.csv"
    instruction_file = os.sep.join(["configs", "measurement_instructions_targets.json"])
elif mode.lower() in ("effector", "effectors"):
    label_folder = "labels_effectors"
    table_name = "trajectories_effectors.csv"
    instruction_file = os.sep.join(["configs", "measurement_instructions_effectors.json"])

# Locate the experiment configuration (two levels above the position folder).
parent1 = Path(pos).parent
expfolder = parent1.parent
config = PurePath(expfolder, Path("config.ini"))
assert os.path.exists(config), 'The configuration file for the experiment could not be located. Abort.'
print("Configuration file: ", config)

# From the experiment config fetch calibration and channel names.
movie_prefix = ConfigSectionMap(config, "MovieSettings")["movie_prefix"]
spatial_calibration = float(ConfigSectionMap(config, "MovieSettings")["pxtoum"])
time_calibration = float(ConfigSectionMap(config, "MovieSettings")["frametomin"])
len_movie = float(ConfigSectionMap(config, "MovieSettings")["len_movie"])
channel_names, channel_indices = extract_experiment_channels(config)
nbr_channels = len(channel_names)

# Read the measurement instructions. Every key is optional, so fetch each
# one with dict.get and an explicit fallback — the previous if/else ladder
# had no fallback for 'background_correction', which raised a NameError
# whenever the key was missing from the JSON file.
instr_path = PurePath(expfolder, Path(f"{instruction_file}"))
if os.path.exists(instr_path):
    print(f"Tracking instructions for the {mode} population has been successfully located.")
    with open(instr_path, 'r') as f:
        instructions = json.load(f)
    print("Reading the following instructions: ", instructions)
    background_correction = instructions.get('background_correction', None)
    features = instructions.get('features', None)
    border_distances = instructions.get('border_distances', None)
    spot_detection = instructions.get('spot_detection', None)
    haralick_options = instructions.get('haralick_options', None)
    intensity_measurement_radii = instructions.get('intensity_measurement_radii', None)
    isotropic_operations = instructions.get('isotropic_operations', None)
    clear_previous = instructions.get('clear_previous', True)
else:
    print('No measurement instructions found. Use default measurements.')
    features = ['area', 'intensity_mean']
    border_distances = None
    haralick_options = None
    clear_previous = False
    background_correction = None
    spot_detection = None
    intensity_measurement_radii = 10
    isotropic_operations = ['mean']

if features is None:
    features = []

# From the position folder fetch the segmentation masks.
label_path = natsorted(glob(os.sep.join([pos, label_folder, '*.tif'])))
if len(label_path) > 0:
    print(f"Found {len(label_path)} segmented frames...")
else:
    print(f"No segmented frames have been found. Please run segmentation first, skipping... Features cannot be computed.")
    features = None
    haralick_options = None
    border_distances = None
    label_path = None

# Locate the movie; without it, tonal/texture measurements are impossible.
try:
    file = glob(pos + os.sep.join(["movie", f"{movie_prefix}*.tif"]))[0]
except IndexError:
    print('Movie could not be found. Check the prefix. If you intended to measure texture or tone, this will not be performed.')
    file = None
    # Fixed typo: was `haralick_option = None`, which left haralick enabled
    # even though no image is available to compute texture on.
    haralick_options = None
    features = drop_tonal_features(features)

# Load trajectories; add centroid as a substitute if no table exists.
trajectories = pos + os.sep.join(['output', 'tables', table_name])
if os.path.exists(trajectories):
    trajectories = pd.read_csv(trajectories)
    if 'TRACK_ID' not in list(trajectories.columns):
        # Untracked detections: per-track isotropic intensities make no sense.
        do_iso_intensities = False
        intensity_measurement_radii = None
        if clear_previous:
            trajectories = remove_trajectory_measurements(trajectories, column_labels)
    else:
        if clear_previous:
            trajectories = remove_trajectory_measurements(trajectories, column_labels)
else:
    # No table at all: derive positions from the masks frame by frame.
    trajectories = None
    do_features = True
    features += ['centroid']
    do_iso_intensities = False

if (features is not None) and (trajectories is not None):
    features = remove_redundant_features(features,
                                         trajectories.columns,
                                         channel_names=channel_names
                                         )

len_movie_auto = auto_load_number_of_frames(file)
if len_movie_auto is not None:
    len_movie = len_movie_auto

img_num_channels = _get_img_num_per_channel(channel_indices, len_movie, nbr_channels)

# Decide what can actually be measured with the available inputs.
if (file is None) or (intensity_measurement_radii is None):
    do_iso_intensities = False
    print('Either no image, no positions or no radii were provided... Isotropic intensities will not be computed...')
else:
    do_iso_intensities = True

if label_path is None:
    do_features = False
    print('No labels were provided... Features will not be computed...')
else:
    do_features = True

#######################################
# Loop over all frames and find objects
#######################################

timestep_dataframes = []
if trajectories is None:
    print('Use features as a substitute for the trajectory table.')
    if 'label' not in features:
        features.append('label')

def measure_index(indices):
    """Measure one chunk of frame indices (worker body for the thread pool).

    For every frame t in *indices*: load the image channels and label mask,
    compute per-cell features and/or isotropic intensities, and append the
    per-frame measurement table to the shared `timestep_dataframes` list
    (list.append is atomic under the GIL).
    """

    # The trajectory-less branch rewrites the shared column mapping so the
    # downstream sort uses the temporary 'ID' column.
    global column_labels

    for t in tqdm(indices, desc="frame"):

        # Fixes a NameError: previously this name was only bound inside the
        # measurement branches, so a frame where nothing could be measured
        # crashed on the `is not None` check below.
        measurements_at_t = None

        if file is not None:
            img = load_frames(img_num_channels[:, t], file, scale=None, normalize_input=False)

        if label_path is not None:
            lbl = imread(label_path[t])

        if trajectories is not None:
            positions_at_t = trajectories.loc[trajectories[column_labels['time']] == t].copy()

        if do_features:
            feature_table = measure_features(img, lbl, features=features, border_dist=border_distances,
                                             channels=channel_names, haralick_options=haralick_options, verbose=False,
                                             normalisation_list=background_correction, spot_detection=spot_detection)
            if trajectories is None:
                positions_at_t = feature_table[['centroid-1', 'centroid-0', 'class_id']].copy()
                positions_at_t['ID'] = np.arange(len(positions_at_t))  # temporary ID for the cells, that will be reset at the end since they are not tracked
                positions_at_t.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y'}, inplace=True)
                positions_at_t['FRAME'] = int(t)
                column_labels = {'track': "ID", 'time': column_labels['time'], 'x': column_labels['x'],
                                 'y': column_labels['y']}
            feature_table.rename(columns={'centroid-1': 'POSITION_X', 'centroid-0': 'POSITION_Y'}, inplace=True)

        if do_iso_intensities:
            iso_table = measure_isotropic_intensity(positions_at_t, img, channels=channel_names, intensity_measurement_radii=intensity_measurement_radii, column_labels=column_labels, operations=isotropic_operations, verbose=False)

        # Merge the partial tables; '_delme' suffixes mark duplicated columns
        # brought in by the outer join, dropped right after.
        if do_iso_intensities and do_features:
            measurements_at_t = iso_table.merge(feature_table, how='outer', on='class_id', suffixes=('', '_delme'))
            measurements_at_t = measurements_at_t[[c for c in measurements_at_t.columns if not c.endswith('_delme')]]
        elif do_iso_intensities and not do_features:
            # Was `do_iso_intensities * (not do_features)` — same truth table,
            # expressed with a boolean operator instead of multiplication.
            measurements_at_t = iso_table
        elif do_features:
            measurements_at_t = positions_at_t.merge(feature_table, how='outer', on='class_id', suffixes=('', '_delme'))
            measurements_at_t = measurements_at_t[[c for c in measurements_at_t.columns if not c.endswith('_delme')]]

        if measurements_at_t is not None:
            measurements_at_t[column_labels['time']] = t
            timestep_dataframes.append(measurements_at_t)

# Fan the frame indices out over the worker threads and wait for all of them.
all_frames = list(range(img_num_channels.shape[1]))
chunks = np.array_split(all_frames, n_threads)
threads = [threading.Thread(target=measure_index, args=[chunk]) for chunk in chunks]
for worker in threads:
    worker.start()
for worker in threads:
    worker.join()


# Assemble the per-frame tables and write the final measurement table.
if len(timestep_dataframes) > 0:
    df = pd.concat(timestep_dataframes)
    df.reset_index(inplace=True, drop=True)

    if trajectories is None:
        # Untracked cells: assign a fresh global ID over the whole movie.
        df['ID'] = np.arange(len(df))

    # Sort per track when a track column exists, per frame otherwise.
    if column_labels['track'] in df.columns:
        sort_keys = [column_labels['track'], column_labels['time']]
    else:
        sort_keys = column_labels['time']
    df = df.sort_values(by=sort_keys)

    df.to_csv(pos + os.sep.join(["output", "tables", table_name]), index=False)
    print(f'Measurements successfully written in table {pos + os.sep.join(["output", "tables", table_name])}')
    print('Done.')
else:
    print('No measurement could be performed. Check your inputs.')
    print('Done.')
@@ -0,0 +1,212 @@
"""
Copyright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.

Command-line script: segment every frame of a position's movie with the
selected StarDist or Cellpose model, and write one 16-bit label image per
frame into the population's label folder.
"""

import argparse
import os
import json
from stardist.models import StarDist2D
from cellpose.models import CellposeModel
from celldetective.io import locate_segmentation_model, auto_load_number_of_frames, load_frames
from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, normalize_per_channel
from pathlib import Path, PurePath
from glob import glob
from shutil import rmtree
from tqdm import tqdm
import numpy as np
from skimage.transform import resize
from csbdeep.io import save_tiff_imagej_compatible
import gc
from art import tprint
from scipy.ndimage import zoom
import threading

tprint("Segment")

parser = argparse.ArgumentParser(description="Segment a movie in position with the selected model",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', "--position", required=True, help="Path to the position")
parser.add_argument('-m', "--model", required=True, help="Model name")
parser.add_argument("--mode", default="target", choices=["target", "effector", "targets", "effectors"], help="Cell population of interest")
parser.add_argument("--use_gpu", default="True", choices=["True", "False"], help="use GPU")
parser.add_argument("--threads", default="1", help="Number of parallel threads")

args = parser.parse_args()
process_arguments = vars(args)
pos = str(process_arguments['position'])
mode = str(process_arguments['mode'])
modelname = str(process_arguments['model'])
# Parsed once (the previous code re-parsed --threads a second time in the
# CPU branch for no effect).
n_threads = int(process_arguments['threads'])

# Normalise the GPU flag delivered as a string by argparse.
use_gpu = str(process_arguments['use_gpu']).lower() in ('true', '1')
if use_gpu:
    n_threads = 1  # avoid misbehavior on GPU with multithreading
else:
    # Hide CUDA devices so the deep-learning backends fall back to CPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

if mode.lower() in ("target", "targets"):
    label_folder = "labels_targets"
elif mode.lower() in ("effector", "effectors"):
    label_folder = "labels_effectors"

# Locate the experiment config (two levels above the position folder).
parent1 = Path(pos).parent
expfolder = parent1.parent
config = PurePath(expfolder, Path("config.ini"))
assert os.path.exists(config), 'The configuration file for the experiment could not be located. Abort.'
print("Configuration file: ", config)

####################################
# Check model requirements #########
####################################

modelpath = os.sep.join([os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], "models"])
print(modelpath)
model_complete_path = locate_segmentation_model(modelname)
if model_complete_path is None:
    print('Model could not be found. Abort.')
    os.abort()
else:
    print(f'Model successfully located in {model_complete_path}')

# Load the model input configuration.
# NOTE(review): assumes `model_complete_path` ends with a path separator —
# confirm against locate_segmentation_model.
assert os.path.exists(model_complete_path + "config_input.json"), 'The configuration for the inputs to the model could not be located. Abort.'
with open(model_complete_path + "config_input.json") as config_file:
    input_config = json.load(config_file)

# Parse the channels the model expects and map them to the experiment's.
required_channels = input_config["channels"]

channel_indices = _extract_channel_indices_from_config(config, required_channels)
print(f'Required channels: {required_channels} located at channel indices {channel_indices}.')
required_spatial_calibration = input_config['spatial_calibration']
print(f'Expected spatial calibration is {required_spatial_calibration}.')

normalization_percentile = input_config['normalization_percentile']
normalization_clip = input_config['normalization_clip']
normalization_values = input_config['normalization_values']

model_type = input_config['model_type']

movie_prefix = ConfigSectionMap(config, "MovieSettings")["movie_prefix"]
spatial_calibration = float(ConfigSectionMap(config, "MovieSettings")["pxtoum"])
len_movie = float(ConfigSectionMap(config, "MovieSettings")["len_movie"])

# Try to find the movie file.
try:
    file = glob(pos + f"movie/{movie_prefix}*.tif")[0]
except IndexError:
    print('Movie could not be found. Check the prefix.')
    os.abort()

# Prefer the frame count read from the file itself over the config value.
len_movie_auto = auto_load_number_of_frames(file)
if len_movie_auto is not None:
    len_movie = len_movie_auto

if model_type == 'cellpose':
    diameter = input_config['diameter']
    cellprob_threshold = input_config['cellprob_threshold']
    flow_threshold = input_config['flow_threshold']

scale = _estimate_scale_factor(spatial_calibration, required_spatial_calibration)

nbr_channels = _extract_nbr_channels_from_config(config)
print(f'Number of channels in the input movie: {nbr_channels}')
img_num_channels = _get_img_num_per_channel(channel_indices, int(len_movie), nbr_channels)

# If everything is OK, reset the output folder.
print('Erasing previous segmentation folder.')
if os.path.exists(os.sep.join([pos, label_folder])):
    rmtree(os.sep.join([pos, label_folder]))
os.mkdir(os.sep.join([pos, label_folder]))
print(f'Folder {os.sep.join([pos, label_folder])} successfully generated.')

# Loop over all frames and segment
def segment_index(indices):
    """Segment one chunk of frame indices (worker body for the thread pool).

    Loads the model once per worker, then for each frame: loads and
    normalises the required channels, predicts a label map, undoes the
    model-space rescaling and writes the result as a 16-bit TIFF.
    """

    if model_type == 'stardist':
        model = StarDist2D(None, name=modelname, basedir=Path(model_complete_path).parent)
        model.config.use_gpu = use_gpu
        model.use_gpu = use_gpu
        print(f"StarDist model {modelname} successfully loaded.")

    elif model_type == 'cellpose':

        # torch imported lazily so the StarDist path never needs it.
        import torch
        if not use_gpu:
            device = torch.device("cpu")
        else:
            device = torch.device("cuda")

        model = CellposeModel(gpu=use_gpu, device=device, pretrained_model=model_complete_path + modelname, model_type=None, nchan=len(required_channels))  # diam_mean=30.0,
        model.diam_mean = 30.0
        print(f'Cellpose model {modelname} successfully loaded.')

    # The native frame shape is loop-invariant: read it once here instead of
    # reloading frame 0 from disk on every iteration (as the previous code did).
    template_shape = load_frames(0, file, scale=1, normalize_input=False).shape[:2]

    for t in tqdm(indices, desc="frame"):

        # Load and normalise the required channels at time t.
        f = load_frames(img_num_channels[:, t], file, scale=scale, normalize_input=False)
        f = normalize_per_channel([f], normalization_percentile_mode=normalization_percentile, normalization_values=normalization_values,
                                  normalization_clipping=normalization_clip)
        f = f[0]
        # Channels missing from the movie are flagged with -1: blank them.
        if np.any(img_num_channels[:, t] == -1):
            f[:, :, np.where(img_num_channels[:, t] == -1)[0]] = 0.

        if model_type == "stardist":
            Y_pred, details = model.predict_instances(f, n_tiles=model._guess_n_tiles(f), show_tile_progress=False, verbose=False)
            Y_pred = Y_pred.astype(np.uint16)

        elif model_type == "cellpose":

            img = np.moveaxis(f, -1, 0)
            Y_pred, _, _ = model.eval(img, diameter=diameter, cellprob_threshold=cellprob_threshold, flow_threshold=flow_threshold, channels=None, normalize=False)
            Y_pred = Y_pred.astype(np.uint16)

        # Undo the model-space rescaling, then snap exactly to the native shape.
        if scale is not None:
            Y_pred = zoom(Y_pred, [1. / scale, 1. / scale], order=0)

        if Y_pred.shape != template_shape:
            Y_pred = resize(Y_pred, template_shape, order=0)

        save_tiff_imagej_compatible(os.sep.join([pos, label_folder, f"{str(t).zfill(4)}.tif"]), Y_pred, axes='YX')

        del f
        del Y_pred
        gc.collect()
187
+
188
+ # Multithreading
189
+ indices = list(range(img_num_channels.shape[1]))
190
+ chunks = np.array_split(indices, n_threads)
191
+ threads = []
192
+ for i in range(n_threads):
193
+ thread_i = threading.Thread(target=segment_index, args=[chunks[i]])
194
+ threads.append(thread_i)
195
+ for th in threads:
196
+ th.start()
197
+ for th in threads:
198
+ th.join()
199
+
200
+ print('Done.')
201
+
202
+ try:
203
+ del model
204
+ except:
205
+ pass
206
+
207
+ gc.collect()
208
+
209
+
210
+
211
+
212
+
@@ -0,0 +1,140 @@
"""
Copyright © 2022 Laboratoire Adhesion et Inflammation, Authored by Remy Torro.

Command-line script: segment every frame of a position's movie with a
user-defined threshold pipeline and write one 16-bit label image per frame
into the population's label folder.
"""

import argparse
import os
import json
from celldetective.io import auto_load_number_of_frames, load_frames
from celldetective.segmentation import segment_frame_from_thresholds
from celldetective.utils import _estimate_scale_factor, _extract_channel_indices_from_config, _extract_channel_indices, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel, extract_experiment_channels
from pathlib import Path, PurePath
from glob import glob
from shutil import rmtree
from tqdm import tqdm
import numpy as np
from csbdeep.io import save_tiff_imagej_compatible
import gc
from art import tprint
import threading

tprint("Segment")

parser = argparse.ArgumentParser(description="Segment a movie in position with a threshold pipeline",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', "--position", required=True, help="Path to the position")
parser.add_argument('-c', "--config", required=True, help="Threshold instructions")
parser.add_argument("--mode", default="target", choices=["target", "effector", "targets", "effectors"], help="Cell population of interest")
parser.add_argument("--threads", default="1", help="Number of parallel threads")

args = parser.parse_args()
process_arguments = vars(args)
pos = str(process_arguments['position'])
mode = str(process_arguments['mode'])
n_threads = int(process_arguments['threads'])

threshold_instructions = str(process_arguments['config'])
equalize = False

# Load the threshold pipeline instructions (the variable starts as the JSON
# path and is rebound to the parsed dict, as downstream code expects).
if os.path.exists(threshold_instructions):
    with open(threshold_instructions, 'r') as f:
        threshold_instructions = json.load(f)
    required_channels = [threshold_instructions['target_channel']]
    if 'equalize_reference' in threshold_instructions:
        # Pair of (enable_flag, reference_frame_index).
        equalize_info = threshold_instructions['equalize_reference']
        equalize = equalize_info[0]
        equalize_time = equalize_info[1]

else:
    print('The configuration path is not valid. Abort.')
    os.abort()

print('The following instructions were successfully loaded: ', threshold_instructions)

if mode.lower() in ("target", "targets"):
    label_folder = "labels_targets"
elif mode.lower() in ("effector", "effectors"):
    label_folder = "labels_effectors"

# Locate the experiment config (two levels above the position folder).
parent1 = Path(pos).parent
expfolder = parent1.parent
config = PurePath(expfolder, Path("config.ini"))
assert os.path.exists(config), 'The configuration file for the experiment could not be located. Abort.'
print("Configuration file: ", config)


channel_indices = _extract_channel_indices_from_config(config, required_channels)
# TODO(review): abort explicitly here if the required channel is not found.
print(f'Required channels: {required_channels} located at channel indices {channel_indices}.')

# Replace the channel *name* with its index for the segmentation call.
threshold_instructions.update({'target_channel': channel_indices[0]})

movie_prefix = ConfigSectionMap(config, "MovieSettings")["movie_prefix"]
len_movie = float(ConfigSectionMap(config, "MovieSettings")["len_movie"])
channel_names, channel_indices = extract_experiment_channels(config)
threshold_instructions.update({'channel_names': channel_names})

# Try to find the movie file.
try:
    file = glob(pos + f"movie/{movie_prefix}*.tif")[0]
except IndexError:
    print('Movie could not be found. Check the prefix.')
    os.abort()

# Prefer the frame count read from the file itself over the config value.
len_movie_auto = auto_load_number_of_frames(file)
if len_movie_auto is not None:
    len_movie = len_movie_auto

nbr_channels = _extract_nbr_channels_from_config(config)
print(f'Number of channels in the input movie: {nbr_channels}')
# Cast the frame count to int for consistency with the model-based
# segmentation script (the config delivers it as a float).
img_num_channels = _get_img_num_per_channel(np.arange(nbr_channels), int(len_movie), nbr_channels)

# If everything is OK, reset the output folder.
print('Erasing previous segmentation folder.')
if os.path.exists(os.sep.join([pos, label_folder])):
    rmtree(os.sep.join([pos, label_folder]))
os.mkdir(os.sep.join([pos, label_folder]))
print(f'Folder {os.sep.join([pos, label_folder])} successfully generated.')

# Optional equalisation reference: one channel of the chosen reference frame.
if equalize:
    f_reference = load_frames(img_num_channels[:, equalize_time], file, scale=None, normalize_input=False)
    f_reference = f_reference[:, :, threshold_instructions['target_channel']]
else:
    f_reference = None

threshold_instructions.update({'equalize_reference': f_reference})
print(threshold_instructions)

# Worker: segment one chunk of frame indices with the threshold pipeline.
def segment_index(indices):

    for t in tqdm(indices, desc="frame"):

        # Load every channel at frame t, threshold it, and save the mask.
        frame = load_frames(img_num_channels[:, t], file, scale=None, normalize_input=False)
        mask = segment_frame_from_thresholds(frame, **threshold_instructions)
        save_tiff_imagej_compatible(os.sep.join([pos, label_folder, f"{str(t).zfill(4)}.tif"]), mask.astype(np.uint16), axes='YX')

        del frame
        del mask
        gc.collect()

# Split the frame indices across n_threads workers and wait for completion.
all_frames = list(range(img_num_channels.shape[1]))
chunks = np.array_split(all_frames, n_threads)
threads = [threading.Thread(target=segment_index, args=[chunk]) for chunk in chunks]
for worker in threads:
    worker.start()
for worker in threads:
    worker.join()

print('Done.')
gc.collect()