celldetective 1.3.7.post1__py3-none-any.whl → 1.3.7.post2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celldetective/_version.py +1 -1
- celldetective/gui/processes/downloader.py +108 -0
- celldetective/gui/processes/measure_cells.py +346 -0
- celldetective/gui/processes/segment_cells.py +327 -0
- celldetective/gui/processes/track_cells.py +298 -0
- celldetective/gui/processes/train_segmentation_model.py +270 -0
- celldetective/gui/processes/train_signal_model.py +108 -0
- celldetective/io.py +25 -0
- celldetective/measure.py +12 -144
- celldetective/relative_measurements.py +40 -43
- celldetective/signals.py +41 -292
- celldetective/tracking.py +16 -22
- {celldetective-1.3.7.post1.dist-info → celldetective-1.3.7.post2.dist-info}/METADATA +11 -2
- {celldetective-1.3.7.post1.dist-info → celldetective-1.3.7.post2.dist-info}/RECORD +18 -12
- {celldetective-1.3.7.post1.dist-info → celldetective-1.3.7.post2.dist-info}/WHEEL +1 -1
- {celldetective-1.3.7.post1.dist-info → celldetective-1.3.7.post2.dist-info}/LICENSE +0 -0
- {celldetective-1.3.7.post1.dist-info → celldetective-1.3.7.post2.dist-info}/entry_points.txt +0 -0
- {celldetective-1.3.7.post1.dist-info → celldetective-1.3.7.post2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
from multiprocessing import Process
|
|
2
|
+
import time
|
|
3
|
+
import datetime
|
|
4
|
+
import os
|
|
5
|
+
import json
|
|
6
|
+
from celldetective.io import extract_position_name, locate_segmentation_model, auto_load_number_of_frames, load_frames, _check_label_dims, _load_frames_to_segment
|
|
7
|
+
from celldetective.utils import _rescale_labels, _segment_image_with_stardist_model, _segment_image_with_cellpose_model, _prep_stardist_model, _prep_cellpose_model, _get_normalize_kwargs_from_config, extract_experiment_channels, _estimate_scale_factor, _extract_channel_indices_from_config, ConfigSectionMap, _extract_nbr_channels_from_config, _get_img_num_per_channel
|
|
8
|
+
from pathlib import Path, PurePath
|
|
9
|
+
from glob import glob
|
|
10
|
+
from shutil import rmtree
|
|
11
|
+
from tqdm import tqdm
|
|
12
|
+
import numpy as np
|
|
13
|
+
from csbdeep.io import save_tiff_imagej_compatible
|
|
14
|
+
from celldetective.segmentation import segment_frame_from_thresholds
|
|
15
|
+
import gc
|
|
16
|
+
from art import tprint
|
|
17
|
+
|
|
18
|
+
import concurrent.futures
|
|
19
|
+
|
|
20
|
+
class BaseSegmentProcess(Process):
    """Base multiprocessing worker that segments the cells of one position.

    ``process_args`` entries are promoted to instance attributes and must
    provide at least ``pos`` (position folder path, with trailing separator)
    and ``mode`` (cell population, e.g. 'targets' or 'effectors').
    Status messages ("finished" / "error") are pushed to ``queue``.
    """

    def __init__(self, queue=None, process_args=None, *args, **kwargs):

        super().__init__(*args, **kwargs)

        self.queue = queue

        # Promote every entry of process_args to an instance attribute
        # (e.g. self.pos, self.mode, self.movie_prefix, ...).
        if process_args is not None:
            for key, value in process_args.items():
                setattr(self, key, value)

        tprint("Segment")

        # Experiment
        self.locate_experiment_config()

        print(f"Position: {extract_position_name(self.pos)}...")
        print("Configuration file: ", self.config)
        print(f"Population: {self.mode}...")

        self.extract_experiment_parameters()
        self.detect_movie_length()
        self.write_folders()

    def write_folders(self):
        """Create a fresh label folder for the current population.

        Any pre-existing label folder for this population is erased first.
        """

        self.mode = self.mode.lower()

        if self.mode == "target" or self.mode == "targets":
            self.label_folder = "labels_targets"
        elif self.mode == "effector" or self.mode == "effectors":
            self.label_folder = "labels_effectors"
        else:
            # Fix: an unrecognized mode previously left self.label_folder
            # undefined and crashed below with an AttributeError.
            print(f"Population '{self.mode}' not recognized. Abort.")
            self.abort_process()
            return

        if os.path.exists(self.pos + self.label_folder):
            print('Erasing the previous labels folder...')
            rmtree(self.pos + self.label_folder)
        os.mkdir(self.pos + self.label_folder)
        print('Labels folder successfully generated...')

    def extract_experiment_parameters(self):
        """Read movie settings and channel layout from the experiment config."""

        self.spatial_calibration = float(ConfigSectionMap(self.config, "MovieSettings")["pxtoum"])
        self.len_movie = float(ConfigSectionMap(self.config, "MovieSettings")["len_movie"])
        self.movie_prefix = ConfigSectionMap(self.config, "MovieSettings")["movie_prefix"]
        self.nbr_channels = _extract_nbr_channels_from_config(self.config)
        self.channel_names, self.channel_indices = extract_experiment_channels(self.config)

    def locate_experiment_config(self):
        """Locate config.ini two levels above the position folder."""

        parent1 = Path(self.pos).parent
        expfolder = parent1.parent
        self.config = PurePath(expfolder, Path("config.ini"))

        if not os.path.exists(self.config):
            print('The configuration file for the experiment could not be located. Abort.')
            self.abort_process()

    def detect_movie_length(self):
        """Find the movie stack and refresh the frame count from the file itself."""

        try:
            self.file = glob(self.pos + f"movie/{self.movie_prefix}*.tif")[0]
        except Exception as e:
            print(f'Error {e}.\nMovie could not be found. Check the prefix.')
            self.abort_process()
            # Fix: without this return, execution continued with self.file unset.
            return

        # The config value may be stale; prefer the length read from the stack.
        len_movie_auto = auto_load_number_of_frames(self.file)
        if len_movie_auto is not None:
            self.len_movie = len_movie_auto

    def end_process(self):
        """Notify the listener that the job finished, then stop the process."""

        # Fix: send the message before terminate(); terminating first could
        # kill the process before the queue item was flushed to the reader.
        self.queue.put("finished")
        self.terminate()

    def abort_process(self):
        """Notify the listener that the job failed, then stop the process."""

        self.queue.put("error")
        self.terminate()
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class SegmentCellDLProcess(BaseSegmentProcess):
    """Segmentation worker driven by a deep-learning model (StarDist or Cellpose).

    In addition to the base-class requirements, ``process_args`` must provide
    ``model_name`` and ``use_gpu``.  Per-frame progress ([percent, ETA]) is
    pushed to ``queue``, followed by "finished".
    """

    def __init__(self, *args, **kwargs):

        super().__init__(*args, **kwargs)

        self.check_gpu()

        # Model
        self.locate_model_path()
        self.extract_model_input_parameters()
        self.detect_channels()
        self.detect_rescaling()

        self.write_log()

        self.sum_done = 0
        self.t0 = time.time()

    def extract_model_input_parameters(self):
        """Pull channel, normalization and model parameters from config_input.json."""

        self.required_channels = self.input_config["channels"]
        self.normalize_kwargs = _get_normalize_kwargs_from_config(self.input_config)

        self.model_type = self.input_config['model_type']
        self.required_spatial_calibration = self.input_config['spatial_calibration']
        print(f'Spatial calibration expected by the model: {self.required_spatial_calibration}...')

        # Cellpose needs extra inference parameters; StarDist does not.
        if self.model_type == 'cellpose':
            self.diameter = self.input_config['diameter']
            self.cellprob_threshold = self.input_config['cellprob_threshold']
            self.flow_threshold = self.input_config['flow_threshold']

    def write_log(self):
        """Append a SEGMENT entry with the model name to the position log."""

        log = f'segmentation model: {self.model_name}\n'
        with open(self.pos + f'log_{self.mode}.txt', 'a') as f:
            f.write(f'{datetime.datetime.now()} SEGMENT \n')
            f.write(log)

    def detect_channels(self):
        """Map the model's required channels to frame indices in the stack."""

        self.channel_indices = _extract_channel_indices_from_config(self.config, self.required_channels)
        print(f'Required channels: {self.required_channels} located at channel indices {self.channel_indices}.')
        self.img_num_channels = _get_img_num_per_channel(self.channel_indices, int(self.len_movie), self.nbr_channels)

    def detect_rescaling(self):
        """Estimate the scale factor between experiment and model calibration."""

        self.scale = _estimate_scale_factor(self.spatial_calibration, self.required_spatial_calibration)
        print(f"Scale: {self.scale}...")

    def locate_model_path(self):
        """Resolve the model folder and load its input configuration."""

        self.model_complete_path = locate_segmentation_model(self.model_name)
        if self.model_complete_path is None:
            print('Model could not be found. Abort.')
            self.abort_process()
            # Fix: without this return, the None path crashed the checks below
            # with a TypeError before the abort could take effect.
            return
        else:
            print(f'Model path: {self.model_complete_path}...')

        if not os.path.exists(self.model_complete_path + "config_input.json"):
            print('The configuration for the inputs to the model could not be located. Abort.')
            self.abort_process()
            return

        with open(self.model_complete_path + "config_input.json") as config_file:
            self.input_config = json.load(config_file)

    def check_gpu(self):
        """Hide CUDA devices when GPU use is disabled."""

        if not self.use_gpu:
            os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    def run(self):
        """Segment every frame of the movie and write one label image per frame."""

        try:

            if self.model_type == 'stardist':
                model, scale_model = _prep_stardist_model(self.model_name, Path(self.model_complete_path).parent, use_gpu=self.use_gpu, scale=self.scale)
            elif self.model_type == 'cellpose':
                model, scale_model = _prep_cellpose_model(self.model_name, self.model_complete_path, use_gpu=self.use_gpu, n_channels=len(self.required_channels), scale=self.scale)

            # Fix: len_movie is stored as a float (see the base class); range()
            # requires an int.
            for t in tqdm(range(int(self.len_movie)), desc="frame"):

                # Load and normalize the required channels at time t.
                f = _load_frames_to_segment(self.file, self.img_num_channels[:, t], scale_model=scale_model, normalize_kwargs=self.normalize_kwargs)

                if self.model_type == "stardist":
                    Y_pred = _segment_image_with_stardist_model(f, model=model, return_details=False)
                elif self.model_type == "cellpose":
                    Y_pred = _segment_image_with_cellpose_model(f, model=model, diameter=self.diameter, cellprob_threshold=self.cellprob_threshold, flow_threshold=self.flow_threshold)

                # Bring the labels back to the original image resolution.
                if self.scale is not None:
                    Y_pred = _rescale_labels(Y_pred, scale_model=scale_model)

                Y_pred = _check_label_dims(Y_pred, file=self.file)

                save_tiff_imagej_compatible(self.pos + os.sep.join([self.label_folder, f"{str(t).zfill(4)}.tif"]), Y_pred, axes='YX')

                del f
                del Y_pred
                gc.collect()

                # Send signal for progress bar: [percent done, estimated time left].
                self.sum_done += 1 / self.len_movie * 100
                mean_exec_per_step = (time.time() - self.t0) / (t + 1)
                pred_time = (self.len_movie - (t + 1)) * mean_exec_per_step
                self.queue.put([self.sum_done, pred_time])

        except Exception as e:
            # Best-effort: report the failure but still send the end signal below.
            print(e)

        try:
            del model
        except NameError:
            # model was never created (e.g. model preparation failed early).
            pass

        gc.collect()

        # Send end signal
        self.queue.put("finished")
        self.queue.close()
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
class SegmentCellThresholdProcess(BaseSegmentProcess):
    """Segmentation worker driven by threshold instructions, parallelized over
    frames with a thread pool.

    In addition to the base-class requirements, ``process_args`` must provide
    ``threshold_instructions`` (path to a JSON instruction file) and
    ``n_threads``.  Per-frame progress ([percent, ETA]) is pushed to ``queue``,
    followed by "finished".
    """

    def __init__(self, *args, **kwargs):

        super().__init__(*args, **kwargs)

        self.equalize = False

        # Model
        self.load_threshold_config()
        self.extract_threshold_parameters()
        self.detect_channels()
        self.prepare_equalize()

        self.write_log()

        self.sum_done = 0
        self.t0 = time.time()

    def prepare_equalize(self):
        """Load the histogram-equalization reference frame, if requested."""

        if self.equalize:
            f_reference = load_frames(self.img_num_channels[:, self.equalize_time], self.file, scale=None, normalize_input=False)
            # target_channel has been remapped to a channel index by detect_channels().
            f_reference = f_reference[:, :, self.threshold_instructions['target_channel']]
        else:
            f_reference = None

        self.threshold_instructions.update({'equalize_reference': f_reference})

    def load_threshold_config(self):
        """Replace the instruction-file path by its parsed JSON content."""

        if os.path.exists(self.threshold_instructions):
            with open(self.threshold_instructions, 'r') as f:
                self.threshold_instructions = json.load(f)
        else:
            print('The configuration path is not valid. Abort.')
            self.abort_process()

    def extract_threshold_parameters(self):
        """Read the target channel and equalization settings from the instructions."""

        self.required_channels = [self.threshold_instructions['target_channel']]
        if 'equalize_reference' in self.threshold_instructions:
            self.equalize, self.equalize_time = self.threshold_instructions['equalize_reference']

    def write_log(self):
        """Append a SEGMENT entry with the threshold instructions to the position log."""

        log = f'Threshold segmentation: {self.threshold_instructions}\n'
        with open(self.pos + f'log_{self.mode}.txt', 'a') as f:
            f.write(f'{datetime.datetime.now()} SEGMENT \n')
            f.write(log)

    def detect_channels(self):
        """Map channels to frame indices and remap instruction channel names to indices."""

        self.channel_indices = _extract_channel_indices_from_config(self.config, self.required_channels)
        print(f'Required channels: {self.required_channels} located at channel indices {self.channel_indices}.')

        # All channels are loaded here (not only the required ones); cast
        # len_movie to int for consistency with the DL worker.
        self.img_num_channels = _get_img_num_per_channel(np.arange(self.nbr_channels), int(self.len_movie), self.nbr_channels)
        self.threshold_instructions.update({'target_channel': self.channel_indices[0]})
        self.threshold_instructions.update({'channel_names': self.channel_names})

    def parallel_job(self, indices):
        """Segment the frames listed in ``indices``; runs in a worker thread."""

        try:

            for t in tqdm(indices, desc="frame"):

                # Load channels at time t
                f = load_frames(self.img_num_channels[:, t], self.file, scale=None, normalize_input=False)
                mask = segment_frame_from_thresholds(f, **self.threshold_instructions)
                save_tiff_imagej_compatible(os.sep.join([self.pos, self.label_folder, f"{str(t).zfill(4)}.tif"]), mask.astype(np.uint16), axes='YX')

                del f
                del mask
                gc.collect()

                # Send signal for progress bar: [percent done, estimated time left].
                # NOTE(review): sum_done is updated from several threads without a
                # lock; the progress estimate is therefore approximate.
                self.sum_done += 1 / self.len_movie * 100
                mean_exec_per_step = (time.time() - self.t0) / (self.sum_done * self.len_movie / 100 + 1)
                pred_time = (self.len_movie - (self.sum_done * self.len_movie / 100 + 1)) * mean_exec_per_step
                self.queue.put([self.sum_done, pred_time])

        except Exception as e:
            print(e)

        return

    def run(self):
        """Split the frame range into chunks and segment them in parallel threads."""

        self.indices = list(range(self.img_num_channels.shape[1]))
        chunks = np.array_split(self.indices, self.n_threads)

        with concurrent.futures.ThreadPoolExecutor(max_workers=self.n_threads) as executor:
            # Fix: removed the duplicated `results = results = ...` assignment.
            results = executor.map(self.parallel_job, chunks)
            try:
                for i, return_value in enumerate(results):
                    print(f"Thread {i} output check: ", return_value)
            except Exception as e:
                print("Exception: ", e)

        # Send end signal
        self.queue.put("finished")
        self.queue.close()
|
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
from multiprocessing import Process
|
|
2
|
+
import time
|
|
3
|
+
import datetime
|
|
4
|
+
import os
|
|
5
|
+
import json
|
|
6
|
+
from celldetective.io import auto_load_number_of_frames, _load_frames_to_measure, locate_labels
|
|
7
|
+
from celldetective.utils import extract_experiment_channels, ConfigSectionMap, _get_img_num_per_channel, _mask_intensity_measurements
|
|
8
|
+
from pathlib import Path, PurePath
|
|
9
|
+
from glob import glob
|
|
10
|
+
from tqdm import tqdm
|
|
11
|
+
import numpy as np
|
|
12
|
+
import gc
|
|
13
|
+
import concurrent.futures
|
|
14
|
+
import datetime
|
|
15
|
+
import os
|
|
16
|
+
import json
|
|
17
|
+
from celldetective.io import interpret_tracking_configuration
|
|
18
|
+
from celldetective.utils import extract_experiment_channels
|
|
19
|
+
from celldetective.measure import drop_tonal_features, measure_features
|
|
20
|
+
from celldetective.tracking import track
|
|
21
|
+
import pandas as pd
|
|
22
|
+
from natsort import natsorted
|
|
23
|
+
from art import tprint
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class TrackingProcess(Process):
    """Multiprocessing worker that measures per-frame features and tracks cells
    for one position and one population.

    ``process_args`` entries are promoted to instance attributes and must
    provide at least ``pos`` (position folder path, with trailing separator),
    ``mode`` (population) and ``n_threads``.  Progress updates
    ([percent, ETA]) and the final status are pushed to ``queue``.
    """

    def __init__(self, queue=None, process_args=None, *args, **kwargs):

        super().__init__(*args, **kwargs)

        self.queue = queue

        if process_args is not None:
            for key, value in process_args.items():
                setattr(self, key, value)

        tprint("Track")
        self.timestep_dataframes = []

        # Experiment
        self.prepare_folders()

        self.locate_experiment_config()
        self.extract_experiment_parameters()
        self.read_tracking_instructions()
        self.detect_movie_and_labels()
        self.detect_channels()

        self.write_log()

        # trackpy mode does not use per-cell tonal features for tracking.
        if not self.btrack_option:
            self.features = []
            self.channel_names = None
            self.haralick_options = None

        self.sum_done = 0
        self.t0 = time.time()

    def read_tracking_instructions(self):
        """Load the tracking instruction file for this population, falling back
        to a default bTrack motion model when none is found."""

        instr_path = PurePath(self.expfolder, Path(f"{self.instruction_file}"))
        if os.path.exists(instr_path):
            print(f"Tracking instructions for the {self.mode} population have been successfully loaded...")
            with open(instr_path, 'r') as f:
                self.instructions = json.load(f)

            self.btrack_config = interpret_tracking_configuration(self.instructions['btrack_config_path'])

            # Optional keys default to None (or True for btrack_option).
            self.features = self.instructions.get('features')
            self.mask_channels = self.instructions.get('mask_channels')
            self.haralick_options = self.instructions.get('haralick_options')
            self.post_processing_options = self.instructions.get('post_processing_options')
            self.btrack_option = self.instructions.get('btrack_option', True)
            self.search_range = self.instructions.get('search_range')
            self.memory = self.instructions.get('memory')
        else:
            print('Tracking instructions could not be located... Using a standard bTrack motion model instead...')
            self.btrack_config = interpret_tracking_configuration(None)
            self.features = None
            self.mask_channels = None
            self.haralick_options = None
            self.post_processing_options = None
            self.btrack_option = True
            self.memory = None
            self.search_range = None

        if self.features is None:
            self.features = []

    def detect_channels(self):
        """Map each experiment channel to its frame indices in the stack."""

        # Fix: len_movie is stored as a float; cast to int for consistency
        # with the segmentation workers.
        self.img_num_channels = _get_img_num_per_channel(self.channel_indices, int(self.len_movie), self.nbr_channels)

    def write_log(self):
        """Append a TRACK entry with the tracking options to the position log."""

        features_log = f'features: {self.features}'
        mask_channels_log = f'mask_channels: {self.mask_channels}'
        haralick_option_log = f'haralick_options: {self.haralick_options}'
        post_processing_option_log = f'post_processing_options: {self.post_processing_options}'
        log_list = [features_log, mask_channels_log, haralick_option_log, post_processing_option_log]
        log = '\n'.join(log_list)

        with open(self.pos + f'log_{self.mode}.txt', 'a') as f:
            f.write(f'{datetime.datetime.now()} TRACK \n')
            f.write(log + "\n")

    def prepare_folders(self):
        """Create the output/tables folders and set population-specific names."""

        if not os.path.exists(self.pos + "output"):
            os.mkdir(self.pos + "output")

        if not os.path.exists(self.pos + os.sep.join(["output", "tables"])):
            os.mkdir(self.pos + os.sep.join(["output", "tables"]))

        if self.mode.lower() == "target" or self.mode.lower() == "targets":
            self.label_folder = "labels_targets"
            self.instruction_file = os.sep.join(["configs", "tracking_instructions_targets.json"])
            self.napari_name = "napari_target_trajectories.npy"
            self.table_name = "trajectories_targets.csv"

        elif self.mode.lower() == "effector" or self.mode.lower() == "effectors":
            self.label_folder = "labels_effectors"
            self.instruction_file = os.sep.join(["configs", "tracking_instructions_effectors.json"])
            self.napari_name = "napari_effector_trajectories.npy"
            self.table_name = "trajectories_effectors.csv"

        else:
            # Fix: an unrecognized mode previously left label_folder and the
            # output names undefined, crashing later with an AttributeError.
            print(f"Population '{self.mode}' not recognized. Abort.")
            self.abort_process()

    def extract_experiment_parameters(self):
        """Read movie settings and channel layout from the experiment config."""

        self.movie_prefix = ConfigSectionMap(self.config, "MovieSettings")["movie_prefix"]
        self.spatial_calibration = float(ConfigSectionMap(self.config, "MovieSettings")["pxtoum"])
        self.time_calibration = float(ConfigSectionMap(self.config, "MovieSettings")["frametomin"])
        self.len_movie = float(ConfigSectionMap(self.config, "MovieSettings")["len_movie"])
        self.shape_x = int(ConfigSectionMap(self.config, "MovieSettings")["shape_x"])
        self.shape_y = int(ConfigSectionMap(self.config, "MovieSettings")["shape_y"])

        self.channel_names, self.channel_indices = extract_experiment_channels(self.config)
        self.nbr_channels = len(self.channel_names)

    def locate_experiment_config(self):
        """Locate config.ini two levels above the position folder."""

        parent1 = Path(self.pos).parent
        self.expfolder = parent1.parent
        self.config = PurePath(self.expfolder, Path("config.ini"))

        if not os.path.exists(self.config):
            print('The configuration file for the experiment was not found...')
            self.abort_process()

    def detect_movie_and_labels(self):
        """Collect the segmented label frames and locate the movie stack."""

        self.label_path = natsorted(glob(self.pos + f"{self.label_folder}" + os.sep + "*.tif"))
        if len(self.label_path) > 0:
            print(f"Found {len(self.label_path)} segmented frames...")
        else:
            print("No segmented frames have been found. Please run segmentation first. Abort...")
            self.abort_process()

        try:
            self.file = glob(self.pos + f"movie/{self.movie_prefix}*.tif")[0]
        except IndexError:
            # Without a movie, intensity-based (tonal) features cannot be measured.
            self.file = None
            # Fix: this previously assigned to a typo attribute
            # 'haralick_option', so Haralick measurements were never disabled.
            self.haralick_options = None
            self.features = drop_tonal_features(self.features)
            print('Movie could not be found. Check the prefix.')

        # Guard: only probe the stack length when a movie file was found.
        if self.file is not None:
            len_movie_auto = auto_load_number_of_frames(self.file)
            if len_movie_auto is not None:
                self.len_movie = len_movie_auto

    def parallel_job(self, indices):
        """Measure features for the frames in ``indices``; runs in a worker thread.

        Returns a list of per-frame DataFrames (possibly empty on error).
        """

        props = []

        try:

            for t in tqdm(indices, desc="frame"):

                # Load channels at time t
                img = _load_frames_to_measure(self.file, indices=self.img_num_channels[:, t])
                lbl = locate_labels(self.pos, population=self.mode, frames=t)
                if lbl is None:
                    continue

                df_props = measure_features(img, lbl, features=self.features + ['centroid'], border_dist=None,
                                            channels=self.channel_names, haralick_options=self.haralick_options, verbose=False)
                df_props.rename(columns={'centroid-1': 'x', 'centroid-0': 'y'}, inplace=True)
                df_props['t'] = int(t)

                props.append(df_props)

                # Measurement accounts for the first 50% of the progress bar;
                # tracking fills the rest.
                self.sum_done += 1 / self.len_movie * 50
                mean_exec_per_step = (time.time() - self.t0) / (self.sum_done * self.len_movie / 50 + 1)
                pred_time = (self.len_movie - (self.sum_done * self.len_movie / 50 + 1)) * mean_exec_per_step + 30
                self.queue.put([self.sum_done, pred_time])

        except Exception as e:
            print(e)

        return props

    def run(self):
        """Measure features in parallel threads, then track and export the tables."""

        self.indices = list(range(self.img_num_channels.shape[1]))
        chunks = np.array_split(self.indices, self.n_threads)

        self.timestep_dataframes = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.n_threads) as executor:
            results = executor.map(self.parallel_job, chunks)
            try:
                for i, return_value in enumerate(results):
                    print(f'Thread {i} completed...')
                    self.timestep_dataframes.extend(return_value)
            except Exception as e:
                print("Exception: ", e)

        print('Features successfully measured...')

        df = pd.concat(self.timestep_dataframes)
        df.reset_index(inplace=True, drop=True)
        df = _mask_intensity_measurements(df, self.mask_channels)

        # do tracking
        if self.btrack_option:
            tracker = 'bTrack'
        else:
            tracker = 'trackpy'

        trajectories, napari_data = track(None,
                                          configuration=self.btrack_config,
                                          objects=df,
                                          spatial_calibration=self.spatial_calibration,
                                          channel_names=self.channel_names,
                                          return_napari_data=True,
                                          optimizer_options={'tm_lim': int(12e4)},
                                          track_kwargs={'step_size': 100},
                                          clean_trajectories_kwargs=self.post_processing_options,
                                          volume=(self.shape_x, self.shape_y),
                                          btrack_option=self.btrack_option,
                                          search_range=self.search_range,
                                          memory=self.memory,
                                          )
        print("Tracking successfully performed...")

        # Save napari data
        np.save(self.pos + os.sep.join(['output', 'tables', self.napari_name]), napari_data, allow_pickle=True)

        trajectories.to_csv(self.pos + os.sep.join(['output', 'tables', self.table_name]), index=False)
        print(f"Trajectory table successfully exported in {os.sep.join(['output', 'tables'])}...")

        # Remove any stale pickled copy of the table so it cannot shadow the CSV.
        if os.path.exists(self.pos + os.sep.join(['output', 'tables', self.table_name.replace('.csv', '.pkl')])):
            os.remove(self.pos + os.sep.join(['output', 'tables', self.table_name.replace('.csv', '.pkl')]))

        del trajectories
        del napari_data
        gc.collect()

        # Send end signal
        self.queue.put([100, 0])
        time.sleep(1)

        self.queue.put("finished")
        self.queue.close()

    def end_process(self):
        """Notify the listener that the job finished, then stop the process."""

        # Fix: send the message before terminate(); terminating first could
        # kill the process before the queue item was flushed to the reader.
        self.queue.put("finished")
        self.terminate()

    def abort_process(self):
        """Notify the listener that the job failed, then stop the process."""

        self.queue.put("error")
        self.terminate()
|